1044 files changed, 36292 insertions(+), 17010 deletions(-)
@@ -107,6 +107,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com> Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com> Mark Brown <broonie@sirena.org.uk> +Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com> Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com> Martin Kepplinger <martink@posteo.de> <martin.kepplinger@ginzinger.com> Matthieu CASTET <castet.matthieu@free.fr> diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst index b2598cc9834c..7242cbda15dd 100644 --- a/Documentation/admin-guide/kernel-parameters.rst +++ b/Documentation/admin-guide/kernel-parameters.rst @@ -109,6 +109,7 @@ parameter is applicable:: IPV6 IPv6 support is enabled. ISAPNP ISA PnP code is enabled. ISDN Appropriate ISDN support is enabled. + ISOL CPU Isolation is enabled. JOY Appropriate joystick support is enabled. KGDB Kernel debugger support is enabled. KVM Kernel Virtual Machine support is enabled. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 6571fbfdb2a1..af7104aaffd9 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -328,11 +328,15 @@ not play well with APC CPU idle - disable it if you have APC and your system crashes randomly. - apic= [APIC,X86-32] Advanced Programmable Interrupt Controller + apic= [APIC,X86] Advanced Programmable Interrupt Controller Change the output verbosity whilst booting Format: { quiet (default) | verbose | debug } Change the amount of debugging information output when initialising the APIC and IO-APIC components. + For X86-32, this can also be used to specify an APIC + driver name. + Format: apic=driver_name + Examples: apic=bigsmp apic_extnmi= [APIC,X86] External NMI delivery setting Format: { bsp (default) | all | none } @@ -1737,7 +1741,7 @@ isapnp= [ISAPNP] Format: <RDP>,<reset>,<pci_scan>,<verbosity> - isolcpus= [KNL,SMP] Isolate a given set of CPUs from disturbance. + isolcpus= [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance. [Deprecated - use cpusets instead] Format: [flag-list,]<cpu-list> @@ -2662,7 +2666,7 @@ Valid arguments: on, off Default: on - nohz_full= [KNL,BOOT] + nohz_full= [KNL,BOOT,SMP,ISOL] The argument is a cpu list, as described above. In kernels built with CONFIG_NO_HZ_FULL=y, set the specified list of CPUs whose tick will be stopped @@ -2708,6 +2712,8 @@ steal time is computed, but won't influence scheduler behaviour + nopti [X86-64] Disable kernel page table isolation + nolapic [X86-32,APIC] Do not enable or use the local APIC. nolapic_timer [X86-32,APIC] Do not use the local APIC timer. @@ -3282,6 +3288,12 @@ pt. [PARIDE] See Documentation/blockdev/paride.txt. + pti= [X86_64] + Control user/kernel address space isolation: + on - enable + off - disable + auto - default setting + pty.legacy_count= [KNL] Number of legacy pty's. Overwrites compiled-in default number. diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst index de50a8561774..9b55952039a6 100644 --- a/Documentation/admin-guide/thunderbolt.rst +++ b/Documentation/admin-guide/thunderbolt.rst @@ -230,7 +230,7 @@ If supported by your machine this will be exposed by the WMI bus with a sysfs attribute called "force_power". 
For example the intel-wmi-thunderbolt driver exposes this attribute in: - /sys/devices/platform/PNP0C14:00/wmi_bus/wmi_bus-PNP0C14:00/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power + /sys/bus/wmi/devices/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power To force the power to on, write 1 to this attribute file. To disable force power, write 0 to this attribute file. diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index 72860ce7f610..d2169a56f5e3 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -55,10 +55,10 @@ Optional Properties: - reset-gpios: The GPIO phandle and specifier for the PHY reset signal. -- reset-delay-us: Delay after the reset was asserted in microseconds. +- reset-assert-us: Delay after the reset was asserted in microseconds. If this property is missing the delay will be skipped. -- reset-post-delay-us: Delay after the reset was deasserted in microseconds. +- reset-deassert-us: Delay after the reset was deasserted in microseconds. If this property is missing the delay will be skipped. Example: @@ -70,6 +70,6 @@ ethernet-phy@0 { reg = <0>; reset-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; - reset-delay-us = <1000>; - reset-post-delay-us = <2000>; + reset-assert-us = <1000>; + reset-deassert-us = <2000>; }; diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt new file mode 100644 index 000000000000..270ea4efff13 --- /dev/null +++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt @@ -0,0 +1,48 @@ +* Socionext AVE ethernet controller + +This describes the devicetree bindings for AVE ethernet controller +implemented on Socionext UniPhier SoCs. + +Required properties: + - compatible: Should be + - "socionext,uniphier-pro4-ave4" : for Pro4 SoC + - "socionext,uniphier-pxs2-ave4" : for PXs2 SoC + - "socionext,uniphier-ld11-ave4" : for LD11 SoC + - "socionext,uniphier-ld20-ave4" : for LD20 SoC + - reg: Address where registers are mapped and size of region. + - interrupts: Should contain the MAC interrupt. + - phy-mode: See ethernet.txt in the same directory. Allow to choose + "rgmii", "rmii", or "mii" according to the PHY. + - phy-handle: Should point to the external phy device. + See ethernet.txt file in the same directory. + - clocks: A phandle to the clock for the MAC. + +Optional properties: + - resets: A phandle to the reset control for the MAC. + - local-mac-address: See ethernet.txt in the same directory. + +Required subnode: + - mdio: A container for child nodes representing phy nodes. + See phy.txt in the same directory. 
+ +Example: + + ether: ethernet@65000000 { + compatible = "socionext,uniphier-ld20-ave4"; + reg = <0x65000000 0x8500>; + interrupts = <0 66 4>; + phy-mode = "rgmii"; + phy-handle = <ðphy>; + clocks = <&sys_clk 6>; + resets = <&sys_rst 6>; + local-mac-address = [00 00 00 00 00 00]; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + ethphy: ethphy@1 { + reg = <1>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/socionext-netsec.txt b/Documentation/devicetree/bindings/net/socionext-netsec.txt new file mode 100644 index 000000000000..0cff94fb0433 --- /dev/null +++ b/Documentation/devicetree/bindings/net/socionext-netsec.txt @@ -0,0 +1,53 @@ +* Socionext NetSec Ethernet Controller IP + +Required properties: +- compatible: Should be "socionext,synquacer-netsec" +- reg: Address and length of the control register area, followed by the + address and length of the EEPROM holding the MAC address and + microengine firmware +- interrupts: Should contain ethernet controller interrupt +- clocks: phandle to the PHY reference clock +- clock-names: Should be "phy_ref_clk" +- phy-mode: See ethernet.txt file in the same directory +- phy-handle: See ethernet.txt in the same directory. + +- mdio device tree subnode: When the Netsec has a phy connected to its local + mdio, there must be device tree subnode with the following + required properties: + + - #address-cells: Must be <1>. + - #size-cells: Must be <0>. + + For each phy on the mdio bus, there must be a node with the following + fields: + - compatible: Refer to phy.txt + - reg: phy id used to communicate to phy. + +Optional properties: (See ethernet.txt file in the same directory) +- dma-coherent: Boolean property, must only be present if memory + accesses performed by the device are cache coherent. +- local-mac-address: See ethernet.txt in the same directory. +- mac-address: See ethernet.txt in the same directory. +- max-speed: See ethernet.txt in the same directory. +- max-frame-size: See ethernet.txt in the same directory. 
+ +Example: + eth0: ethernet@522d0000 { + compatible = "socionext,synquacer-netsec"; + reg = <0 0x522d0000 0x0 0x10000>, <0 0x10000000 0x0 0x10000>; + interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clk_netsec>; + clock-names = "phy_ref_clk"; + phy-mode = "rgmii"; + max-speed = <1000>; + max-frame-size = <9000>; + phy-handle = <&phy1>; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + phy1: ethernet-phy@1 { + compatible = "ethernet-phy-ieee802.3-c22"; + reg = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/sound/da7218.txt b/Documentation/devicetree/bindings/sound/da7218.txt index 5ca5a709b6aa..3ab9dfef38d1 100644 --- a/Documentation/devicetree/bindings/sound/da7218.txt +++ b/Documentation/devicetree/bindings/sound/da7218.txt @@ -73,7 +73,7 @@ Example: compatible = "dlg,da7218"; reg = <0x1a>; interrupt-parent = <&gpio6>; - interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; wakeup-source; VDD-supply = <®_audio>; diff --git a/Documentation/devicetree/bindings/sound/da7219.txt b/Documentation/devicetree/bindings/sound/da7219.txt index cf61681826b6..5b54d2d045c3 100644 --- a/Documentation/devicetree/bindings/sound/da7219.txt +++ b/Documentation/devicetree/bindings/sound/da7219.txt @@ -77,7 +77,7 @@ Example: reg = <0x1a>; interrupt-parent = <&gpio6>; - interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; VDD-supply = <®_audio>; VDDMIC-supply = <®_audio>; diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst index 2e7ee0313c1c..e94d3ac2bdd0 100644 --- a/Documentation/gpu/i915.rst +++ b/Documentation/gpu/i915.rst @@ -341,10 +341,7 @@ GuC GuC-specific firmware loader ---------------------------- -.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c - :doc: GuC-specific firmware loader - -.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_loader.c +.. kernel-doc:: drivers/gpu/drm/i915/intel_guc_fw.c :internal: GuC-based command submission diff --git a/Documentation/networking/xfrm_proc.txt b/Documentation/networking/xfrm_proc.txt index d0d8bafa9016..2eae619ab67b 100644 --- a/Documentation/networking/xfrm_proc.txt +++ b/Documentation/networking/xfrm_proc.txt @@ -5,13 +5,15 @@ Masahide NAKAMURA <nakam@linux-ipv6.org> Transformation Statistics ------------------------- -xfrm_proc is a statistics shown factor dropped by transformation -for developer. -It is a counter designed from current transformation source code -and defined like linux private MIB. -Inbound statistics -~~~~~~~~~~~~~~~~~~ +The xfrm_proc code is a set of statistics showing numbers of packets +dropped by the transformation code and why. These counters are defined +as part of the linux private MIB. These counters can be viewed in +/proc/net/xfrm_stat. 
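+
+For example, the counters can be read like any other procfs file. A
+minimal sketch in C (assuming the usual one "name value" pair per
+line; error handling kept short):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char line[128];
+		FILE *f = fopen("/proc/net/xfrm_stat", "r");
+
+		if (!f) {
+			perror("fopen");
+			return 1;
+		}
+		/* each line holds a counter name and its value,
+		 * e.g. "XfrmInError    0" */
+		while (fgets(line, sizeof(line), f))
+			fputs(line, stdout);
+		fclose(f);
+		return 0;
+	}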
+ + +Inbound errors +~~~~~~~~~~~~~~ XfrmInError: All errors which is not matched others XfrmInBufferError: @@ -46,6 +48,10 @@ XfrmInPolBlock: Policy discards XfrmInPolError: Policy error +XfrmAcquireError: + State hasn't been fully acquired before use +XfrmFwdHdrError: + Forward routing of a packet is not allowed Outbound errors ~~~~~~~~~~~~~~~ @@ -72,3 +78,5 @@ XfrmOutPolDead: Policy is dead XfrmOutPolError: Policy error +XfrmOutStateInvalid: + State is invalid, perhaps expired diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt index 3448e675b462..ea91cb61a602 100644 --- a/Documentation/x86/x86_64/mm.txt +++ b/Documentation/x86/x86_64/mm.txt @@ -1,6 +1,4 @@ -<previous description obsolete, deleted> - Virtual memory map with 4 level page tables: 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm @@ -14,13 +12,17 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) ... unused hole ... ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB) ... unused hole ... + vaddr_end for KASLR +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping +fffffe8000000000 - fffffeffffffffff (=39 bits) LDT remap for PTI ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space (variable) -ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls +ffffffffa0000000 - [fixmap start] (~1526 MB) module mapping space (variable) +[fixmap start] - ffffffffff5fffff kernel-internal fixmap range +ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Virtual memory map with 5 level page tables: @@ -29,26 +31,31 @@ Virtual memory map with 5 level page tables: hole caused by [56:63] sign extension ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory -ff90000000000000 - ff91ffffffffffff (=49 bits) hole -ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space +ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI +ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) ... unused hole ... ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB) ... unused hole ... + vaddr_end for KASLR +fffffe0000000000 - fffffe7fffffffff (=39 bits) cpu_entry_area mapping +... unused hole ... ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks ... unused hole ... ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space ... unused hole ... ffffffff80000000 - ffffffff9fffffff (=512 MB) kernel text mapping, from phys 0 -ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space -ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls +ffffffffa0000000 - fffffffffeffffff (1520 MB) module mapping space +[fixmap start] - ffffffffff5fffff kernel-internal fixmap range +ffffffffff600000 - ffffffffff600fff (=4 kB) legacy vsyscall ABI ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole Architecture defines a 64-bit virtual address. Implementations can support less. 
Currently supported are 48- and 57-bit virtual addresses. Bits 63 -through to the most-significant implemented bit are set to either all ones -or all zero. This causes hole between user space and kernel addresses. +through to the most-significant implemented bit are sign extended. +This causes hole between user space and kernel addresses if you interpret them +as unsigned. The direct mapping covers all memory in the system up to the highest memory address (this means in some cases it can also include PCI memory @@ -58,19 +65,15 @@ vmalloc space is lazily synchronized into the different PML4/PML5 pages of the processes using the page fault handler, with init_top_pgt as reference. -Current X86-64 implementations support up to 46 bits of address space (64 TB), -which is our current limit. This expands into MBZ space in the page tables. - We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual memory window (this size is arbitrary, it can be raised later if needed). The mappings are not part of any other kernel PGD and are only available during EFI runtime calls. -The module mapping space size changes based on the CONFIG requirements for the -following fixmap section. - Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all physical memory, vmalloc/ioremap space and virtual memory map are randomized. Their order is preserved but their base will be offset early at boot time. --Andi Kleen, Jul 2004 +Be very careful vs. KASLR when changing anything here. The KASLR address +range must not overlap with anything except the KASAN shadow area, which is +correct as KASAN disables KASLR. diff --git a/MAINTAINERS b/MAINTAINERS index 753799d24cd9..e22ca0ae995d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2622,24 +2622,22 @@ F: fs/bfs/ F: include/uapi/linux/bfs_fs.h BLACKFIN ARCHITECTURE -M: Steven Miao <realmz6@gmail.com> L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) T: git git://git.code.sf.net/p/adi-linux/code W: http://blackfin.uclinux.org -S: Supported +S: Orphan F: arch/blackfin/ BLACKFIN EMAC DRIVER L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org -S: Supported +S: Orphan F: drivers/net/ethernet/adi/ BLACKFIN MEDIA DRIVER -M: Scott Jiang <scott.jiang.linux@gmail.com> L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org/ -S: Supported +S: Orphan F: drivers/media/platform/blackfin/ F: drivers/media/i2c/adv7183* F: drivers/media/i2c/vs6624* @@ -2647,25 +2645,25 @@ F: drivers/media/i2c/vs6624* BLACKFIN RTC DRIVER L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org -S: Supported +S: Orphan F: drivers/rtc/rtc-bfin.c BLACKFIN SDH DRIVER L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org -S: Supported +S: Orphan F: drivers/mmc/host/bfin_sdh.c BLACKFIN SERIAL DRIVER L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org -S: Supported +S: Orphan F: drivers/tty/serial/bfin_uart.c BLACKFIN WATCHDOG DRIVER L: adi-buildroot-devel@lists.sourceforge.net (moderated for non-subscribers) W: http://blackfin.uclinux.org -S: Supported +S: Orphan F: drivers/watchdog/bfin_wdt.c BLINKM RGB LED DRIVER @@ -4944,6 +4942,11 @@ S: Maintained F: lib/dynamic_debug.c F: include/linux/dynamic_debug.h +DYNAMIC INTERRUPT MODERATION +M: Tal Gilboa <talgi@mellanox.com> +S: Maintained +F: 
include/linux/net_dim.h + DZ DECSTATION DZ11 SERIAL DRIVER M: "Maciej W. Rozycki" <macro@linux-mips.org> S: Maintained @@ -5154,15 +5157,15 @@ F: sound/usb/misc/ua101.c EFI TEST DRIVER L: linux-efi@vger.kernel.org M: Ivan Hu <ivan.hu@canonical.com> -M: Matt Fleming <matt@codeblueprint.co.uk> +M: Ard Biesheuvel <ard.biesheuvel@linaro.org> S: Maintained F: drivers/firmware/efi/test/ EFI VARIABLE FILESYSTEM M: Matthew Garrett <matthew.garrett@nebula.com> M: Jeremy Kerr <jk@ozlabs.org> -M: Matt Fleming <matt@codeblueprint.co.uk> -T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git +M: Ard Biesheuvel <ard.biesheuvel@linaro.org> +T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git L: linux-efi@vger.kernel.org S: Maintained F: fs/efivarfs/ @@ -5323,7 +5326,6 @@ S: Supported F: security/integrity/evm/ EXTENSIBLE FIRMWARE INTERFACE (EFI) -M: Matt Fleming <matt@codeblueprint.co.uk> M: Ard Biesheuvel <ard.biesheuvel@linaro.org> L: linux-efi@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/efi/efi.git @@ -10152,7 +10154,7 @@ F: drivers/irqchip/irq-ompic.c F: drivers/irqchip/irq-or1k-* OPENVSWITCH -M: Pravin Shelar <pshelar@nicira.com> +M: Pravin B Shelar <pshelar@ovn.org> L: netdev@vger.kernel.org L: dev@openvswitch.org W: http://openvswitch.org @@ -12645,6 +12647,13 @@ F: drivers/md/raid* F: include/linux/raid/ F: include/uapi/linux/raid/ +SOCIONEXT (SNI) NETSEC NETWORK DRIVER +M: Jassi Brar <jaswinder.singh@linaro.org> +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/socionext/netsec.c +F: Documentation/devicetree/bindings/net/socionext-netsec.txt + SONIC NETWORK DRIVER M: Thomas Bogendoerfer <tsbogend@alpha.franken.de> L: netdev@vger.kernel.org @@ -13508,6 +13517,7 @@ M: Mika Westerberg <mika.westerberg@linux.intel.com> M: Yehezkel Bernat <yehezkel.bernat@intel.com> T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git S: Maintained +F: Documentation/admin-guide/thunderbolt.rst F: drivers/thunderbolt/ F: include/linux/thunderbolt.h @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 15 SUBLEVEL = 0 -EXTRAVERSION = -rc4 +EXTRAVERSION = -rc7 NAME = Fearless Coyote # *DOCUMENTATION* @@ -789,6 +789,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign) # disable invalid "can't wrap" optimizations for signed / pointers KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow) +# Make sure -fstack-check isn't enabled (like gentoo apparently did) +KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,) + # conserve stack if available KBUILD_CFLAGS += $(call cc-option,-fconserve-stack) diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index 4e6e9f57e790..dc91c663bcc0 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi @@ -35,6 +35,14 @@ reg = <0x80 0x10>, <0x100 0x10>; #clock-cells = <0>; clocks = <&input_clk>; + + /* + * Set initial core pll output frequency to 90MHz. + * It will be applied at the core pll driver probing + * on early boot. + */ + assigned-clocks = <&core_clk>; + assigned-clock-rates = <90000000>; }; core_intc: archs-intc@cpu { diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 63954a8b0100..69ff4895f2ba 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi @@ -35,6 +35,14 @@ reg = <0x80 0x10>, <0x100 0x10>; #clock-cells = <0>; clocks = <&input_clk>; + + /* + * Set initial core pll output frequency to 100MHz. + * It will be applied at the core pll driver probing + * on early boot. 
+ */ + assigned-clocks = <&core_clk>; + assigned-clock-rates = <100000000>; }; core_intc: archs-intc@cpu { diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 8f627c200d60..006aa3de5348 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts @@ -114,6 +114,14 @@ reg = <0x00 0x10>, <0x14B8 0x4>; #clock-cells = <0>; clocks = <&input_clk>; + + /* + * Set initial core pll output frequency to 1GHz. + * It will be applied at the core pll driver probing + * on early boot. + */ + assigned-clocks = <&core_clk>; + assigned-clock-rates = <1000000000>; }; serial: serial@5000 { diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 7b8f8faf8a24..ac6b0ed8341e 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig @@ -49,10 +49,11 @@ CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_HW_RANDOM is not set # CONFIG_HWMON is not set +CONFIG_DRM=y +# CONFIG_DRM_FBDEV_EMULATION is not set +CONFIG_DRM_UDL=y CONFIG_FB=y -CONFIG_FB_UDL=y CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_USB=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index f35974ee7264..c9173c02081c 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) return 0; __asm__ __volatile__( + " mov lp_count, %5 \n" " lp 3f \n" "1: ldb.ab %3, [%2, 1] \n" " breq.d %3, 0, 3f \n" @@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) " .word 1b, 4b \n" " .previous \n" : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) - : "g"(-EFAULT), "l"(count) - : "memory"); + : "g"(-EFAULT), "r"(count) + : "lp_count", "lp_start", "lp_end", "memory"); return res; } diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 7ef7d9a8ff89..9d27331fe69a 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -199,7 +199,7 @@ static void read_arc_build_cfg_regs(void) unsigned int exec_ctrl; READ_BCR(AUX_EXEC_CTRL, exec_ctrl); - cpu->extn.dual_enb = exec_ctrl & 1; + cpu->extn.dual_enb = !(exec_ctrl & 1); /* dual issue always present for this core */ cpu->extn.dual = 1; diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index 74315f302971..bf40e06f3fb8 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -163,7 +163,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, */ static int __print_sym(unsigned int address, void *unused) { - __print_symbol(" %s\n", address); + printk(" %pS\n", (void *)address); return 0; } diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c index bcd7c9fc5d0f..133a4dae41fe 100644 --- a/arch/arc/kernel/traps.c +++ b/arch/arc/kernel/traps.c @@ -83,6 +83,7 @@ DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC) DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR) DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) +DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0) /* * Entry Point for Misaligned Data access Exception, for emulating in software @@ -115,6 +116,8 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs) * Thus TRAP_S <n> can be used for specific purpose * -1 used for software breakpointing (gdb) * -2 used by 
kprobes + * -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggle such as + * -fno-isolate-erroneous-paths-dereference */ void do_non_swi_trap(unsigned long address, struct pt_regs *regs) { @@ -134,6 +137,9 @@ void do_non_swi_trap(unsigned long address, struct pt_regs *regs) kgdb_trap(regs); break; + case 5: + do_trap5_error(address, regs); + break; default: break; } @@ -155,3 +161,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs) insterror_is_error(address, regs); } + +/* + * abort() call generated by older gcc for __builtin_trap() + */ +void abort(void) +{ + __asm__ __volatile__("trap_s 5\n"); +} diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index 7d8c1d6c2f60..6e9a0a9a6a04 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c @@ -163,6 +163,9 @@ static void show_ecr_verbose(struct pt_regs *regs) else pr_cont("Bus Error, check PRM\n"); #endif + } else if (vec == ECR_V_TRAP) { + if (regs->ecr_param == 5) + pr_cont("gcc generated __builtin_trap\n"); } else { pr_cont("Check Programmer's Manual\n"); } diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c index f1ac6790da5f..46544e88492d 100644 --- a/arch/arc/plat-axs10x/axs10x.c +++ b/arch/arc/plat-axs10x/axs10x.c @@ -317,25 +317,23 @@ static void __init axs103_early_init(void) * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack * of fudging the freq in DT */ +#define AXS103_QUAD_CORE_CPU_FREQ_HZ 50000000 + unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F; if (num_cores > 2) { - u32 freq = 50, orig; - /* - * TODO: use cpu node "cpu-freq" param instead of platform-specific - * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu. - */ + u32 freq; int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk"); const struct fdt_property *prop; prop = fdt_get_property(initial_boot_params, off, - "clock-frequency", NULL); - orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000; + "assigned-clock-rates", NULL); + freq = be32_to_cpu(*(u32 *)(prop->data)); /* Patching .dtb in-place with new core clock value */ - if (freq != orig ) { - freq = cpu_to_be32(freq * 1000000); + if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) { + freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ); fdt_setprop_inplace(initial_boot_params, off, - "clock-frequency", &freq, sizeof(freq)); + "assigned-clock-rates", &freq, sizeof(freq)); } } #endif diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index fd0ae5e38639..2958aedb649a 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c @@ -38,42 +38,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu) #define CREG_PAE (CREG_BASE + 0x180) #define CREG_PAE_UPDATE (CREG_BASE + 0x194) -#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8) -#define CREG_CORE_IF_CLK_DIV_2 0x1 -#define CGU_BASE ARC_PERIPHERAL_BASE -#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4) -#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0) -#define CGU_PLL_STATUS_LOCK BIT(0) -#define CGU_PLL_STATUS_ERR BIT(1) -#define CGU_PLL_CTRL_1GHZ 0x3A10 -#define HSDK_PLL_LOCK_TIMEOUT 500 - -#define HSDK_PLL_LOCKED() \ - !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK) - -#define HSDK_PLL_ERR() \ - !!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR) - -static void __init hsdk_set_cpu_freq_1ghz(void) -{ - u32 timeout = HSDK_PLL_LOCK_TIMEOUT; - - /* - * As we set cpu clock which exceeds 500MHz, the divider for the interface - * 
clock must be programmed to div-by-2. - */ - iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV); - - /* Set cpu clock to 1GHz */ - iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL); - - while (!HSDK_PLL_LOCKED() && timeout--) - cpu_relax(); - - if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR()) - pr_err("Failed to setup CPU frequency to 1GHz!"); -} - #define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000) #define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) @@ -98,12 +62,6 @@ static void __init hsdk_init_early(void) * minimum possible div-by-2. */ iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); - - /* - * Setup CPU frequency to 1GHz. - * TODO: remove it after smart hsdk pll driver will be introduced. - */ - hsdk_set_cpu_freq_1ghz(); } static const char *hsdk_compat[] __initconst = { diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi index 45d815a86d42..de08d9045cb8 100644 --- a/arch/arm/boot/dts/aspeed-g4.dtsi +++ b/arch/arm/boot/dts/aspeed-g4.dtsi @@ -219,7 +219,7 @@ compatible = "aspeed,ast2400-vuart"; reg = <0x1e787000 0x40>; reg-shift = <2>; - interrupts = <10>; + interrupts = <8>; clocks = <&clk_uart>; no-loopback-test; status = "disabled"; diff --git a/arch/arm/boot/dts/at91-tse850-3.dts b/arch/arm/boot/dts/at91-tse850-3.dts index 5f29010cdbd8..9b82cc8843e1 100644 --- a/arch/arm/boot/dts/at91-tse850-3.dts +++ b/arch/arm/boot/dts/at91-tse850-3.dts @@ -221,6 +221,7 @@ jc42@18 { compatible = "nxp,se97b", "jedec,jc-42.4-temp"; reg = <0x18>; + smbus-timeout-disable; }; dpot: mcp4651-104@28 { diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts index 413dbd5d9f64..81942ae83e1f 100644 --- a/arch/arm/boot/dts/da850-lego-ev3.dts +++ b/arch/arm/boot/dts/da850-lego-ev3.dts @@ -178,7 +178,7 @@ */ battery { pinctrl-names = "default"; - pintctrl-0 = <&battery_pins>; + pinctrl-0 = <&battery_pins>; compatible = "lego,ev3-battery"; io-channels = <&adc 4>, <&adc 3>; io-channel-names = "voltage", "current"; @@ -392,7 +392,7 @@ batt_volt_en { gpio-hog; gpios = <6 GPIO_ACTIVE_HIGH>; - output-low; + output-high; }; }; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index b2b95ff205e8..0029ec27819c 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -664,6 +664,10 @@ status = "okay"; }; +&mixer { + status = "okay"; +}; + /* eMMC flash */ &mmc_0 { status = "okay"; diff --git a/arch/arm/boot/dts/ls1021a-qds.dts b/arch/arm/boot/dts/ls1021a-qds.dts index 4f211e3c903a..7bb402d3e9d0 100644 --- a/arch/arm/boot/dts/ls1021a-qds.dts +++ b/arch/arm/boot/dts/ls1021a-qds.dts @@ -215,7 +215,7 @@ reg = <0x2a>; VDDA-supply = <®_3p3v>; VDDIO-supply = <®_3p3v>; - clocks = <&sys_mclk 1>; + clocks = <&sys_mclk>; }; }; }; diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts index 7202d9c504be..860b898141f0 100644 --- a/arch/arm/boot/dts/ls1021a-twr.dts +++ b/arch/arm/boot/dts/ls1021a-twr.dts @@ -187,7 +187,7 @@ reg = <0x0a>; VDDA-supply = <®_3p3v>; VDDIO-supply = <®_3p3v>; - clocks = <&sys_mclk 1>; + clocks = <&sys_mclk>; }; }; diff --git a/arch/arm/boot/dts/rk3066a-marsboard.dts b/arch/arm/boot/dts/rk3066a-marsboard.dts index c6d92c25df42..d23ee6d911ac 100644 --- a/arch/arm/boot/dts/rk3066a-marsboard.dts +++ b/arch/arm/boot/dts/rk3066a-marsboard.dts @@ -83,6 +83,10 @@ }; }; +&cpu0 { + cpu0-supply = <&vdd_arm>; +}; + &i2c1 { status = "okay"; clock-frequency = 
<400000>; diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index cd24894ee5c6..6102e4e7f35c 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi @@ -956,7 +956,7 @@ iep_mmu: iommu@ff900800 { compatible = "rockchip,iommu"; reg = <0x0 0xff900800 0x0 0x40>; - interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>; + interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "iep_mmu"; #iommu-cells = <0>; status = "disabled"; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index b91300d49a31..5840f5c75c3b 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -502,8 +502,8 @@ reg = <0x01c16000 0x1000>; interrupts = <58>; clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>, - <&ccu 9>, - <&ccu 18>; + <&ccu CLK_PLL_VIDEO0_2X>, + <&ccu CLK_PLL_VIDEO1_2X>; clock-names = "ahb", "mod", "pll-0", "pll-1"; dmas = <&dma SUN4I_DMA_NORMAL 16>, <&dma SUN4I_DMA_NORMAL 16>, diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 6ae4d95e230e..316cb8b2945b 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -82,8 +82,8 @@ reg = <0x01c16000 0x1000>; interrupts = <58>; clocks = <&ccu CLK_AHB_HDMI>, <&ccu CLK_HDMI>, - <&ccu 9>, - <&ccu 16>; + <&ccu CLK_PLL_VIDEO0_2X>, + <&ccu CLK_PLL_VIDEO1_2X>; clock-names = "ahb", "mod", "pll-0", "pll-1"; dmas = <&dma SUN4I_DMA_NORMAL 16>, <&dma SUN4I_DMA_NORMAL 16>, diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 8bfa12b548e0..72d3fe44ecaf 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -429,8 +429,8 @@ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>; clocks = <&ccu CLK_AHB1_HDMI>, <&ccu CLK_HDMI>, <&ccu CLK_HDMI_DDC>, - <&ccu 7>, - <&ccu 13>; + <&ccu CLK_PLL_VIDEO0_2X>, + <&ccu CLK_PLL_VIDEO1_2X>; clock-names = "ahb", "mod", "ddc", "pll-0", "pll-1"; resets = <&ccu RST_AHB1_HDMI>; reset-names = "ahb"; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 68dfa82544fc..59655e42e4b0 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -581,8 +581,8 @@ reg = <0x01c16000 0x1000>; interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>; clocks = <&ccu CLK_AHB_HDMI0>, <&ccu CLK_HDMI>, - <&ccu 9>, - <&ccu 18>; + <&ccu CLK_PLL_VIDEO0_2X>, + <&ccu CLK_PLL_VIDEO1_2X>; clock-names = "ahb", "mod", "pll-0", "pll-1"; dmas = <&dma SUN4I_DMA_NORMAL 16>, <&dma SUN4I_DMA_NORMAL 16>, diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts index 98715538932f..a021ee6da396 100644 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts @@ -146,6 +146,7 @@ status = "okay"; axp81x: pmic@3a3 { + compatible = "x-powers,axp813"; reg = <0x3a3>; interrupt-parent = <&r_intc>; interrupts = <0 IRQ_TYPE_LEVEL_LOW>; diff --git a/arch/arm/boot/dts/tango4-common.dtsi b/arch/arm/boot/dts/tango4-common.dtsi index 0ec1b0a317b4..ff72a8efb73d 100644 --- a/arch/arm/boot/dts/tango4-common.dtsi +++ b/arch/arm/boot/dts/tango4-common.dtsi @@ -156,7 +156,6 @@ reg = <0x6e000 0x400>; ranges = <0 0x6e000 0x400>; interrupt-parent = <&gic>; - interrupt-controller; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 5cf04888c581..3e26c6f7a191 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -793,7 +793,6 @@ void abort(void) /* if that doesn't kill us, halt 
*/ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index 8be04ec95adf..5ace9380626a 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c @@ -868,10 +868,10 @@ static const struct dma_slave_map dm365_edma_map[] = { { "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 17) }, { "spi_davinci.3", "tx", EDMA_FILTER_PARAM(0, 18) }, { "spi_davinci.3", "rx", EDMA_FILTER_PARAM(0, 19) }, - { "dm6441-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) }, - { "dm6441-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) }, - { "dm6441-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) }, - { "dm6441-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) }, + { "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 26) }, + { "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 27) }, + { "da830-mmc.1", "rx", EDMA_FILTER_PARAM(0, 30) }, + { "da830-mmc.1", "tx", EDMA_FILTER_PARAM(0, 31) }, }; static struct edma_soc_info dm365_edma_pdata = { @@ -925,12 +925,14 @@ static struct resource edma_resources[] = { /* not using TC*_ERR */ }; -static struct platform_device dm365_edma_device = { - .name = "edma", - .id = 0, - .dev.platform_data = &dm365_edma_pdata, - .num_resources = ARRAY_SIZE(edma_resources), - .resource = edma_resources, +static const struct platform_device_info dm365_edma_device __initconst = { + .name = "edma", + .id = 0, + .dma_mask = DMA_BIT_MASK(32), + .res = edma_resources, + .num_res = ARRAY_SIZE(edma_resources), + .data = &dm365_edma_pdata, + .size_data = sizeof(dm365_edma_pdata), }; static struct resource dm365_asp_resources[] = { @@ -1428,13 +1430,18 @@ int __init dm365_init_video(struct vpfe_config *vpfe_cfg, static int __init dm365_init_devices(void) { + struct platform_device *edma_pdev; int ret = 0; if (!cpu_is_davinci_dm365()) return 0; davinci_cfg_reg(DM365_INT_EDMA_CC); - platform_device_register(&dm365_edma_device); + edma_pdev = platform_device_register_full(&dm365_edma_device); + if (IS_ERR(edma_pdev)) { + pr_warn("%s: Failed to register eDMA\n", __func__); + return PTR_ERR(edma_pdev); + } platform_device_register(&dm365_mdio_device); platform_device_register(&dm365_emac_device); diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts index 45bdbfb96126..4a8d3f83a36e 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts @@ -75,6 +75,7 @@ pinctrl-0 = <&rgmii_pins>; phy-mode = "rgmii"; phy-handle = <&ext_rgmii_phy>; + phy-supply = <®_dc1sw>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts index 806442d3e846..604cdaedac38 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts @@ -77,6 +77,7 @@ pinctrl-0 = <&rmii_pins>; phy-mode = "rmii"; phy-handle = <&ext_rmii_phy1>; + phy-supply = <®_dc1sw>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts index 0eb2acedf8c3..abe179de35d7 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts @@ -82,6 +82,7 @@ pinctrl-0 = <&rgmii_pins>; phy-mode = "rgmii"; phy-handle = <&ext_rgmii_phy>; + phy-supply = <®_dc1sw>; status = "okay"; }; @@ -95,7 +96,7 @@ &mmc2 { pinctrl-names = "default"; 
pinctrl-0 = <&mmc2_pins>; - vmmc-supply = <®_vcc3v3>; + vmmc-supply = <®_dcdc1>; vqmmc-supply = <®_vcc1v8>; bus-width = <8>; non-removable; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi index a5da18a6f286..43418bd881d8 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi @@ -45,19 +45,10 @@ #include "sun50i-a64.dtsi" -/ { - reg_vcc3v3: vcc3v3 { - compatible = "regulator-fixed"; - regulator-name = "vcc3v3"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; - }; -}; - &mmc0 { pinctrl-names = "default"; pinctrl-0 = <&mmc0_pins>; - vmmc-supply = <®_vcc3v3>; + vmmc-supply = <®_dcdc1>; non-removable; disable-wp; bus-width = <4>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts index b6b7a561df8c..a42fd79a62a3 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h5-orangepi-zero-plus2.dts @@ -71,7 +71,7 @@ pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin>; vmmc-supply = <®_vcc3v3>; bus-width = <4>; - cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; + cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index a298df74ca6c..dbe2648649db 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi @@ -255,7 +255,6 @@ &avb { pinctrl-0 = <&avb_pins>; pinctrl-names = "default"; - renesas,no-ether-link; phy-handle = <&phy0>; status = "okay"; diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi index 0d85b315ce71..73439cf48659 100644 --- a/arch/arm64/boot/dts/renesas/ulcb.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi @@ -145,7 +145,6 @@ &avb { pinctrl-0 = <&avb_pins>; pinctrl-names = "default"; - renesas,no-ether-link; phy-handle = <&phy0>; status = "okay"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index d4f80786e7c2..3890468678ce 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts @@ -132,6 +132,8 @@ assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>; assigned-clock-parents = <&gmac_clkin>, <&gmac_clkin>; clock_in_out = "input"; + /* shows instability at 1GBit right now */ + max-speed = <100>; phy-supply = <&vcc_io>; phy-mode = "rgmii"; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 41d61840fb99..2426da631938 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -514,7 +514,7 @@ tsadc: tsadc@ff250000 { compatible = "rockchip,rk3328-tsadc"; reg = <0x0 0xff250000 0x0 0x100>; - interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH 0>; + interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>; assigned-clocks = <&cru SCLK_TSADC>; assigned-clock-rates = <50000>; clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 910628d18add..1fc5060d7027 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -155,17 +155,6 @@ regulator-min-microvolt = <5000000>; regulator-max-microvolt 
= <5000000>; }; - - vdd_log: vdd-log { - compatible = "pwm-regulator"; - pwms = <&pwm2 0 25000 0>; - regulator-name = "vdd_log"; - regulator-min-microvolt = <800000>; - regulator-max-microvolt = <1400000>; - regulator-always-on; - regulator-boot-on; - status = "okay"; - }; }; &cpu_b0 { diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi index 48e733136db4..0ac2ace82435 100644 --- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi +++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi @@ -198,8 +198,8 @@ gpio-controller; #gpio-cells = <2>; gpio-ranges = <&pinctrl 0 0 0>, - <&pinctrl 96 0 0>, - <&pinctrl 160 0 0>; + <&pinctrl 104 0 0>, + <&pinctrl 168 0 0>; gpio-ranges-group-names = "gpio_range0", "gpio_range1", "gpio_range2"; diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 396490cf7316..acaa935ed977 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -897,6 +897,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) image_ptr = jit_data->image; header = jit_data->header; extra_pass = true; + image_size = sizeof(u32) * ctx.idx; goto skip_init_ctx; } memset(&ctx, 0, sizeof(ctx)); diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index cb79fba79d43..b88a8dd14933 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c @@ -122,7 +122,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h index dd5a08aaa4da..3eb4bfc1fb36 100644 --- a/arch/parisc/include/asm/ldcw.h +++ b/arch/parisc/include/asm/ldcw.h @@ -12,6 +12,7 @@ for the semaphore. */ #define __PA_LDCW_ALIGNMENT 16 +#define __PA_LDCW_ALIGN_ORDER 4 #define __ldcw_align(a) ({ \ unsigned long __ret = (unsigned long) &(a)->lock[0]; \ __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) \ @@ -29,6 +30,7 @@ ldcd). */ #define __PA_LDCW_ALIGNMENT 4 +#define __PA_LDCW_ALIGN_ORDER 2 #define __ldcw_align(a) (&(a)->slock) #define __LDCW "ldcw,co" diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index d8f77358e2ba..29b99b8964aa 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -870,7 +870,7 @@ static void print_parisc_device(struct parisc_device *dev) static int count; print_pa_hwpath(dev, hw_path); - printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", + printk(KERN_INFO "%d. 
%s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }", ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type, dev->id.hversion_rev, dev->id.hversion, dev->id.sversion); diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index f3cecf5117cf..e95207c0565e 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -35,6 +35,7 @@ #include <asm/pgtable.h> #include <asm/signal.h> #include <asm/unistd.h> +#include <asm/ldcw.h> #include <asm/thread_info.h> #include <linux/linkage.h> @@ -46,6 +47,14 @@ #endif .import pa_tlb_lock,data + .macro load_pa_tlb_lock reg +#if __PA_LDCW_ALIGNMENT > 4 + load32 PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg + depi 0,31,__PA_LDCW_ALIGN_ORDER, \reg +#else + load32 PA(pa_tlb_lock), \reg +#endif + .endm /* space_to_prot macro creates a prot id from a space id */ @@ -457,7 +466,7 @@ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault #ifdef CONFIG_SMP cmpib,COND(=),n 0,\spc,2f - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp 1: LDCW 0(\tmp),\tmp1 cmpib,COND(=) 0,\tmp1,1b nop @@ -480,7 +489,7 @@ /* Release pa_tlb_lock lock. */ .macro tlb_unlock1 spc,tmp #ifdef CONFIG_SMP - load32 PA(pa_tlb_lock),\tmp + load_pa_tlb_lock \tmp tlb_unlock0 \spc,\tmp #endif .endm diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index adf7187f8951..2d40c4ff3f69 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -36,6 +36,7 @@ #include <asm/assembly.h> #include <asm/pgtable.h> #include <asm/cache.h> +#include <asm/ldcw.h> #include <linux/linkage.h> .text @@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local) .macro tlb_lock la,flags,tmp #ifdef CONFIG_SMP - ldil L%pa_tlb_lock,%r1 - ldo R%pa_tlb_lock(%r1),\la +#if __PA_LDCW_ALIGNMENT > 4 + load32 pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la + depi 0,31,__PA_LDCW_ALIGN_ORDER, \la +#else + load32 pa_tlb_lock, \la +#endif rsm PSW_SM_I,\flags 1: LDCW 0(\la),\tmp cmpib,<>,n 0,\tmp,3f diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 30f92391a93e..cad3e8661cd6 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -39,6 +39,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> +#include <linux/cpu.h> #include <linux/module.h> #include <linux/personality.h> #include <linux/ptrace.h> @@ -184,6 +185,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r) } /* + * Idle thread support + * + * Detect when running on QEMU with SeaBIOS PDC Firmware and let + * QEMU idle the host too. + */ + +int running_on_qemu __read_mostly; + +void __cpuidle arch_cpu_idle_dead(void) +{ + /* nop on real hardware, qemu will offline CPU. */ + asm volatile("or %%r31,%%r31,%%r31\n":::); +} + +void __cpuidle arch_cpu_idle(void) +{ + local_irq_enable(); + + /* nop on real hardware, qemu will idle sleep. 
*/ + asm volatile("or %%r10,%%r10,%%r10\n":::); +} + +static int __init parisc_idle_init(void) +{ + const char *marker; + + /* check QEMU/SeaBIOS marker in PAGE0 */ + marker = (char *) &PAGE0->pad0; + running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0); + + if (!running_on_qemu) + cpu_idle_poll_ctrl(1); + + return 0; +} +arch_initcall(parisc_idle_init); + +/* * Copy architecture-specific thread state */ int diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 13f7854e0d49..48f41399fc0b 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -631,11 +631,11 @@ void __init mem_init(void) mem_init_print_info(NULL); #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */ printk("virtual kernel memory layout:\n" - " vmalloc : 0x%p - 0x%p (%4ld MB)\n" - " memory : 0x%p - 0x%p (%4ld MB)\n" - " .init : 0x%p - 0x%p (%4ld kB)\n" - " .data : 0x%p - 0x%p (%4ld kB)\n" - " .text : 0x%p - 0x%p (%4ld kB)\n", + " vmalloc : 0x%px - 0x%px (%4ld MB)\n" + " memory : 0x%px - 0x%px (%4ld MB)\n" + " .init : 0x%px - 0x%px (%4ld kB)\n" + " .data : 0x%px - 0x%px (%4ld kB)\n" + " .text : 0x%px - 0x%px (%4ld kB)\n", (void*)VMALLOC_START, (void*)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 6177d43f0ce8..e2a2b8400490 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h @@ -160,9 +160,10 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, #endif } -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } #ifndef CONFIG_PPC_BOOK3S_64 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 5acb5a176dbe..72be0c32e902 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1403,7 +1403,7 @@ void show_regs(struct pt_regs * regs) printk("NIP: "REG" LR: "REG" CTR: "REG"\n", regs->nip, regs->link, regs->ctr); - printk("REGS: %p TRAP: %04lx %s (%s)\n", + printk("REGS: %px TRAP: %04lx %s (%s)\n", regs, regs->trap, print_tainted(), init_utsname()->release); printk("MSR: "REG" ", regs->msr); print_msr_bits(regs->msr); diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index bf457843e032..0d750d274c4e 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -725,7 +725,8 @@ u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) /* Return the per-cpu state for state saving/migration */ return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | - (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT; + (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | + (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; } int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) @@ -1558,7 +1559,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) /* * Restore P and Q. If the interrupt was pending, we - * force both P and Q, which will trigger a resend. + * force Q and !P, which will trigger a resend. * * That means that a guest that had both an interrupt * pending (queued) and Q set will restore with only @@ -1566,7 +1567,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) * is perfectly fine as coalescing interrupts that haven't * been presented yet is always allowed. 
*/ - if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING) + if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING)) state->old_p = true; if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) state->old_q = true; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 4797d08581ce..6e1e39035380 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address) return __bad_area(regs, address, SEGV_MAPERR); } +static noinline int bad_access(struct pt_regs *regs, unsigned long address) +{ + return __bad_area(regs, address, SEGV_ACCERR); +} + static int do_sigbus(struct pt_regs *regs, unsigned long address, unsigned int fault) { @@ -490,7 +495,7 @@ retry: good_area: if (unlikely(access_error(is_write, is_exec, vma))) - return bad_area(regs, address); + return bad_access(regs, address); /* * If for any reason at all we couldn't handle the fault, diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 153812966365..fce545774d50 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -410,8 +410,12 @@ static __u64 power_pmu_bhrb_to(u64 addr) int ret; __u64 target; - if (is_kernel_addr(addr)) - return branch_target((unsigned int *)addr); + if (is_kernel_addr(addr)) { + if (probe_kernel_read(&instr, (void *)addr, sizeof(instr))) + return 0; + + return branch_target(&instr); + } /* Userspace: need copy instruction here then translate it */ pagefault_disable(); diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 0ead3cd73caa..be4e7f84f70a 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -310,6 +310,19 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu) return 0; /* + * Check whether nest_imc is registered. We could end up here if the + * cpuhotplug callback registration fails. i.e, callback invokes the + * offline path for all successfully registered nodes. At this stage, + * nest_imc pmu will not be registered and we should return here. + * + * We return with a zero since this is not an offline failure. And + * cpuhp_setup_state() returns the actual failure reason to the caller, + * which in turn will call the cleanup routine. + */ + if (!nest_pmus) + return 0; + + /* * Now that this cpu is one of the designated, * find a next cpu a) which is online and b) in same chip. 
*/ @@ -1171,6 +1184,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) if (nest_pmus == 1) { cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE); kfree(nest_imc_refc); + kfree(per_nest_pmu_arr); } if (nest_pmus > 0) @@ -1195,7 +1209,6 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); kfree(pmu_ptr); - kfree(per_nest_pmu_arr); return; } @@ -1309,6 +1322,8 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id ret = nest_pmu_cpumask_init(); if (ret) { mutex_unlock(&nest_init_lock); + kfree(nest_imc_refc); + kfree(per_nest_pmu_arr); goto err_free; } } diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 44cbf4c12ea1..df95102e732c 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -354,6 +354,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) } static struct lock_class_key fsl_msi_irq_class; +static struct lock_class_key fsl_msi_irq_request_class; static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, int offset, int irq_index) @@ -373,7 +374,8 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev, dev_err(&dev->dev, "No memory for MSI cascade data\n"); return -ENOMEM; } - irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class); + irq_set_lockdep_class(virt_msir, &fsl_msi_irq_class, + &fsl_msi_irq_request_class); cascade_data->index = offset; cascade_data->msi_data = msi; cascade_data->virq = virt_msir; diff --git a/arch/s390/include/asm/diag.h b/arch/s390/include/asm/diag.h index 6db78567294c..cdbaad50c7c7 100644 --- a/arch/s390/include/asm/diag.h +++ b/arch/s390/include/asm/diag.h @@ -229,13 +229,55 @@ struct diag204_x_phys_block { } __packed; enum diag26c_sc { + DIAG26C_PORT_VNIC = 0x00000024, DIAG26C_MAC_SERVICES = 0x00000030 }; enum diag26c_version { - DIAG26C_VERSION2 = 0x00000002 /* z/VM 5.4.0 */ + DIAG26C_VERSION2 = 0x00000002, /* z/VM 5.4.0 */ + DIAG26C_VERSION6_VM65918 = 0x00020006 /* z/VM 6.4.0 + VM65918 */ }; +#define DIAG26C_VNIC_INFO 0x0002 +struct diag26c_vnic_req { + u32 resp_buf_len; + u32 resp_version; + u16 req_format; + u16 vlan_id; + u64 sys_name; + u8 res[2]; + u16 devno; +} __packed __aligned(8); + +#define VNIC_INFO_PROT_L3 1 +#define VNIC_INFO_PROT_L2 2 +/* Note: this is the bare minimum, use it for uninitialized VNICs only. */ +struct diag26c_vnic_resp { + u32 version; + u32 entry_cnt; + /* VNIC info: */ + u32 next_entry; + u64 owner; + u16 devno; + u8 status; + u8 type; + u64 lan_owner; + u64 lan_name; + u64 port_name; + u8 port_type; + u8 ext_status:6; + u8 protocol:2; + u16 base_devno; + u32 port_num; + u32 ifindex; + u32 maxinfo; + u32 dev_count; + /* 3x device info: */ + u8 dev_info1[28]; + u8 dev_info2[28]; + u8 dev_info3[28]; +} __packed __aligned(8); + #define DIAG26C_GET_MAC 0x0000 struct diag26c_mac_req { u32 resp_buf_len; diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ec8b68e97d3c..2c93cbbcd15e 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -792,11 +792,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm) if (kvm->arch.use_cmma) { /* - * Get the last slot. They should be sorted by base_gfn, so the - * last slot is also the one at the end of the address space. - * We have verified above that at least one slot is present. + * Get the first slot. 
They are reverse sorted by base_gfn, so + * the first slot is also the one at the end of the address + * space. We have verified above that at least one slot is + * present. */ - ms = slots->memslots + slots->used_slots - 1; + ms = slots->memslots; /* round up so we only use full longs */ ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG); /* allocate enough bytes to store all the bits */ diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 572496c688cc..0714bfa56da0 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c @@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc) cbrlo[entries] = gfn << PAGE_SHIFT; } - if (orc) { + if (orc && gfn < ms->bitmap_size) { /* increment only if we are really flipping the bit to 1 */ if (!test_and_set_bit(gfn, ms->pgste_bitmap)) atomic64_inc(&ms->dirty_pages); diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index cae5a1e16cbd..c4f8039a35e8 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -89,11 +89,11 @@ EXPORT_SYMBOL(enable_sacf_uaccess); void disable_sacf_uaccess(mm_segment_t old_fs) { + current->thread.mm_segment = old_fs; if (old_fs == USER_DS && test_facility(27)) { __ctl_load(S390_lowcore.user_asce, 1, 1); clear_cpu_flag(CIF_ASCE_PRIMARY); } - current->thread.mm_segment = old_fs; } EXPORT_SYMBOL(disable_sacf_uaccess); diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index f7aa5a77827e..2d15d84c20ed 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c @@ -181,6 +181,9 @@ out_unlock: static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr, size_t size, int flags) { + unsigned long irqflags; + int ret; + /* * With zdev->tlb_refresh == 0, rpcit is not required to establish new * translations when previously invalid translation-table entries are @@ -196,8 +199,22 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr, return 0; } - return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr, - PAGE_ALIGN(size)); + ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr, + PAGE_ALIGN(size)); + if (ret == -ENOMEM && !s390_iommu_strict) { + /* enable the hypervisor to free some resources */ + if (zpci_refresh_global(zdev)) + goto out; + + spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags); + bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap, + zdev->lazy_bitmap, zdev->iommu_pages); + bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages); + spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags); + ret = 0; + } +out: + return ret; } static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index 19bcb3b45a70..f069929e8211 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c @@ -89,6 +89,9 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range) if (cc) zpci_err_insn(cc, status, addr, range); + if (cc == 1 && (status == 4 || status == 16)) + return -ENOMEM; + return (cc) ? 
-EIO : 0; } diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c index 7291e2f11a47..4d6be53058d6 100644 --- a/arch/sh/boards/board-espt.c +++ b/arch/sh/boards/board-espt.c @@ -79,7 +79,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh7763_eth_pdata = { .phy = 0, - .edmac_endian = EDMAC_LITTLE_ENDIAN, .phy_interface = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c index 0104c8199c48..1bde08dc067d 100644 --- a/arch/sh/boards/board-sh7757lcr.c +++ b/arch/sh/boards/board-sh7757lcr.c @@ -76,7 +76,6 @@ static struct resource sh_eth0_resources[] = { static struct sh_eth_plat_data sh7757_eth0_pdata = { .phy = 1, - .edmac_endian = EDMAC_LITTLE_ENDIAN, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; @@ -104,7 +103,6 @@ static struct resource sh_eth1_resources[] = { static struct sh_eth_plat_data sh7757_eth1_pdata = { .phy = 1, - .edmac_endian = EDMAC_LITTLE_ENDIAN, .set_mdio_gate = sh7757_eth_set_mdio_gate, }; @@ -148,7 +146,6 @@ static struct resource sh_eth_giga0_resources[] = { static struct sh_eth_plat_data sh7757_eth_giga0_pdata = { .phy = 18, - .edmac_endian = EDMAC_LITTLE_ENDIAN, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; @@ -182,7 +179,6 @@ static struct resource sh_eth_giga1_resources[] = { static struct sh_eth_plat_data sh7757_eth_giga1_pdata = { .phy = 19, - .edmac_endian = EDMAC_LITTLE_ENDIAN, .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, }; diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c index 1faf6cb93dcb..6f929abe0b50 100644 --- a/arch/sh/boards/mach-ecovec24/setup.c +++ b/arch/sh/boards/mach-ecovec24/setup.c @@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8700 */ - .edmac_endian = EDMAC_LITTLE_ENDIAN, .phy_interface = PHY_INTERFACE_MODE_MII, .ether_link_active_low = 1 }; diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index f1fecd395679..255952555656 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -374,7 +374,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh_eth_plat = { .phy = 0x1f, /* SMSC LAN8187 */ - .edmac_endian = EDMAC_LITTLE_ENDIAN, .phy_interface = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c index 2c8fb04685d4..6e62686b81b1 100644 --- a/arch/sh/boards/mach-sh7763rdp/setup.c +++ b/arch/sh/boards/mach-sh7763rdp/setup.c @@ -87,7 +87,6 @@ static struct resource sh_eth_resources[] = { static struct sh_eth_plat_data sh7763_eth_pdata = { .phy = 1, - .edmac_endian = EDMAC_LITTLE_ENDIAN, .phy_interface = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c index 95796ad00fbe..d08db08dec38 100644 --- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -122,7 +122,6 @@ static struct platform_device scif2_device = { static struct sh_eth_plat_data eth_platform_data = { .phy = 1, - .edmac_endian = EDMAC_LITTLE_ENDIAN, .phy_interface = PHY_INTERFACE_MODE_MII, }; diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S index e5547b22cd18..0ddbbb031822 100644 --- a/arch/sparc/lib/hweight.S +++ b/arch/sparc/lib/hweight.S @@ -44,8 +44,8 @@ 
EXPORT_SYMBOL(__arch_hweight32) .previous ENTRY(__arch_hweight64) - sethi %hi(__sw_hweight16), %g1 - jmpl %g1 + %lo(__sw_hweight16), %g0 + sethi %hi(__sw_hweight64), %g1 + jmpl %g1 + %lo(__sw_hweight64), %g0 nop ENDPROC(__arch_hweight64) EXPORT_SYMBOL(__arch_hweight64) diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 22aff21fa44d..635fdefd4ae2 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1509,11 +1509,19 @@ static void jit_fill_hole(void *area, unsigned int size) *ptr++ = 0x91d02005; /* ta 5 */ } +struct sparc64_jit_data { + struct bpf_binary_header *header; + u8 *image; + struct jit_ctx ctx; +}; + struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { struct bpf_prog *tmp, *orig_prog = prog; + struct sparc64_jit_data *jit_data; struct bpf_binary_header *header; bool tmp_blinded = false; + bool extra_pass = false; struct jit_ctx ctx; u32 image_size; u8 *image_ptr; @@ -1533,13 +1541,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) prog = tmp; } + jit_data = prog->aux->jit_data; + if (!jit_data) { + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); + if (!jit_data) { + prog = orig_prog; + goto out; + } + prog->aux->jit_data = jit_data; + } + if (jit_data->ctx.offset) { + ctx = jit_data->ctx; + image_ptr = jit_data->image; + header = jit_data->header; + extra_pass = true; + image_size = sizeof(u32) * ctx.idx; + goto skip_init_ctx; + } + memset(&ctx, 0, sizeof(ctx)); ctx.prog = prog; ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL); if (ctx.offset == NULL) { prog = orig_prog; - goto out; + goto out_off; } /* Fake pass to detect features used, and get an accurate assessment @@ -1562,7 +1588,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } ctx.image = (u32 *)image_ptr; - +skip_init_ctx: for (pass = 1; pass < 3; pass++) { ctx.idx = 0; @@ -1593,14 +1619,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE)); - bpf_jit_binary_lock_ro(header); + if (!prog->is_func || extra_pass) { + bpf_jit_binary_lock_ro(header); + } else { + jit_data->ctx = ctx; + jit_data->image = image_ptr; + jit_data->header = header; + } prog->bpf_func = (void *)ctx.image; prog->jited = 1; prog->jited_len = image_size; + if (!prog->is_func || extra_pass) { out_off: - kfree(ctx.offset); + kfree(ctx.offset); + kfree(jit_data); + prog->aux->jit_data = NULL; + } out: if (tmp_blinded) bpf_jit_prog_release_other(prog, prog == orig_prog ? 
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h index b668e351fd6c..fca34b2177e2 100644 --- a/arch/um/include/asm/mmu_context.h +++ b/arch/um/include/asm/mmu_context.h @@ -15,9 +15,10 @@ extern void uml_setup_stubs(struct mm_struct *mm); /* * Needed since we do not use the asm-generic/mm_hooks.h: */ -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { uml_setup_stubs(mm); + return 0; } extern void arch_exit_mmap(struct mm_struct *mm); static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/unicore32/include/asm/mmu_context.h b/arch/unicore32/include/asm/mmu_context.h index 59b06b48f27d..5c205a9cb5a6 100644 --- a/arch/unicore32/include/asm/mmu_context.h +++ b/arch/unicore32/include/asm/mmu_context.h @@ -81,9 +81,10 @@ do { \ } \ } while (0) -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_unmap(struct mm_struct *mm, diff --git a/arch/unicore32/kernel/traps.c b/arch/unicore32/kernel/traps.c index 5f25b39f04d4..c4ac6043ebb0 100644 --- a/arch/unicore32/kernel/traps.c +++ b/arch/unicore32/kernel/traps.c @@ -298,7 +298,6 @@ void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } -EXPORT_SYMBOL(abort); void __init trap_init(void) { diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 04d66e6fa447..45dc6233f2b9 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -927,7 +927,8 @@ config MAXSMP config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP range 2 8 if SMP && X86_32 && !X86_BIGSMP - range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK + range 2 64 if SMP && X86_32 && X86_BIGSMP + range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64 range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 default "1" if !SMP default "8192" if MAXSMP diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c index d5364ca2e3f9..b5e5e02f8cde 100644 --- a/arch/x86/boot/compressed/pagetable.c +++ b/arch/x86/boot/compressed/pagetable.c @@ -23,6 +23,9 @@ */ #undef CONFIG_AMD_MEM_ENCRYPT +/* No PAGE_TABLE_ISOLATION support needed either: */ +#undef CONFIG_PAGE_TABLE_ISOLATION + #include "misc.h" /* These actually do the work of building the kernel identity maps. */ diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh index c9e8499fbfe7..6a10d52a4145 100644 --- a/arch/x86/boot/genimage.sh +++ b/arch/x86/boot/genimage.sh @@ -80,39 +80,43 @@ genfdimage288() { mcopy $FBZIMAGE w:linux } -genisoimage() { +geniso() { tmp_dir=`dirname $FIMAGE`/isoimage rm -rf $tmp_dir mkdir $tmp_dir - for i in lib lib64 share end ; do + for i in lib lib64 share ; do for j in syslinux ISOLINUX ; do if [ -f /usr/$i/$j/isolinux.bin ] ; then isolinux=/usr/$i/$j/isolinux.bin - cp $isolinux $tmp_dir fi done for j in syslinux syslinux/modules/bios ; do if [ -f /usr/$i/$j/ldlinux.c32 ]; then ldlinux=/usr/$i/$j/ldlinux.c32 - cp $ldlinux $tmp_dir fi done if [ -n "$isolinux" -a -n "$ldlinux" ] ; then break fi - if [ $i = end -a -z "$isolinux" ] ; then - echo 'Need an isolinux.bin file, please install syslinux/isolinux.' - exit 1 - fi done + if [ -z "$isolinux" ] ; then + echo 'Need an isolinux.bin file, please install syslinux/isolinux.' 
+ exit 1 + fi + if [ -z "$ldlinux" ] ; then + echo 'Need an ldlinux.c32 file, please install syslinux/isolinux.' + exit 1 + fi + cp $isolinux $tmp_dir + cp $ldlinux $tmp_dir cp $FBZIMAGE $tmp_dir/linux echo "$KCMDLINE" > $tmp_dir/isolinux.cfg if [ -f "$FDINITRD" ] ; then cp "$FDINITRD" $tmp_dir/initrd.img fi - mkisofs -J -r -input-charset=utf-8 -quiet -o $FIMAGE -b isolinux.bin \ - -c boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table \ - $tmp_dir + genisoimage -J -r -input-charset=utf-8 -quiet -o $FIMAGE \ + -b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 \ + -boot-info-table $tmp_dir isohybrid $FIMAGE 2>/dev/null || true rm -rf $tmp_dir } @@ -121,6 +125,6 @@ case $1 in bzdisk) genbzdisk;; fdimage144) genfdimage144;; fdimage288) genfdimage288;; - isoimage) genisoimage;; + isoimage) geniso;; *) echo 'Unknown image format'; exit 1; esac diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 3fd8bc560fae..45a63e00a6af 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -1,6 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/jump_label.h> #include <asm/unwind_hints.h> +#include <asm/cpufeatures.h> +#include <asm/page_types.h> +#include <asm/percpu.h> +#include <asm/asm-offsets.h> +#include <asm/processor-flags.h> /* @@ -187,6 +192,146 @@ For 32-bit we have the following conventions - kernel is built with #endif .endm +#ifdef CONFIG_PAGE_TABLE_ISOLATION + +/* + * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two + * halves: + */ +#define PTI_SWITCH_PGTABLES_MASK (1<<PAGE_SHIFT) +#define PTI_SWITCH_MASK (PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT)) + +.macro SET_NOFLUSH_BIT reg:req + bts $X86_CR3_PCID_NOFLUSH_BIT, \reg +.endm + +.macro ADJUST_KERNEL_CR3 reg:req + ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID + /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ + andq $(~PTI_SWITCH_MASK), \reg +.endm + +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + mov %cr3, \scratch_reg + ADJUST_KERNEL_CR3 \scratch_reg + mov \scratch_reg, %cr3 +.Lend_\@: +.endm + +#define THIS_CPU_user_pcid_flush_mask \ + PER_CPU_VAR(cpu_tlbstate) + TLB_STATE_user_pcid_flush_mask + +.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + mov %cr3, \scratch_reg + + ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID + + /* + * Test if the ASID needs a flush. + */ + movq \scratch_reg, \scratch_reg2 + andq $(0x7FF), \scratch_reg /* mask ASID */ + bt \scratch_reg, THIS_CPU_user_pcid_flush_mask + jnc .Lnoflush_\@ + + /* Flush needed, clear the bit */ + btr \scratch_reg, THIS_CPU_user_pcid_flush_mask + movq \scratch_reg2, \scratch_reg + jmp .Lwrcr3_\@ + +.Lnoflush_\@: + movq \scratch_reg2, \scratch_reg + SET_NOFLUSH_BIT \scratch_reg + +.Lwrcr3_\@: + /* Flip the PGD and ASID to the user version */ + orq $(PTI_SWITCH_MASK), \scratch_reg + mov \scratch_reg, %cr3 +.Lend_\@: +.endm + +.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req + pushq %rax + SWITCH_TO_USER_CR3_NOSTACK scratch_reg=\scratch_reg scratch_reg2=%rax + popq %rax +.endm + +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req + ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI + movq %cr3, \scratch_reg + movq \scratch_reg, \save_reg + /* + * Is the "switch mask" all zero? That means that both of + * these are zero: + * + * 1. The user/kernel PCID bit, and + * 2. 
The user/kernel "bit" that points CR3 to the + * bottom half of the 8k PGD + * + * That indicates a kernel CR3 value, not a user CR3. + */ + testq $(PTI_SWITCH_MASK), \scratch_reg + jz .Ldone_\@ + + ADJUST_KERNEL_CR3 \scratch_reg + movq \scratch_reg, %cr3 + +.Ldone_\@: +.endm + +.macro RESTORE_CR3 scratch_reg:req save_reg:req + ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI + + ALTERNATIVE "jmp .Lwrcr3_\@", "", X86_FEATURE_PCID + + /* + * KERNEL pages can always resume with NOFLUSH as we do + * explicit flushes. + */ + bt $X86_CR3_PTI_SWITCH_BIT, \save_reg + jnc .Lnoflush_\@ + + /* + * Check if there's a pending flush for the user ASID we're + * about to set. + */ + movq \save_reg, \scratch_reg + andq $(0x7FF), \scratch_reg + bt \scratch_reg, THIS_CPU_user_pcid_flush_mask + jnc .Lnoflush_\@ + + btr \scratch_reg, THIS_CPU_user_pcid_flush_mask + jmp .Lwrcr3_\@ + +.Lnoflush_\@: + SET_NOFLUSH_BIT \save_reg + +.Lwrcr3_\@: + /* + * The CR3 write could be avoided when not changing its value, + * but would require a CR3 read *and* a scratch register. + */ + movq \save_reg, %cr3 +.Lend_\@: +.endm + +#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */ + +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req +.endm +.macro SWITCH_TO_USER_CR3_NOSTACK scratch_reg:req scratch_reg2:req +.endm +.macro SWITCH_TO_USER_CR3_STACK scratch_reg:req +.endm +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req +.endm +.macro RESTORE_CR3 scratch_reg:req save_reg:req +.endm + +#endif + #endif /* CONFIG_X86_64 */ /* diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index bd8b57a5c874..ace8f321a5a1 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -942,9 +942,9 @@ ENTRY(debug) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Ldebug_from_sysenter_stack TRACE_IRQS_OFF @@ -986,9 +986,9 @@ ENTRY(nmi) /* Are we currently on the SYSENTER stack? */ movl PER_CPU_VAR(cpu_entry_area), %ecx - addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx - subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */ - cmpl $SIZEOF_SYSENTER_stack, %ecx + addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx + subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ + cmpl $SIZEOF_entry_stack, %ecx jb .Lnmi_from_sysenter_stack /* Not on SYSENTER stack. */ diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 423885bee398..f048e384ff54 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -23,7 +23,6 @@ #include <asm/segment.h> #include <asm/cache.h> #include <asm/errno.h> -#include "calling.h" #include <asm/asm-offsets.h> #include <asm/msr.h> #include <asm/unistd.h> @@ -40,6 +39,8 @@ #include <asm/frame.h> #include <linux/err.h> +#include "calling.h" + .code64 .section .entry.text, "ax" @@ -158,8 +159,8 @@ END(native_usergs_sysret64) _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip) /* The top word of the SYSENTER stack is hot and is usable as scratch space. 
*/ -#define RSP_SCRATCH CPU_ENTRY_AREA_SYSENTER_stack + \ - SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA +#define RSP_SCRATCH CPU_ENTRY_AREA_entry_stack + \ + SIZEOF_entry_stack - 8 + CPU_ENTRY_AREA ENTRY(entry_SYSCALL_64_trampoline) UNWIND_HINT_EMPTY @@ -168,6 +169,9 @@ ENTRY(entry_SYSCALL_64_trampoline) /* Stash the user RSP. */ movq %rsp, RSP_SCRATCH + /* Note: using %rsp as a scratch reg. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + /* Load the top of the task stack into RSP */ movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp @@ -207,6 +211,10 @@ ENTRY(entry_SYSCALL_64) */ swapgs + /* + * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it + * is not required to switch CR3. + */ movq %rsp, PER_CPU_VAR(rsp_scratch) movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp @@ -403,6 +411,7 @@ syscall_return_via_sysret: * We are on the trampoline stack. All regs except RDI are live. * We can do future final exit work right here. */ + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi popq %rdi popq %rsp @@ -740,6 +749,8 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode) * We can do future final exit work right here. */ + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi + /* Restore RDI. */ popq %rdi SWAPGS @@ -822,7 +833,9 @@ native_irq_return_ldt: */ pushq %rdi /* Stash user RDI */ - SWAPGS + SWAPGS /* to kernel GS */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */ + movq PER_CPU_VAR(espfix_waddr), %rdi movq %rax, (0*8)(%rdi) /* user RAX */ movq (1*8)(%rsp), %rax /* user RIP */ @@ -838,7 +851,6 @@ native_irq_return_ldt: /* Now RAX == RSP. */ andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ - popq %rdi /* Restore user RDI */ /* * espfix_stack[31:16] == 0. The page tables are set up such that @@ -849,7 +861,11 @@ native_irq_return_ldt: * still points to an RO alias of the ESPFIX stack. */ orq PER_CPU_VAR(espfix_stack), %rax - SWAPGS + + SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi + SWAPGS /* to user GS */ + popq %rdi /* Restore user RDI */ + movq %rax, %rsp UNWIND_HINT_IRET_REGS offset=8 @@ -949,6 +965,8 @@ ENTRY(switch_to_thread_stack) UNWIND_HINT_FUNC pushq %rdi + /* Need to switch before accessing the thread stack. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi movq %rsp, %rdi movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI @@ -1250,7 +1268,11 @@ ENTRY(paranoid_entry) js 1f /* negative -> in kernel */ SWAPGS xorl %ebx, %ebx -1: ret + +1: + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 + + ret END(paranoid_entry) /* @@ -1272,6 +1294,7 @@ ENTRY(paranoid_exit) testl %ebx, %ebx /* swapgs needed? */ jnz .Lparanoid_exit_no_swapgs TRACE_IRQS_IRETQ + RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 SWAPGS_UNSAFE_STACK jmp .Lparanoid_exit_restore .Lparanoid_exit_no_swapgs: @@ -1299,6 +1322,8 @@ ENTRY(error_entry) * from user mode due to an IRET fault. */ SWAPGS + /* We have user CR3. Change to kernel CR3. */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax .Lerror_entry_from_usermode_after_swapgs: /* Put us onto the real thread stack. */ @@ -1345,6 +1370,7 @@ ENTRY(error_entry) * .Lgs_change's error handler with kernel gsbase. */ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax jmp .Lerror_entry_done .Lbstep_iret: @@ -1354,10 +1380,11 @@ ENTRY(error_entry) .Lerror_bad_iret: /* - * We came from an IRET to user mode, so we have user gsbase. - * Switch to kernel gsbase: + * We came from an IRET to user mode, so we have user + * gsbase and CR3. 
Switch to kernel gsbase and CR3: */ SWAPGS + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax /* * Pretend that the exception came from user mode: set up pt_regs @@ -1389,6 +1416,10 @@ END(error_exit) /* * Runs on exception stack. Xen PV does not go through this path at all, * so we can use real assembly here. + * + * Registers: + * %r14: Used to save/restore the CR3 of the interrupted context + * when PAGE_TABLE_ISOLATION is in use. Do not clobber. */ ENTRY(nmi) UNWIND_HINT_IRET_REGS @@ -1452,6 +1483,7 @@ ENTRY(nmi) swapgs cld + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx movq %rsp, %rdx movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp UNWIND_HINT_IRET_REGS base=%rdx offset=8 @@ -1704,6 +1736,8 @@ end_repeat_nmi: movq $-1, %rsi call do_nmi + RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 + testl %ebx, %ebx /* swapgs needed? */ jnz nmi_restore nmi_swapgs: diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 95ad40eb7eff..98d5358e4041 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -49,6 +49,10 @@ ENTRY(entry_SYSENTER_compat) /* Interrupts are off on entry. */ SWAPGS + + /* We are about to clobber %rsp anyway, clobbering here is OK */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* @@ -186,8 +190,13 @@ ENTRY(entry_SYSCALL_compat) /* Interrupts are off on entry. */ swapgs - /* Stash user ESP and switch to the kernel stack. */ + /* Stash user ESP */ movl %esp, %r8d + + /* Use %rsp as scratch reg. User ESP is stashed in r8 */ + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp + + /* Switch to the kernel stack */ movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp /* Construct struct pt_regs on stack */ @@ -256,10 +265,22 @@ sysret32_from_system_call: * when the system call started, which is already known to user * code. We zero R8-R10 to avoid info leaks. */ + movq RSP-ORIG_RAX(%rsp), %rsp + + /* + * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored + * on the process stack which is not mapped to userspace and + * not readable after we SWITCH_TO_USER_CR3. Delay the CR3 + * switch until after after the last reference to the process + * stack. + * + * %r8/%r9 are zeroed before the sysret, thus safe to clobber. + */ + SWITCH_TO_USER_CR3_NOSTACK scratch_reg=%r8 scratch_reg2=%r9 + xorq %r8, %r8 xorq %r9, %r9 xorq %r10, %r10 - movq RSP-ORIG_RAX(%rsp), %rsp swapgs sysretl END(entry_SYSCALL_compat) diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c index f279ba2643dc..577fa8adb785 100644 --- a/arch/x86/entry/vsyscall/vsyscall_64.c +++ b/arch/x86/entry/vsyscall/vsyscall_64.c @@ -37,6 +37,7 @@ #include <asm/unistd.h> #include <asm/fixmap.h> #include <asm/traps.h> +#include <asm/paravirt.h> #define CREATE_TRACE_POINTS #include "vsyscall_trace.h" @@ -138,6 +139,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address) WARN_ON_ONCE(address != regs->ip); + /* This should be unreachable in NATIVE mode. */ + if (WARN_ON(vsyscall_mode == NATIVE)) + return false; + if (vsyscall_mode == NONE) { warn_bad_vsyscall(KERN_INFO, regs, "vsyscall attempted with vsyscall=none"); @@ -329,16 +334,47 @@ int in_gate_area_no_mm(unsigned long addr) return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR; } +/* + * The VSYSCALL page is the only user-accessible page in the kernel address + * range. Normally, the kernel page tables can have _PAGE_USER clear, but + * the tables covering VSYSCALL_ADDR need _PAGE_USER set if vsyscalls + * are enabled. 
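As a hedged aside on why the walk below touches every level: user-mode access requires _PAGE_USER at each stage of the translation, so setting it only on the leaf entry would not suffice. A minimal sketch, with an illustrative helper name:

/* Illustrative only: user access needs _PAGE_USER at every level. */
static bool example_user_reachable(pgd_t *root, unsigned long addr)
{
	pgd_t *pgd = pgd_offset_pgd(root, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pgd_flags(*pgd) & p4d_flags(*p4d) & pud_flags(*pud) &
	       pmd_flags(*pmd) & _PAGE_USER;
}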
+ * + * Some day we may create a "minimal" vsyscall mode in which we emulate + * vsyscalls but leave the page not present. If so, we skip calling + * this. + */ +void __init set_vsyscall_pgtable_user_bits(pgd_t *root) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_pgd(root, VSYSCALL_ADDR); + set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER)); + p4d = p4d_offset(pgd, VSYSCALL_ADDR); +#if CONFIG_PGTABLE_LEVELS >= 5 + p4d->p4d |= _PAGE_USER; +#endif + pud = pud_offset(p4d, VSYSCALL_ADDR); + set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER)); + pmd = pmd_offset(pud, VSYSCALL_ADDR); + set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_USER)); +} + void __init map_vsyscall(void) { extern char __vsyscall_page; unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page); - if (vsyscall_mode != NONE) + if (vsyscall_mode != NONE) { __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall, vsyscall_mode == NATIVE ? PAGE_KERNEL_VSYSCALL : PAGE_KERNEL_VVAR); + set_vsyscall_pgtable_user_bits(swapper_pg_dir); + } BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) != (unsigned long)VSYSCALL_ADDR); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 09c26a4f139c..731153a4681e 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3847,6 +3847,8 @@ static struct attribute *intel_pmu_attrs[] = { __init int intel_pmu_init(void) { + struct attribute **extra_attr = NULL; + struct attribute **to_free = NULL; union cpuid10_edx edx; union cpuid10_eax eax; union cpuid10_ebx ebx; @@ -3854,7 +3856,6 @@ __init int intel_pmu_init(void) unsigned int unused; struct extra_reg *er; int version, i; - struct attribute **extra_attr = NULL; char *name; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { @@ -4294,6 +4295,7 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
hsw_format_attr : nhm_format_attr; extra_attr = merge_attr(extra_attr, skl_format_attr); + to_free = extra_attr; x86_pmu.cpu_events = get_hsw_events_attrs(); intel_pmu_pebs_data_source_skl( boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); @@ -4401,6 +4403,7 @@ __init int intel_pmu_init(void) pr_cont("full-width counters, "); } + kfree(to_free); return 0; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3674a4b6f8bd..8156e47da7ba 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -3,16 +3,19 @@ #include <linux/types.h> #include <linux/slab.h> +#include <asm/cpu_entry_area.h> #include <asm/perf_event.h> +#include <asm/tlbflush.h> #include <asm/insn.h> #include "../perf_event.h" +/* Waste a full page so it can be mapped into the cpu_entry_area */ +DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + /* The size of a BTS record in bytes: */ #define BTS_RECORD_SIZE 24 -#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) -#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) #define PEBS_FIXUP_SIZE PAGE_SIZE /* @@ -279,17 +282,67 @@ void fini_debug_store_on_cpu(int cpu) static DEFINE_PER_CPU(void *, insn_buffer); -static int alloc_pebs_buffer(int cpu) +static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + unsigned long start = (unsigned long)cea; + phys_addr_t pa; + size_t msz = 0; + + pa = virt_to_phys(addr); + + preempt_disable(); + for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, pa, prot); + + /* + * This is a cross-CPU update of the cpu_entry_area, we must shoot down + * all TLB entries for it. + */ + flush_tlb_kernel_range(start, start + size); + preempt_enable(); +} + +static void ds_clear_cea(void *cea, size_t size) +{ + unsigned long start = (unsigned long)cea; + size_t msz = 0; + + preempt_disable(); + for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); + + flush_tlb_kernel_range(start, start + size); + preempt_enable(); +} + +static void *dsalloc_pages(size_t size, gfp_t flags, int cpu) +{ + unsigned int order = get_order(size); int node = cpu_to_node(cpu); - int max; - void *buffer, *ibuffer; + struct page *page; + + page = __alloc_pages_node(node, flags | __GFP_ZERO, order); + return page ? 
page_address(page) : NULL; +} + +static void dsfree_pages(const void *buffer, size_t size) +{ + if (buffer) + free_pages((unsigned long)buffer, get_order(size)); +} + +static int alloc_pebs_buffer(int cpu) +{ + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + size_t bsiz = x86_pmu.pebs_buffer_size; + int max, node = cpu_to_node(cpu); + void *buffer, *ibuffer, *cea; if (!x86_pmu.pebs) return 0; - buffer = kzalloc_node(x86_pmu.pebs_buffer_size, GFP_KERNEL, node); + buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu); if (unlikely(!buffer)) return -ENOMEM; @@ -300,25 +353,27 @@ static int alloc_pebs_buffer(int cpu) if (x86_pmu.intel_cap.pebs_format < 2) { ibuffer = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node); if (!ibuffer) { - kfree(buffer); + dsfree_pages(buffer, bsiz); return -ENOMEM; } per_cpu(insn_buffer, cpu) = ibuffer; } - - max = x86_pmu.pebs_buffer_size / x86_pmu.pebs_record_size; - - ds->pebs_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_pebs_vaddr = buffer; + /* Update the cpu entry area mapping */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds->pebs_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL); ds->pebs_index = ds->pebs_buffer_base; - ds->pebs_absolute_maximum = ds->pebs_buffer_base + - max * x86_pmu.pebs_record_size; - + max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size); + ds->pebs_absolute_maximum = ds->pebs_buffer_base + max; return 0; } static void release_pebs_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.pebs) return; @@ -326,73 +381,70 @@ static void release_pebs_buffer(int cpu) kfree(per_cpu(insn_buffer, cpu)); per_cpu(insn_buffer, cpu) = NULL; - kfree((void *)(unsigned long)ds->pebs_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer; + ds_clear_cea(cea, x86_pmu.pebs_buffer_size); ds->pebs_buffer_base = 0; + dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size); + hwev->ds_pebs_vaddr = NULL; } static int alloc_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - int node = cpu_to_node(cpu); - int max, thresh; - void *buffer; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *buffer, *cea; + int max; if (!x86_pmu.bts) return 0; - buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); + buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu); if (unlikely(!buffer)) { WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); return -ENOMEM; } - - max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; - thresh = max / 16; - - ds->bts_buffer_base = (u64)(unsigned long)buffer; + hwev->ds_bts_vaddr = buffer; + /* Update the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds->bts_buffer_base = (unsigned long) cea; + ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL); ds->bts_index = ds->bts_buffer_base; - ds->bts_absolute_maximum = ds->bts_buffer_base + - max * BTS_RECORD_SIZE; - ds->bts_interrupt_threshold = ds->bts_absolute_maximum - - thresh * BTS_RECORD_SIZE; - + max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE); + ds->bts_absolute_maximum = ds->bts_buffer_base + max; + ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16); return 0; } static void 
release_bts_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; + struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu); + struct debug_store *ds = hwev->ds; + void *cea; if (!ds || !x86_pmu.bts) return; - kfree((void *)(unsigned long)ds->bts_buffer_base); + /* Clear the fixmap */ + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer; + ds_clear_cea(cea, BTS_BUFFER_SIZE); ds->bts_buffer_base = 0; + dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE); + hwev->ds_bts_vaddr = NULL; } static int alloc_ds_buffer(int cpu) { - int node = cpu_to_node(cpu); - struct debug_store *ds; - - ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node); - if (unlikely(!ds)) - return -ENOMEM; + struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store; + memset(ds, 0, sizeof(*ds)); per_cpu(cpu_hw_events, cpu).ds = ds; - return 0; } static void release_ds_buffer(int cpu) { - struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; - - if (!ds) - return; - per_cpu(cpu_hw_events, cpu).ds = NULL; - kfree(ds); } void release_ds_buffers(void) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index f7aaadf9331f..8e4ea143ed96 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -14,6 +14,8 @@ #include <linux/perf_event.h> +#include <asm/intel_ds.h> + /* To enable MSR tracing please use the generic trace points. */ /* @@ -77,8 +79,6 @@ struct amd_nb { struct event_constraint event_constraints[X86_PMC_IDX_MAX]; }; -/* The maximal number of PEBS events: */ -#define MAX_PEBS_EVENTS 8 #define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1) /* @@ -95,23 +95,6 @@ struct amd_nb { PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \ PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER) -/* - * A debug store configuration. - * - * We only support architectures that use 64bit fields. - */ -struct debug_store { - u64 bts_buffer_base; - u64 bts_index; - u64 bts_absolute_maximum; - u64 bts_interrupt_threshold; - u64 pebs_buffer_base; - u64 pebs_index; - u64 pebs_absolute_maximum; - u64 pebs_interrupt_threshold; - u64 pebs_event_reset[MAX_PEBS_EVENTS]; -}; - #define PEBS_REGS \ (PERF_REG_X86_AX | \ PERF_REG_X86_BX | \ @@ -216,6 +199,8 @@ struct cpu_hw_events { * Intel DebugStore bits */ struct debug_store *ds; + void *ds_pebs_vaddr; + void *ds_bts_vaddr; u64 pebs_enabled; int n_pebs; int n_large_pebs; diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index dbfd0854651f..cf5961ca8677 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -140,7 +140,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".popsection\n" \ ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \ - ".popsection" + ".popsection\n" #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\ OLDINSTR_2(oldinstr, 1, 2) \ @@ -151,7 +151,7 @@ static inline int alternatives_text_reserved(void *start, void *end) ".pushsection .altinstr_replacement, \"ax\"\n" \ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \ - ".popsection" + ".popsection\n" /* * Alternative instructions for different CPU types or capabilities. 
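A hedged usage sketch of the ALTERNATIVE() macro whose expansion the trailing "\n" added above protects; the clflushopt case is illustrative and assumes an assembler that knows the mnemonic:

/*
 * ALTERNATIVE() expands to a single asm string; without a newline after
 * ".popsection", anything concatenated after the macro would land on
 * the same assembler line as ".popsection" and fail to assemble.
 */
static inline void example_flush_line(void *addr)
{
	asm volatile(ALTERNATIVE("clflush %0", "clflushopt %0",
				 X86_FEATURE_CLFLUSHOPT)
		     : : "m" (*(char *)addr));
}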
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 219faaec51df..386a6900e206 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -136,6 +136,7 @@ #endif #ifndef __ASSEMBLY__ +#ifndef __BPF__ /* * This output constraint should be used for any inline asm which has a "call" * instruction. Otherwise the asm may be inserted before the frame pointer @@ -145,5 +146,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP); #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) #endif +#endif #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h new file mode 100644 index 000000000000..4a7884b8dca5 --- /dev/null +++ b/arch/x86/include/asm/cpu_entry_area.h @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifndef _ASM_X86_CPU_ENTRY_AREA_H +#define _ASM_X86_CPU_ENTRY_AREA_H + +#include <linux/percpu-defs.h> +#include <asm/processor.h> +#include <asm/intel_ds.h> + +/* + * cpu_entry_area is a percpu region that contains things needed by the CPU + * and early entry/exit code. Real types aren't used for all fields here + * to avoid circular header dependencies. + * + * Every field is a virtual alias of some other allocated backing store. + * There is no direct allocation of a struct cpu_entry_area. + */ +struct cpu_entry_area { + char gdt[PAGE_SIZE]; + + /* + * The GDT is just below entry_stack and thus serves (on x86_64) as + * a a read-only guard page. + */ + struct entry_stack_page entry_stack_page; + + /* + * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because + * we need task switches to work, and task switches write to the TSS. + */ + struct tss_struct tss; + + char entry_trampoline[PAGE_SIZE]; + +#ifdef CONFIG_X86_64 + /* + * Exception stacks used for IST entries. + * + * In the future, this should have a separate slot for each stack + * with guard pages between them. + */ + char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; +#endif +#ifdef CONFIG_CPU_SUP_INTEL + /* + * Per CPU debug store for Intel performance monitoring. Wastes a + * full page at the moment. + */ + struct debug_store cpu_debug_store; + /* + * The actual PEBS/BTS buffers must be mapped to user space + * Reserve enough fixmap PTEs. 
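A hedged sketch of the address arithmetic that the constants just below encode; the kernel's real lookup is the extern get_cpu_entry_area(), and this helper is purely illustrative:

/* Illustrative: CPU n's area sits n strides above the per-CPU base. */
static inline struct cpu_entry_area *example_cea(int cpu)
{
	return (struct cpu_entry_area *)(CPU_ENTRY_AREA_PER_CPU +
					 cpu * CPU_ENTRY_AREA_SIZE);
}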
+ */ + struct debug_store_buffers cpu_debug_buffers; +#endif +}; + +#define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area)) +#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS) + +DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area); + +extern void setup_cpu_entry_areas(void); +extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags); + +#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE +#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE) + +#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT) + +#define CPU_ENTRY_AREA_MAP_SIZE \ + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE) + +extern struct cpu_entry_area *get_cpu_entry_area(int cpu); + +static inline struct entry_stack *cpu_entry_stack(int cpu) +{ + return &get_cpu_entry_area(cpu)->entry_stack_page.stack; +} + +#endif diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 800104c8a3ed..21ac898df2d8 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -197,11 +197,12 @@ #define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */ #define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */ #define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */ +#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */ #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ - +#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ @@ -340,5 +341,6 @@ #define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */ #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ +#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h index aab4fe9f49f8..13c5ee878a47 100644 --- a/arch/x86/include/asm/desc.h +++ b/arch/x86/include/asm/desc.h @@ -7,6 +7,7 @@ #include <asm/mmu.h> #include <asm/fixmap.h> #include <asm/irq_vectors.h> +#include <asm/cpu_entry_area.h> #include <linux/smp.h> #include <linux/percpu.h> @@ -20,6 +21,8 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; + /* Set the ACCESS bit so it can be mapped RO */ + desc->type |= 1; desc->s = 1; desc->dpl = 0x3; diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 14d6d5007314..b027633e7300 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -50,6 +50,12 @@ # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) #endif +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define DISABLE_PTI 0 +#else +# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -60,7 +66,7 @@ #define DISABLED_MASK4 (DISABLE_PCID) #define 
DISABLED_MASK5 0 #define DISABLED_MASK6 0 -#define DISABLED_MASK7 0 +#define DISABLED_MASK7 (DISABLE_PTI) #define DISABLED_MASK8 0 #define DISABLED_MASK9 (DISABLE_MPX) #define DISABLED_MASK10 0 diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h index 0211029076ea..6777480d8a42 100644 --- a/arch/x86/include/asm/espfix.h +++ b/arch/x86/include/asm/espfix.h @@ -2,7 +2,7 @@ #ifndef _ASM_X86_ESPFIX_H #define _ASM_X86_ESPFIX_H -#ifdef CONFIG_X86_64 +#ifdef CONFIG_X86_ESPFIX64 #include <asm/percpu.h> @@ -11,7 +11,8 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr); extern void init_espfix_bsp(void); extern void init_espfix_ap(int cpu); - -#endif /* CONFIG_X86_64 */ +#else +static inline void init_espfix_ap(int cpu) { } +#endif #endif /* _ASM_X86_ESPFIX_H */ diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h index 94fc4fa14127..64c4a30e0d39 100644 --- a/arch/x86/include/asm/fixmap.h +++ b/arch/x86/include/asm/fixmap.h @@ -45,46 +45,6 @@ extern unsigned long __FIXADDR_TOP; #endif /* - * cpu_entry_area is a percpu region in the fixmap that contains things - * needed by the CPU and early entry/exit code. Real types aren't used - * for all fields here to avoid circular header dependencies. - * - * Every field is a virtual alias of some other allocated backing store. - * There is no direct allocation of a struct cpu_entry_area. - */ -struct cpu_entry_area { - char gdt[PAGE_SIZE]; - - /* - * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as - * a a read-only guard page. - */ - struct SYSENTER_stack_page SYSENTER_stack_page; - - /* - * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because - * we need task switches to work, and task switches write to the TSS. - */ - struct tss_struct tss; - - char entry_trampoline[PAGE_SIZE]; - -#ifdef CONFIG_X86_64 - /* - * Exception stacks used for IST entries. - * - * In the future, this should have a separate slot for each stack - * with guard pages between them. - */ - char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]; -#endif -}; - -#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE) - -extern void setup_cpu_entry_areas(void); - -/* * Here we define all the compile-time 'special' virtual * addresses. The point is to have a constant address at * compile time, but to set the physical address only @@ -123,7 +83,6 @@ enum fixed_addresses { FIX_IO_APIC_BASE_0, FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1, #endif - FIX_RO_IDT, /* Virtual mapping for read-only IDT */ #ifdef CONFIG_X86_32 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, @@ -139,9 +98,6 @@ enum fixed_addresses { #ifdef CONFIG_X86_INTEL_MID FIX_LNW_VRTC, #endif - /* Fixmap entries to remap the GDTs, one per processor. 
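A hedged sketch of the caller-side simplification the new !CONFIG_X86_ESPFIX64 stub above enables; the wrapper name is illustrative:

/* Illustrative: AP bringup can call this without an #ifdef guard. */
static void example_ap_bringup(int cpu)
{
	init_espfix_ap(cpu);	/* empty inline stub without ESPFIX64 */
}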
*/ - FIX_CPU_ENTRY_AREA_TOP, - FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1, #ifdef CONFIG_ACPI_APEI_GHES /* Used for GHES mapping from assorted contexts */ @@ -182,7 +138,7 @@ enum fixed_addresses { extern void reserve_top_address(unsigned long reserve); #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) extern int fixmaps_set; @@ -230,30 +186,5 @@ void __init *early_memremap_decrypted_wp(resource_size_t phys_addr, void __early_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags); -static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) -{ - BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); - - return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; -} - -#define __get_cpu_entry_area_offset_index(cpu, offset) ({ \ - BUILD_BUG_ON(offset % PAGE_SIZE != 0); \ - __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \ - }) - -#define get_cpu_entry_area_index(cpu, field) \ - __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field)) - -static inline struct cpu_entry_area *get_cpu_entry_area(int cpu) -{ - return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0)); -} - -static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu) -{ - return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack; -} - #endif /* !__ASSEMBLY__ */ #endif /* _ASM_X86_FIXMAP_H */ diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h new file mode 100644 index 000000000000..62a9f4966b42 --- /dev/null +++ b/arch/x86/include/asm/intel_ds.h @@ -0,0 +1,36 @@ +#ifndef _ASM_INTEL_DS_H +#define _ASM_INTEL_DS_H + +#include <linux/percpu-defs.h> + +#define BTS_BUFFER_SIZE (PAGE_SIZE << 4) +#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4) + +/* The maximal number of PEBS events: */ +#define MAX_PEBS_EVENTS 8 + +/* + * A debug store configuration. + * + * We only support architectures that use 64bit fields. + */ +struct debug_store { + u64 bts_buffer_base; + u64 bts_index; + u64 bts_absolute_maximum; + u64 bts_interrupt_threshold; + u64 pebs_buffer_base; + u64 pebs_index; + u64 pebs_absolute_maximum; + u64 pebs_interrupt_threshold; + u64 pebs_event_reset[MAX_PEBS_EVENTS]; +} __aligned(PAGE_SIZE); + +DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store); + +struct debug_store_buffers { + char bts_buffer[BTS_BUFFER_SIZE]; + char pebs_buffer[PEBS_BUFFER_SIZE]; +}; + +#endif diff --git a/arch/x86/include/asm/invpcid.h b/arch/x86/include/asm/invpcid.h new file mode 100644 index 000000000000..989cfa86de85 --- /dev/null +++ b/arch/x86/include/asm/invpcid.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_INVPCID +#define _ASM_X86_INVPCID + +static inline void __invpcid(unsigned long pcid, unsigned long addr, + unsigned long type) +{ + struct { u64 d[2]; } desc = { { pcid, addr } }; + + /* + * The memory clobber is because the whole point is to invalidate + * stale TLB entries and, especially if we're flushing global + * mappings, we don't want the compiler to reorder any subsequent + * memory accesses before the TLB flush. + * + * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and + * invpcid (%rcx), %rax in long mode. 
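A hedged usage sketch for the INVPCID helpers this header defines just below; the wrapper is illustrative and assumes the caller verified the CPU feature first:

/* Illustrative: drop one (PCID, VA) translation, if INVPCID exists. */
static inline void example_flush_user_va(u16 pcid, unsigned long addr)
{
	if (boot_cpu_has(X86_FEATURE_INVPCID))
		invpcid_flush_one(pcid, addr);
}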
+ */ + asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" + : : "m" (desc), "a" (type), "c" (&desc) : "memory"); +} + +#define INVPCID_TYPE_INDIV_ADDR 0 +#define INVPCID_TYPE_SINGLE_CTXT 1 +#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 +#define INVPCID_TYPE_ALL_NON_GLOBAL 3 + +/* Flush all mappings for a given pcid and addr, not including globals. */ +static inline void invpcid_flush_one(unsigned long pcid, + unsigned long addr) +{ + __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); +} + +/* Flush all mappings for a given PCID, not including globals. */ +static inline void invpcid_flush_single_context(unsigned long pcid) +{ + __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT); +} + +/* Flush all mappings, including globals, for all PCIDs. */ +static inline void invpcid_flush_all(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL); +} + +/* Flush all mappings for all PCIDs except globals. */ +static inline void invpcid_flush_all_nonglobals(void) +{ + __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL); +} + +#endif /* _ASM_X86_INVPCID */ diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h index 139feef467f7..c066ffae222b 100644 --- a/arch/x86/include/asm/irqdomain.h +++ b/arch/x86/include/asm/irqdomain.h @@ -44,7 +44,7 @@ extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); extern int mp_irqdomain_activate(struct irq_domain *domain, - struct irq_data *irq_data, bool early); + struct irq_data *irq_data, bool reserve); extern void mp_irqdomain_deactivate(struct irq_domain *domain, struct irq_data *irq_data); extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain); diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 9ea26f167497..5ff3e8af2c20 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -3,6 +3,7 @@ #define _ASM_X86_MMU_H #include <linux/spinlock.h> +#include <linux/rwsem.h> #include <linux/mutex.h> #include <linux/atomic.h> @@ -27,7 +28,8 @@ typedef struct { atomic64_t tlb_gen; #ifdef CONFIG_MODIFY_LDT_SYSCALL - struct ldt_struct *ldt; + struct rw_semaphore ldt_usr_sem; + struct ldt_struct *ldt; #endif #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 6d16d15d09a0..c931b88982a0 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -50,22 +50,53 @@ struct ldt_struct { * call gates. On native, we could merge the ldt_struct and LDT * allocations, but it's not worth trying to optimize. */ - struct desc_struct *entries; - unsigned int nr_entries; + struct desc_struct *entries; + unsigned int nr_entries; + + /* + * If PTI is in use, then the entries array is not mapped while we're + * in user mode. The whole array will be aliased at the addressed + * given by ldt_slot_va(slot). We use two slots so that we can allocate + * and map, and enable a new LDT without invalidating the mapping + * of an older, still-in-use LDT. + * + * slot will be -1 if this LDT doesn't have an alias mapping. + */ + int slot; }; +/* This is a multiple of PAGE_SIZE. */ +#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE) + +static inline void *ldt_slot_va(int slot) +{ +#ifdef CONFIG_X86_64 + return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot); +#else + BUG(); +#endif +} + /* * Used for LDT copy/destruction. 
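On the two-slot aliasing scheme described above, a hedged sketch of how a freshly built LDT could choose its slot; the helper is illustrative rather than this patch's own logic:

/* Illustrative: map the new LDT into whichever slot is not live. */
static int example_pick_ldt_slot(const struct ldt_struct *old_ldt)
{
	return old_ldt ? !old_ldt->slot : 0;
}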
*/ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm); +static inline void init_new_context_ldt(struct mm_struct *mm) +{ + mm->context.ldt = NULL; + init_rwsem(&mm->context.ldt_usr_sem); +} +int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm); void destroy_context_ldt(struct mm_struct *mm); +void ldt_arch_exit_mmap(struct mm_struct *mm); #else /* CONFIG_MODIFY_LDT_SYSCALL */ -static inline int init_new_context_ldt(struct task_struct *tsk, - struct mm_struct *mm) +static inline void init_new_context_ldt(struct mm_struct *mm) { } +static inline int ldt_dup_context(struct mm_struct *oldmm, + struct mm_struct *mm) { return 0; } -static inline void destroy_context_ldt(struct mm_struct *mm) {} +static inline void destroy_context_ldt(struct mm_struct *mm) { } +static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } #endif static inline void load_mm_ldt(struct mm_struct *mm) @@ -90,10 +121,31 @@ static inline void load_mm_ldt(struct mm_struct *mm) * that we can see. */ - if (unlikely(ldt)) - set_ldt(ldt->entries, ldt->nr_entries); - else + if (unlikely(ldt)) { + if (static_cpu_has(X86_FEATURE_PTI)) { + if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) { + /* + * Whoops -- either the new LDT isn't mapped + * (if slot == -1) or is mapped into a bogus + * slot (if slot > 1). + */ + clear_LDT(); + return; + } + + /* + * If page table isolation is enabled, ldt->entries + * will not be mapped in the userspace pagetables. + * Tell the CPU to access the LDT through the alias + * at ldt_slot_va(ldt->slot). + */ + set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries); + } else { + set_ldt(ldt->entries, ldt->nr_entries); + } + } else { clear_LDT(); + } #else clear_LDT(); #endif @@ -132,18 +184,21 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { + mutex_init(&mm->context.lock); + mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id); atomic64_set(&mm->context.tlb_gen, 0); - #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { /* pkey 0 is the default and always allocated */ mm->context.pkey_allocation_map = 0x1; /* -1 means unallocated or invalid */ mm->context.execute_only_pkey = -1; } - #endif - return init_new_context_ldt(tsk, mm); +#endif + init_new_context_ldt(mm); + return 0; } static inline void destroy_context(struct mm_struct *mm) { @@ -176,15 +231,16 @@ do { \ } while (0) #endif -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { paravirt_arch_dup_mmap(oldmm, mm); + return ldt_dup_context(oldmm, mm); } static inline void arch_exit_mmap(struct mm_struct *mm) { paravirt_arch_exit_mmap(mm); + ldt_arch_exit_mmap(mm); } #ifdef CONFIG_X86_64 @@ -282,33 +338,6 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, } /* - * If PCID is on, ASID-aware code paths put the ASID+1 into the PCID - * bits. This serves two purposes. It prevents a nasty situation in - * which PCID-unaware code saves CR3, loads some other value (with PCID - * == 0), and then restores CR3, thus corrupting the TLB for ASID 0 if - * the saved ASID was nonzero. It also means that any bugs involving - * loading a PCID-enabled CR3 with CR4.PCIDE off will trigger - * deterministically. 
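The comment removed above still describes the encoding accurately; as a hedged restatement in code (names illustrative), kernel ASID n becomes hardware PCID n + 1 when PCID is enabled:

/* Illustrative: mirrors what build_cr3() computes with PCID enabled. */
static inline unsigned long example_cr3(unsigned long pgd_pa, u16 asid)
{
	/* build_cr3_noflush() additionally ORs in CR3_NOFLUSH (bit 63). */
	return pgd_pa | (asid + 1UL);
}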
- */ - -static inline unsigned long build_cr3(struct mm_struct *mm, u16 asid) -{ - if (static_cpu_has(X86_FEATURE_PCID)) { - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1); - } else { - VM_WARN_ON_ONCE(asid != 0); - return __sme_pa(mm->pgd); - } -} - -static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) -{ - VM_WARN_ON_ONCE(asid > 4094); - return __sme_pa(mm->pgd) | (asid + 1) | CR3_NOFLUSH; -} - -/* * This can be used from process context to figure out what the value of * CR3 is without needing to do a (slow) __read_cr3(). * @@ -317,7 +346,7 @@ static inline unsigned long build_cr3_noflush(struct mm_struct *mm, u16 asid) */ static inline unsigned long __get_current_cr3_fast(void) { - unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm), + unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd, this_cpu_read(cpu_tlbstate.loaded_mm_asid)); /* For now, be very restrictive about when this can be called. */ diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 4b5e1eafada7..aff42e1da6ee 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -30,6 +30,17 @@ static inline void paravirt_release_p4d(unsigned long pfn) {} */ extern gfp_t __userpte_alloc_gfp; +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * Instead of one PGD, we acquire two PGDs. Being order-1, it is + * both 8k in size and 8k-aligned. That lets us just flip bit 12 + * in a pointer to swap between the two 4k halves. + */ +#define PGD_ALLOCATION_ORDER 1 +#else +#define PGD_ALLOCATION_ORDER 0 +#endif + /* * Allocate and free page tables. */ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 95e2dfd75521..e42b8943cb1a 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -28,6 +28,7 @@ extern pgd_t early_top_pgt[PTRS_PER_PGD]; int __init __early_make_pgtable(unsigned long address, pmdval_t pmd); void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd); +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user); void ptdump_walk_pgd_level_checkwx(void); #ifdef CONFIG_DEBUG_WX @@ -841,7 +842,12 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) static inline int p4d_bad(p4d_t p4d) { - return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; + unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (p4d_flags(p4d) & ~ignore_flags) != 0; } #endif /* CONFIG_PGTABLE_LEVELS > 3 */ @@ -875,7 +881,12 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) static inline int pgd_bad(pgd_t pgd) { - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; + unsigned long ignore_flags = _PAGE_USER; + + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + ignore_flags |= _PAGE_NX; + + return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE; } static inline int pgd_none(pgd_t pgd) @@ -904,7 +915,11 @@ static inline int pgd_none(pgd_t pgd) * pgd_offset() returns a (pgd_t *) * pgd_index() is used get the offset into the pgd page's array of pgd_t's; */ -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) +#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address))) +/* + * a shortcut to get a pgd_t in a given mm + */ +#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) /* * a shortcut which implies the use of the kernel's pgd, instead * of a process's @@ -1106,7 +1121,14 @@ static 
inline int pud_write(pud_t pud) */ static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) { - memcpy(dst, src, count * sizeof(pgd_t)); + memcpy(dst, src, count * sizeof(pgd_t)); +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + /* Clone the user space pgd as well */ + memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src), + count * sizeof(pgd_t)); +#endif } #define PTE_SHIFT ilog2(PTRS_PER_PTE) diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index f2ca9b28fd68..ce245b0cdfca 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -38,13 +38,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ #define LAST_PKMAP 1024 #endif -#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \ - & PMD_MASK) +/* + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c + * to avoid include recursion hell + */ +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40) + +#define CPU_ENTRY_AREA_BASE \ + ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK) + +#define PKMAP_BASE \ + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK) #ifdef CONFIG_HIGHMEM # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE) #else -# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) +# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE) #endif #define MODULES_VADDR VMALLOC_START diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index e9f05331e732..81462e9a34f6 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -131,9 +131,97 @@ static inline pud_t native_pudp_get_and_clear(pud_t *xp) #endif } +#ifdef CONFIG_PAGE_TABLE_ISOLATION +/* + * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages + * (8k-aligned and 8k in size). The kernel one is at the beginning 4k and + * the user one is in the last 4k. To switch between them, you + * just need to flip the 12th bit in their addresses. + */ +#define PTI_PGTABLE_SWITCH_BIT PAGE_SHIFT + +/* + * This generates better code than the inline assembly in + * __set_bit(). + */ +static inline void *ptr_set_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr |= BIT(bit); + return (void *)__ptr; +} +static inline void *ptr_clear_bit(void *ptr, int bit) +{ + unsigned long __ptr = (unsigned long)ptr; + + __ptr &= ~BIT(bit); + return (void *)__ptr; +} + +static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp) +{ + return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp) +{ + return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp) +{ + return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} + +static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp) +{ + return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT); +} +#endif /* CONFIG_PAGE_TABLE_ISOLATION */ + +/* + * Page table pages are page-aligned. The lower half of the top + * level is used for userspace and the top half for the kernel. + * + * Returns true for parts of the PGD that map userspace and + * false for the parts that map the kernel. 
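
clone_pgd_range() and the kernel_to_user_pgdp() helpers above rely on the order-1 PGD trick: one 8k allocation, kernel half and user half a bit-12 flip apart. A user-space model of just the pointer arithmetic (posix_memalign stands in for the kernel page allocator; none of this is kernel API):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Same idea as kernel_to_user_pgdp(): flip bit 12 of an 8k-aligned PGD. */
static void *kernel_to_user_pgdp(void *pgdp) { return (void *)((uintptr_t)pgdp |  (uintptr_t)PAGE_SIZE); }
static void *user_to_kernel_pgdp(void *pgdp) { return (void *)((uintptr_t)pgdp & ~(uintptr_t)PAGE_SIZE); }

/* Lower half of a 4k PGD page indexes userspace, upper half the kernel. */
static int pgdp_maps_userspace(void *ptr)
{
	return ((uintptr_t)ptr & (PAGE_SIZE - 1)) < PAGE_SIZE / 2;
}

int main(void)
{
	void *pgd;

	/* Order-1, 8k-aligned allocation: kernel half plus user half. */
	assert(posix_memalign(&pgd, 2 * PAGE_SIZE, 2 * PAGE_SIZE) == 0);
	assert(kernel_to_user_pgdp(pgd) == (char *)pgd + PAGE_SIZE);
	assert(user_to_kernel_pgdp(kernel_to_user_pgdp(pgd)) == pgd);
	assert(pgdp_maps_userspace((char *)pgd + 0x7f8));	/* entry 255: user */
	assert(!pgdp_maps_userspace((char *)pgd + 0x800));	/* entry 256: kernel */
	free(pgd);
	return 0;
}
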
+ */ +static inline bool pgdp_maps_userspace(void *__ptr) +{ + unsigned long ptr = (unsigned long)__ptr; + + return (ptr & ~PAGE_MASK) < (PAGE_SIZE / 2); +} + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd); + +/* + * Take a PGD location (pgdp) and a pgd value that needs to be set there. + * Populates the user and returns the resulting PGD that must be set in + * the kernel copy of the page tables. + */ +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return pgd; + return __pti_set_user_pgd(pgdp, pgd); +} +#else +static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + return pgd; +} +#endif + static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { +#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL) + p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd); +#else *p4dp = p4d; +#endif } static inline void native_p4d_clear(p4d_t *p4d) @@ -147,7 +235,11 @@ static inline void native_p4d_clear(p4d_t *p4d) static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { +#ifdef CONFIG_PAGE_TABLE_ISOLATION + *pgdp = pti_set_user_pgd(pgdp, pgd); +#else *pgdp = pgd; +#endif } static inline void native_pgd_clear(pgd_t *pgd) diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 6d5f45dcd4a1..6b8f73dcbc2c 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -75,33 +75,52 @@ typedef struct { pteval_t pte; } pte_t; #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) -/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) +/* + * See Documentation/x86/x86_64/mm.txt for a description of the memory map. + * + * Be very careful vs. KASLR when changing anything here. The KASLR address + * range must not overlap with anything except the KASAN shadow area, which + * is correct as KASAN disables KASLR. 
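
For the native_set_pgd() path above, __pti_set_user_pgd() (implemented elsewhere in this series) decides which half of the PGD pair receives an entry. A deliberately simplified model of the expected behaviour, assuming only that user-half entries are mirrored into the user table while kernel entries stay kernel-only; the real helper also adjusts flags such as _PAGE_NX, which this sketch ignores:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD 512
typedef uint64_t pgd_t;

struct mm_pgds {
	pgd_t kernel[PTRS_PER_PGD];	/* first 4k page */
	pgd_t user[PTRS_PER_PGD];	/* second 4k page, bit 12 set */
};

/* Simplified stand-in for pti_set_user_pgd(): mirror user-half entries. */
static pgd_t set_pgd_model(struct mm_pgds *mm, int index, pgd_t val)
{
	if (index < PTRS_PER_PGD / 2)	/* maps userspace */
		mm->user[index] = val;
	return val;			/* caller stores this in kernel[] */
}

int main(void)
{
	struct mm_pgds mm;

	memset(&mm, 0, sizeof(mm));
	mm.kernel[3]   = set_pgd_model(&mm, 3, 0x1003);	/* user VA: mirrored */
	mm.kernel[400] = set_pgd_model(&mm, 400, 0x2003);	/* kernel VA: not */
	printf("user[3]=%#llx user[400]=%#llx\n",
	       (unsigned long long)mm.user[3], (unsigned long long)mm.user[400]);
	return 0;
}
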
+ */ +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) + #ifdef CONFIG_X86_5LEVEL -#define VMALLOC_SIZE_TB _AC(16384, UL) -#define __VMALLOC_BASE _AC(0xff92000000000000, UL) -#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define VMALLOC_SIZE_TB _AC(12800, UL) +# define __VMALLOC_BASE _AC(0xffa0000000000000, UL) +# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) +# define LDT_PGD_ENTRY _AC(-112, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #else -#define VMALLOC_SIZE_TB _AC(32, UL) -#define __VMALLOC_BASE _AC(0xffffc90000000000, UL) -#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define VMALLOC_SIZE_TB _AC(32, UL) +# define __VMALLOC_BASE _AC(0xffffc90000000000, UL) +# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) +# define LDT_PGD_ENTRY _AC(-3, UL) +# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #endif + #ifdef CONFIG_RANDOMIZE_MEMORY -#define VMALLOC_START vmalloc_base -#define VMEMMAP_START vmemmap_base +# define VMALLOC_START vmalloc_base +# define VMEMMAP_START vmemmap_base #else -#define VMALLOC_START __VMALLOC_BASE -#define VMEMMAP_START __VMEMMAP_BASE +# define VMALLOC_START __VMALLOC_BASE +# define VMEMMAP_START __VMEMMAP_BASE #endif /* CONFIG_RANDOMIZE_MEMORY */ -#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) -#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) + +#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) + +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) /* The module sections ends with the start of the fixmap */ -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1) -#define MODULES_LEN (MODULES_END - MODULES_VADDR) -#define ESPFIX_PGD_ENTRY _AC(-2, UL) -#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) -#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) -#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) +#define MODULES_END _AC(0xffffffffff000000, UL) +#define MODULES_LEN (MODULES_END - MODULES_VADDR) + +#define ESPFIX_PGD_ENTRY _AC(-2, UL) +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT) + +#define CPU_ENTRY_AREA_PGD _AC(-4, UL) +#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT) + +#define EFI_VA_START ( -4 * (_AC(1, UL) << 30)) +#define EFI_VA_END (-68 * (_AC(1, UL) << 30)) #define EARLY_DYNAMIC_PAGE_TABLES 64 diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index 43212a43ee69..6a60fea90b9d 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h @@ -38,6 +38,11 @@ #define CR3_ADDR_MASK __sme_clr(0x7FFFFFFFFFFFF000ull) #define CR3_PCID_MASK 0xFFFull #define CR3_NOFLUSH BIT_ULL(63) + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define X86_CR3_PTI_SWITCH_BIT 11 +#endif + #else /* * CR3_ADDR_MASK needs at least bits 31:5 set on PAE systems, and we save diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 1f2434ee9f80..d3a67fba200a 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -337,12 +337,12 @@ struct x86_hw_tss { #define IO_BITMAP_OFFSET (offsetof(struct tss_struct, io_bitmap) - offsetof(struct tss_struct, x86_tss)) #define INVALID_IO_BITMAP_OFFSET 0x8000 -struct SYSENTER_stack { +struct entry_stack { unsigned long words[64]; }; -struct SYSENTER_stack_page { - struct SYSENTER_stack stack; +struct entry_stack_page { + struct entry_stack stack; } __aligned(PAGE_SIZE); struct tss_struct { @@ -852,13 +852,22 @@ static inline void spin_lock_prefetch(const void *x) 
#else
 /*
- * User space process size. 47bits minus one guard page.  The guard
- * page is necessary on Intel CPUs: if a SYSCALL instruction is at
- * the highest possible canonical userspace address, then that
- * syscall will enter the kernel with a non-canonical return
- * address, and SYSRET will explode dangerously.  We avoid this
- * particular problem by preventing anything from being mapped
- * at the maximum canonical address.
+ * User space process size.  This is the first address outside the user range.
+ * There are a few constraints that determine this:
+ *
+ * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
+ * address, then that syscall will enter the kernel with a
+ * non-canonical return address, and SYSRET will explode dangerously.
+ * We avoid this particular problem by preventing anything executable
+ * from being mapped at the maximum canonical address.
+ *
+ * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
+ * CPUs malfunction if they execute code from the highest canonical page.
+ * They'll speculate right off the end of the canonical space, and
+ * bad things happen.  This is worked around in the same way as the
+ * Intel problem.
+ *
+ * With page table isolation enabled, we map the LDT in ... [stay tuned]
  */
 #define TASK_SIZE_MAX	((1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
diff --git a/arch/x86/include/asm/pti.h b/arch/x86/include/asm/pti.h
new file mode 100644
index 000000000000..0b5ef05b2d2d
--- /dev/null
+++ b/arch/x86/include/asm/pti.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _ASM_X86_PTI_H
+#define _ASM_X86_PTI_H
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+extern void pti_init(void);
+extern void pti_check_boottime_disable(void);
+#else
+static inline void pti_check_boottime_disable(void) { }
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PTI_H */
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index f8062bfd43a0..f73706878772 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -16,7 +16,7 @@ enum stack_type {
 	STACK_TYPE_TASK,
 	STACK_TYPE_IRQ,
 	STACK_TYPE_SOFTIRQ,
-	STACK_TYPE_SYSENTER,
+	STACK_TYPE_ENTRY,
 	STACK_TYPE_EXCEPTION,
 	STACK_TYPE_EXCEPTION_LAST = STACK_TYPE_EXCEPTION + N_EXCEPTION_STACKS-1,
 };
@@ -29,7 +29,7 @@ struct stack_info {
 bool in_task_stack(unsigned long *stack, struct task_struct *task,
 		   struct stack_info *info);
-bool in_sysenter_stack(unsigned long *stack, struct stack_info *info);
+bool in_entry_stack(unsigned long *stack, struct stack_info *info);
 int get_stack_info(unsigned long *stack, struct task_struct *task,
 		   struct stack_info *info, unsigned long *visit_mask);
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 9b6df68d8fd1..eb5f7999a893 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -16,8 +16,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		      struct tss_struct *tss);
 /* This runs on the previous thread's stack.
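
The guard-page arithmetic above is easy to sanity-check: with the usual 4-level __VIRTUAL_MASK_SHIFT of 47, TASK_SIZE_MAX lands exactly one page below the canonical boundary. A quick stand-alone check, not part of the patch:

#include <stdio.h>

#define __VIRTUAL_MASK_SHIFT 47	/* 4-level paging */
#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long task_size_max = (1UL << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE;

	/* 0x7ffffffff000: the last user page ends below 0x800000000000. */
	printf("TASK_SIZE_MAX = %#lx\n", task_size_max);
	return 0;
}
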
*/ -static inline void prepare_switch_to(struct task_struct *prev, - struct task_struct *next) +static inline void prepare_switch_to(struct task_struct *next) { #ifdef CONFIG_VMAP_STACK /* @@ -70,7 +69,7 @@ struct fork_frame { #define switch_to(prev, next, last) \ do { \ - prepare_switch_to(prev, next); \ + prepare_switch_to(next); \ \ ((last) = __switch_to_asm((prev), (next))); \ } while (0) diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 877b5c1a1b12..4a08dd2ab32a 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -9,70 +9,130 @@ #include <asm/cpufeature.h> #include <asm/special_insns.h> #include <asm/smp.h> +#include <asm/invpcid.h> +#include <asm/pti.h> +#include <asm/processor-flags.h> -static inline void __invpcid(unsigned long pcid, unsigned long addr, - unsigned long type) -{ - struct { u64 d[2]; } desc = { { pcid, addr } }; +/* + * The x86 feature is called PCID (Process Context IDentifier). It is similar + * to what is traditionally called ASID on the RISC processors. + * + * We don't use the traditional ASID implementation, where each process/mm gets + * its own ASID and flush/restart when we run out of ASID space. + * + * Instead we have a small per-cpu array of ASIDs and cache the last few mm's + * that came by on this CPU, allowing cheaper switch_mm between processes on + * this CPU. + * + * We end up with different spaces for different things. To avoid confusion we + * use different names for each of them: + * + * ASID - [0, TLB_NR_DYN_ASIDS-1] + * the canonical identifier for an mm + * + * kPCID - [1, TLB_NR_DYN_ASIDS] + * the value we write into the PCID part of CR3; corresponds to the + * ASID+1, because PCID 0 is special. + * + * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS] + * for KPTI each mm has two address spaces and thus needs two + * PCID values, but we can still do with a single ASID denomination + * for each mm. Corresponds to kPCID + 2048. + * + */ - /* - * The memory clobber is because the whole point is to invalidate - * stale TLB entries and, especially if we're flushing global - * mappings, we don't want the compiler to reorder any subsequent - * memory accesses before the TLB flush. - * - * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and - * invpcid (%rcx), %rax in long mode. - */ - asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01" - : : "m" (desc), "a" (type), "c" (&desc) : "memory"); -} +/* There are 12 bits of space for ASIDS in CR3 */ +#define CR3_HW_ASID_BITS 12 -#define INVPCID_TYPE_INDIV_ADDR 0 -#define INVPCID_TYPE_SINGLE_CTXT 1 -#define INVPCID_TYPE_ALL_INCL_GLOBAL 2 -#define INVPCID_TYPE_ALL_NON_GLOBAL 3 +/* + * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for + * user/kernel switches + */ +#ifdef CONFIG_PAGE_TABLE_ISOLATION +# define PTI_CONSUMED_PCID_BITS 1 +#else +# define PTI_CONSUMED_PCID_BITS 0 +#endif -/* Flush all mappings for a given pcid and addr, not including globals. */ -static inline void invpcid_flush_one(unsigned long pcid, - unsigned long addr) -{ - __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR); -} +#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS) + +/* + * ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account + * for them being zero-based. Another -1 is because PCID 0 is reserved for + * use by non-PCID-aware users. + */ +#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2) + +/* + * 6 because 6 should be plenty and struct tlb_state will fit in two cache + * lines. 
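
The ASID/kPCID/uPCID naming above reduces to two lines of arithmetic. A user-space rendering of the scheme with CONFIG_PAGE_TABLE_ISOLATION=y (so PTI_CONSUMED_PCID_BITS == 1), using the constants from this hunk:

#include <stdio.h>

#define X86_CR3_PCID_BITS	12
#define PTI_CONSUMED_PCID_BITS	1
#define CR3_AVAIL_PCID_BITS	(X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
#define MAX_ASID_AVAILABLE	((1 << CR3_AVAIL_PCID_BITS) - 2)
#define X86_CR3_PTI_SWITCH_BIT	11

int main(void)
{
	for (unsigned short asid = 0; asid < 3; asid++) {
		unsigned short kpcid = asid + 1;	/* PCID 0 is reserved */
		unsigned short upcid = kpcid | (1 << X86_CR3_PTI_SWITCH_BIT);	/* kPCID + 2048 */

		printf("ASID %u -> kPCID %u, uPCID %u\n", asid, kpcid, upcid);
	}
	printf("MAX_ASID_AVAILABLE = %d\n", MAX_ASID_AVAILABLE);	/* 2046 */
	return 0;
}
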
+ */
+#define TLB_NR_DYN_ASIDS	6
 
-/* Flush all mappings for a given PCID, not including globals. */
-static inline void invpcid_flush_single_context(unsigned long pcid)
+/*
+ * Given @asid, compute kPCID
+ */
+static inline u16 kern_pcid(u16 asid)
 {
-	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	/*
+	 * Make sure that the dynamic ASID space does not conflict with the
+	 * bit we are using to switch between user and kernel ASIDs.
+	 */
+	BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT));
+
+	/*
+	 * The ASID being passed in here should have respected the
+	 * MAX_ASID_AVAILABLE and thus never have the switch bit set.
+	 */
+	VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT));
+#endif
+	/*
+	 * The dynamically-assigned ASIDs that get passed in are small
+	 * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
+	 * so do not bother to clear it.
+	 *
+	 * If PCID is on, ASID-aware code paths put the ASID+1 into the
+	 * PCID bits.  This serves two purposes.  It prevents a nasty
+	 * situation in which PCID-unaware code saves CR3, loads some other
+	 * value (with PCID == 0), and then restores CR3, thus corrupting
+	 * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
+	 * that any bugs involving loading a PCID-enabled CR3 with
+	 * CR4.PCIDE off will trigger deterministically.
+	 */
+	return asid + 1;
 }
 
-/* Flush all mappings, including globals, for all PCIDs. */
-static inline void invpcid_flush_all(void)
+/*
+ * Given @asid, compute uPCID
+ */
+static inline u16 user_pcid(u16 asid)
 {
-	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+	u16 ret = kern_pcid(asid);
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	ret |= 1 << X86_CR3_PTI_SWITCH_BIT;
+#endif
+	return ret;
 }
 
-/* Flush all mappings for all PCIDs except globals. */
-static inline void invpcid_flush_all_nonglobals(void)
+struct pgd_t;
+static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
 {
-	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+	if (static_cpu_has(X86_FEATURE_PCID)) {
+		return __sme_pa(pgd) | kern_pcid(asid);
+	} else {
+		VM_WARN_ON_ONCE(asid != 0);
+		return __sme_pa(pgd);
+	}
 }
 
-static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
 {
-	u64 new_tlb_gen;
-
-	/*
-	 * Bump the generation count.  This also serves as a full barrier
-	 * that synchronizes with switch_mm(): callers are required to order
-	 * their read of mm_cpumask after their writes to the paging
-	 * structures.
-	 */
-	smp_mb__before_atomic();
-	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
-	smp_mb__after_atomic();
-
-	return new_tlb_gen;
+	VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
+	VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
+	return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
 }
 
 #ifdef CONFIG_PARAVIRT
@@ -99,12 +159,6 @@ static inline bool tlb_defer_switch_to_init_mm(void)
 	return !static_cpu_has(X86_FEATURE_PCID);
 }
 
-/*
- * 6 because 6 should be plenty and struct tlb_state will fit in
- * two cache lines.
- */
-#define TLB_NR_DYN_ASIDS	6
-
 struct tlb_context {
 	u64 ctx_id;
 	u64 tlb_gen;
@@ -139,6 +193,24 @@ struct tlb_state {
 	bool is_lazy;
 
 	/*
+	 * If set we changed the page tables in such a way that we
+	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
+	 * This tells us to go invalidate all the non-loaded ctxs[]
+	 * on the next context switch.
+	 *
+	 * The current ctx was kept up-to-date as it ran and does not
+	 * need to be invalidated.
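
build_cr3() and build_cr3_noflush() just OR the fields above into one register value. A stand-alone sketch of the layout, leaving out the __sme_pa() mask and using a made-up physical address:

#include <stdint.h>
#include <stdio.h>

#define CR3_NOFLUSH (1ULL << 63)	/* preserve-PCID-TLB-contents hint */

static uint64_t build_cr3(uint64_t pgd_pa, uint16_t asid)
{
	return pgd_pa | (uint64_t)(asid + 1);	/* kern_pcid(asid) */
}

int main(void)
{
	uint64_t pgd_pa = 0x1234000;	/* example page-aligned PGD address */

	printf("cr3         = %#llx\n", (unsigned long long)build_cr3(pgd_pa, 1));
	printf("cr3 noflush = %#llx\n",
	       (unsigned long long)(build_cr3(pgd_pa, 1) | CR3_NOFLUSH));
	return 0;
}
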
+ */ + bool invalidate_other; + + /* + * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate + * the corresponding user PCID needs a flush next time we + * switch to it; see SWITCH_TO_USER_CR3. + */ + unsigned short user_pcid_flush_mask; + + /* * Access to this CR4 shadow and to H/W CR4 is protected by * disabling interrupts when modifying either one. */ @@ -219,6 +291,14 @@ static inline unsigned long cr4_read_shadow(void) } /* + * Mark all other ASIDs as invalid, preserves the current. + */ +static inline void invalidate_other_asid(void) +{ + this_cpu_write(cpu_tlbstate.invalidate_other, true); +} + +/* * Save some of cr4 feature set we're using (e.g. Pentium 4MB * enable and PPro Global page enable), so that any CPU's that boot * up after us can get the correct flags. This should only be used @@ -237,37 +317,63 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask) extern void initialize_tlbstate_and_flush(void); -static inline void __native_flush_tlb(void) +/* + * Given an ASID, flush the corresponding user ASID. We can delay this + * until the next time we switch to it. + * + * See SWITCH_TO_USER_CR3. + */ +static inline void invalidate_user_asid(u16 asid) { + /* There is no user ASID if address space separation is off */ + if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) + return; + /* - * If current->mm == NULL then we borrow a mm which may change during a - * task switch and therefore we must not be preempted while we write CR3 - * back: + * We only have a single ASID if PCID is off and the CR3 + * write will have flushed it. */ - preempt_disable(); - native_write_cr3(__native_read_cr3()); - preempt_enable(); + if (!cpu_feature_enabled(X86_FEATURE_PCID)) + return; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + __set_bit(kern_pcid(asid), + (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask)); } -static inline void __native_flush_tlb_global_irq_disabled(void) +/* + * flush the entire current user mapping + */ +static inline void __native_flush_tlb(void) { - unsigned long cr4; + /* + * Preemption or interrupts must be disabled to protect the access + * to the per CPU variable and to prevent being preempted between + * read_cr3() and write_cr3(). + */ + WARN_ON_ONCE(preemptible()); - cr4 = this_cpu_read(cpu_tlbstate.cr4); - /* clear PGE */ - native_write_cr4(cr4 & ~X86_CR4_PGE); - /* write old PGE again and flush TLBs */ - native_write_cr4(cr4); + invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid)); + + /* If current->mm == NULL then the read_cr3() "borrows" an mm */ + native_write_cr3(__native_read_cr3()); } +/* + * flush everything + */ static inline void __native_flush_tlb_global(void) { - unsigned long flags; + unsigned long cr4, flags; if (static_cpu_has(X86_FEATURE_INVPCID)) { /* * Using INVPCID is considerably faster than a pair of writes * to CR4 sandwiched inside an IRQ flag save/restore. + * + * Note, this works with CR4.PCIDE=0 or 1. 
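
invalidate_user_asid() above only records that a user PCID is stale; the real flush happens on the next switch to that address space (SWITCH_TO_USER_CR3). A small model of the mask protocol, with __set_bit() and the exit-path test-and-clear reduced to plain bit operations:

#include <stdio.h>

static unsigned short user_pcid_flush_mask;

static void invalidate_user_asid(unsigned short asid)
{
	user_pcid_flush_mask |= 1u << (asid + 1);	/* bit index = kern_pcid(asid) */
}

/* On the kernel->user CR3 switch: flush only if the bit was set. */
static int need_flush_on_switch(unsigned short asid)
{
	unsigned short bit = 1u << (asid + 1);
	int need = !!(user_pcid_flush_mask & bit);

	user_pcid_flush_mask &= ~bit;
	return need;
}

int main(void)
{
	invalidate_user_asid(2);
	printf("switch to ASID 2: flush=%d\n", need_flush_on_switch(2));	/* 1 */
	printf("switch to ASID 2: flush=%d\n", need_flush_on_switch(2));	/* 0 */
	return 0;
}
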
*/ invpcid_flush_all(); return; @@ -280,36 +386,69 @@ static inline void __native_flush_tlb_global(void) */ raw_local_irq_save(flags); - __native_flush_tlb_global_irq_disabled(); + cr4 = this_cpu_read(cpu_tlbstate.cr4); + /* toggle PGE */ + native_write_cr4(cr4 ^ X86_CR4_PGE); + /* write old PGE again and flush TLBs */ + native_write_cr4(cr4); raw_local_irq_restore(flags); } +/* + * flush one page in the user mapping + */ static inline void __native_flush_tlb_single(unsigned long addr) { + u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid); + asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + /* + * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1. + * Just use invalidate_user_asid() in case we are called early. + */ + if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE)) + invalidate_user_asid(loaded_mm_asid); + else + invpcid_flush_one(user_pcid(loaded_mm_asid), addr); } +/* + * flush everything + */ static inline void __flush_tlb_all(void) { - if (boot_cpu_has(X86_FEATURE_PGE)) + if (boot_cpu_has(X86_FEATURE_PGE)) { __flush_tlb_global(); - else + } else { + /* + * !PGE -> !PCID (setup_pcid()), thus every flush is total. + */ __flush_tlb(); - - /* - * Note: if we somehow had PCID but not PGE, then this wouldn't work -- - * we'd end up flushing kernel translations for the current ASID but - * we might fail to flush kernel translations for other cached ASIDs. - * - * To avoid this issue, we force PCID off if PGE is off. - */ + } } +/* + * flush one page in the kernel mapping + */ static inline void __flush_tlb_one(unsigned long addr) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); __flush_tlb_single(addr); + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + /* + * __flush_tlb_single() will have cleared the TLB entry for this ASID, + * but since kernel space is replicated across all, we must also + * invalidate all others. + */ + invalidate_other_asid(); } #define TLB_FLUSH_ALL -1UL @@ -370,6 +509,17 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a) void native_flush_tlb_others(const struct cpumask *cpumask, const struct flush_tlb_info *info); +static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) +{ + /* + * Bump the generation count. This also serves as a full barrier + * that synchronizes with switch_mm(): callers are required to order + * their read of mm_cpumask after their writes to the paging + * structures. 
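
The CR4 fallback in __native_flush_tlb_global() above relies on the architectural rule that any CR4 write which changes PGE flushes the whole TLB, globals included. The XOR-twice pattern in isolation (values only; no privileged instructions here):

#include <stdint.h>
#include <stdio.h>

#define X86_CR4_PGE (1UL << 7)

/* Stand-in for native_write_cr4(); a real write that changes PGE flushes. */
static void write_cr4(uint64_t val)
{
	printf("cr4 <- %#llx\n", (unsigned long long)val);
}

int main(void)
{
	uint64_t cr4 = 0x3706f0;	/* example shadow value, PGE set */

	write_cr4(cr4 ^ X86_CR4_PGE);	/* toggle PGE: full flush */
	write_cr4(cr4);			/* restore: second full flush */
	return 0;
}

Toggling with XOR rather than clearing also behaves sensibly if PGE happened to be clear already, which is presumably why the patch replaces the old clear/restore pair.
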
+ */ + return atomic64_inc_return(&mm->context.tlb_gen); +} + static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm) { diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h index 84b9ec0c1bc0..22647a642e98 100644 --- a/arch/x86/include/asm/trace/irq_vectors.h +++ b/arch/x86/include/asm/trace/irq_vectors.h @@ -283,34 +283,34 @@ TRACE_EVENT(vector_alloc_managed, DECLARE_EVENT_CLASS(vector_activate, TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve, - bool early), + bool reserve), - TP_ARGS(irq, is_managed, can_reserve, early), + TP_ARGS(irq, is_managed, can_reserve, reserve), TP_STRUCT__entry( __field( unsigned int, irq ) __field( bool, is_managed ) __field( bool, can_reserve ) - __field( bool, early ) + __field( bool, reserve ) ), TP_fast_assign( __entry->irq = irq; __entry->is_managed = is_managed; __entry->can_reserve = can_reserve; - __entry->early = early; + __entry->reserve = reserve; ), - TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d", + TP_printk("irq=%u is_managed=%d can_reserve=%d reserve=%d", __entry->irq, __entry->is_managed, __entry->can_reserve, - __entry->early) + __entry->reserve) ); #define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name) \ DEFINE_EVENT_FN(vector_activate, name, \ TP_PROTO(unsigned int irq, bool is_managed, \ - bool can_reserve, bool early), \ - TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL); \ + bool can_reserve, bool reserve), \ + TP_ARGS(irq, is_managed, can_reserve, reserve), NULL, NULL); \ DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate); DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate); diff --git a/arch/x86/include/asm/unwind.h b/arch/x86/include/asm/unwind.h index c1688c2d0a12..1f86e1b0a5cd 100644 --- a/arch/x86/include/asm/unwind.h +++ b/arch/x86/include/asm/unwind.h @@ -56,18 +56,27 @@ void unwind_start(struct unwind_state *state, struct task_struct *task, #if defined(CONFIG_UNWINDER_ORC) || defined(CONFIG_UNWINDER_FRAME_POINTER) /* - * WARNING: The entire pt_regs may not be safe to dereference. In some cases, - * only the iret frame registers are accessible. Use with caution! + * If 'partial' returns true, only the iret frame registers are valid. */ -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { if (unwind_done(state)) return NULL; + if (partial) { +#ifdef CONFIG_UNWINDER_ORC + *partial = !state->full_regs; +#else + *partial = false; +#endif + } + return state->regs; } #else -static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state) +static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state, + bool *partial) { return NULL; } diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index d9a7c659009c..b986b2ca688a 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -7,6 +7,7 @@ #ifdef CONFIG_X86_VSYSCALL_EMULATION extern void map_vsyscall(void); +extern void set_vsyscall_pgtable_user_bits(pgd_t *root); /* * Called on instruction fetch fault in vsyscall page. 
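
The rewritten inc_mm_tlb_gen() above drops the explicit smp_mb__*() pair because atomic64_inc_return() already implies a full barrier. A C11 rendering of the same contract, where a seq_cst read-modify-write orders earlier page-table stores before the subsequent mm_cpumask read:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long tlb_gen;

static long inc_mm_tlb_gen(void)
{
	/* Full barrier: earlier stores are visible before the new gen is. */
	return atomic_fetch_add(&tlb_gen, 1) + 1;
}

int main(void)
{
	printf("gen=%ld\n", inc_mm_tlb_gen());
	printf("gen=%ld\n", inc_mm_tlb_gen());
	return 0;
}
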
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h index 7e1e730396ae..bcba3c643e63 100644 --- a/arch/x86/include/uapi/asm/processor-flags.h +++ b/arch/x86/include/uapi/asm/processor-flags.h @@ -78,7 +78,12 @@ #define X86_CR3_PWT _BITUL(X86_CR3_PWT_BIT) #define X86_CR3_PCD_BIT 4 /* Page Cache Disable */ #define X86_CR3_PCD _BITUL(X86_CR3_PCD_BIT) -#define X86_CR3_PCID_MASK _AC(0x00000fff,UL) /* PCID Mask */ + +#define X86_CR3_PCID_BITS 12 +#define X86_CR3_PCID_MASK (_AC((1UL << X86_CR3_PCID_BITS) - 1, UL)) + +#define X86_CR3_PCID_NOFLUSH_BIT 63 /* Preserve old PCID */ +#define X86_CR3_PCID_NOFLUSH _BITULL(X86_CR3_PCID_NOFLUSH_BIT) /* * Intel CPU features in CR4 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 6e272f3ea984..880441f24146 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2626,11 +2626,13 @@ static int __init apic_set_verbosity(char *arg) apic_verbosity = APIC_DEBUG; else if (strcmp("verbose", arg) == 0) apic_verbosity = APIC_VERBOSE; +#ifdef CONFIG_X86_64 else { pr_warning("APIC Verbosity level %s not recognised" " use apic=verbose or apic=debug\n", arg); return -EINVAL; } +#endif return 0; } diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index aa85690e9b64..25a87028cb3f 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -151,7 +151,7 @@ static struct apic apic_flat __ro_after_init = { .apic_id_valid = default_apic_id_valid, .apic_id_registered = flat_apic_id_registered, - .irq_delivery_mode = dest_LowestPrio, + .irq_delivery_mode = dest_Fixed, .irq_dest_mode = 1, /* logical */ .disable_esr = 0, diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c index 7b659c4480c9..5078b5ce63a7 100644 --- a/arch/x86/kernel/apic/apic_noop.c +++ b/arch/x86/kernel/apic/apic_noop.c @@ -110,7 +110,7 @@ struct apic apic_noop __ro_after_init = { .apic_id_valid = default_apic_id_valid, .apic_id_registered = noop_apic_id_registered, - .irq_delivery_mode = dest_LowestPrio, + .irq_delivery_mode = dest_Fixed, /* logical delivery broadcast to all CPUs: */ .irq_dest_mode = 1, diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 201579dc5242..8a7963421460 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2988,7 +2988,7 @@ void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq, } int mp_irqdomain_activate(struct irq_domain *domain, - struct irq_data *irq_data, bool early) + struct irq_data *irq_data, bool reserve) { unsigned long flags; diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c index 9b18be764422..ce503c99f5c4 100644 --- a/arch/x86/kernel/apic/msi.c +++ b/arch/x86/kernel/apic/msi.c @@ -39,17 +39,13 @@ static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) ((apic->irq_dest_mode == 0) ? MSI_ADDR_DEST_MODE_PHYSICAL : MSI_ADDR_DEST_MODE_LOGICAL) | - ((apic->irq_delivery_mode != dest_LowestPrio) ? - MSI_ADDR_REDIRECTION_CPU : - MSI_ADDR_REDIRECTION_LOWPRI) | + MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID(cfg->dest_apicid); msg->data = MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT | - ((apic->irq_delivery_mode != dest_LowestPrio) ? 
- MSI_DATA_DELIVERY_FIXED : - MSI_DATA_DELIVERY_LOWPRI) | + MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(cfg->vector); } diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index fa22017de806..02e8acb134f8 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c @@ -105,7 +105,7 @@ static struct apic apic_default __ro_after_init = { .apic_id_valid = default_apic_id_valid, .apic_id_registered = default_apic_id_registered, - .irq_delivery_mode = dest_LowestPrio, + .irq_delivery_mode = dest_Fixed, /* logical delivery broadcast to all CPUs: */ .irq_dest_mode = 1, diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 750449152b04..f8b03bb8e725 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -184,6 +184,7 @@ static void reserve_irq_vector_locked(struct irq_data *irqd) irq_matrix_reserve(vector_matrix); apicd->can_reserve = true; apicd->has_reserved = true; + irqd_set_can_reserve(irqd); trace_vector_reserve(irqd->irq, 0); vector_assign_managed_shutdown(irqd); } @@ -368,8 +369,18 @@ static int activate_reserved(struct irq_data *irqd) int ret; ret = assign_irq_vector_any_locked(irqd); - if (!ret) + if (!ret) { apicd->has_reserved = false; + /* + * Core might have disabled reservation mode after + * allocating the irq descriptor. Ideally this should + * happen before allocation time, but that would require + * completely convoluted ways of transporting that + * information. + */ + if (!irqd_can_reserve(irqd)) + apicd->can_reserve = false; + } return ret; } @@ -398,21 +409,21 @@ static int activate_managed(struct irq_data *irqd) } static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, - bool early) + bool reserve) { struct apic_chip_data *apicd = apic_chip_data(irqd); unsigned long flags; int ret = 0; trace_vector_activate(irqd->irq, apicd->is_managed, - apicd->can_reserve, early); + apicd->can_reserve, reserve); /* Nothing to do for fixed assigned vectors */ if (!apicd->can_reserve && !apicd->is_managed) return 0; raw_spin_lock_irqsave(&vector_lock, flags); - if (early || irqd_is_managed_and_shutdown(irqd)) + if (reserve || irqd_is_managed_and_shutdown(irqd)) vector_assign_managed_shutdown(irqd); else if (apicd->is_managed) ret = activate_managed(irqd); @@ -478,6 +489,7 @@ static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd, } else { /* Release the vector */ apicd->can_reserve = true; + irqd_set_can_reserve(irqd); clear_irq_vector(irqd); realloc = true; } diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index 622f13ca8a94..8b04234e010b 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -184,7 +184,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = { .apic_id_valid = x2apic_apic_id_valid, .apic_id_registered = x2apic_apic_id_registered, - .irq_delivery_mode = dest_LowestPrio, + .irq_delivery_mode = dest_Fixed, .irq_dest_mode = 1, /* logical */ .disable_esr = 0, diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index cd360a5e0dca..76417a9aab73 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -17,6 +17,7 @@ #include <asm/sigframe.h> #include <asm/bootparam.h> #include <asm/suspend.h> +#include <asm/tlbflush.h> #ifdef CONFIG_XEN #include <xen/interface/xen.h> @@ -94,9 +95,12 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + /* TLB state for the entry code */ + 
OFFSET(TLB_STATE_user_pcid_flush_mask, tlb_state, user_pcid_flush_mask); + /* Layout info for cpu_entry_area */ OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss); OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline); - OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page); - DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack)); + OFFSET(CPU_ENTRY_AREA_entry_stack, cpu_entry_area, entry_stack_page); + DEFINE(SIZEOF_entry_stack, sizeof(struct entry_stack)); } diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 7d20d9c0b3d6..fa1261eefa16 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c @@ -48,7 +48,7 @@ void foo(void) /* Offset from the sysenter stack to tss.sp0 */ DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) - - offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack)); + offsetofend(struct cpu_entry_area, entry_stack_page.stack)); #ifdef CONFIG_CC_STACKPROTECTOR BLANK(); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7416da3ec4df..39d7ea865207 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -506,102 +506,8 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = { [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ, [DEBUG_STACK - 1] = DEBUG_STKSZ }; - -static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks - [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); -#endif - -static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page, - SYSENTER_stack_storage); - -static void __init -set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot) -{ - for ( ; pages; pages--, idx--, ptr += PAGE_SIZE) - __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot); -} - -/* Setup the fixmap mappings only once per-processor */ -static void __init setup_cpu_entry_area(int cpu) -{ -#ifdef CONFIG_X86_64 - extern char _entry_trampoline[]; - - /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */ - pgprot_t gdt_prot = PAGE_KERNEL_RO; - pgprot_t tss_prot = PAGE_KERNEL_RO; -#else - /* - * On native 32-bit systems, the GDT cannot be read-only because - * our double fault handler uses a task gate, and entering through - * a task gate needs to change an available TSS to busy. If the - * GDT is read-only, that will triple fault. The TSS cannot be - * read-only because the CPU writes to it on task switches. - * - * On Xen PV, the GDT must be read-only because the hypervisor - * requires it. - */ - pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? - PAGE_KERNEL_RO : PAGE_KERNEL; - pgprot_t tss_prot = PAGE_KERNEL; #endif - __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page), - per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1, - PAGE_KERNEL); - - /* - * The Intel SDM says (Volume 3, 7.2.1): - * - * Avoid placing a page boundary in the part of the TSS that the - * processor reads during a task switch (the first 104 bytes). The - * processor may not correctly perform address translations if a - * boundary occurs in this area. During a task switch, the processor - * reads and writes into the first 104 bytes of each TSS (using - * contiguous physical addresses beginning with the physical address - * of the first byte of the TSS). 
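
The BUILD_BUG_ON() in the setup_cpu_entry_area() code removed here (it moves elsewhere in this series) encodes the SDM's 104-byte rule with an XOR trick: two offsets lie in the same page exactly when their XOR has no bits set above the page offset. The check in miniature, with illustrative offsets:

#include <assert.h>
#include <stdint.h>

#define PAGE_MASK (~0xfffUL)

/* True if [a, b) does not straddle a page boundary, a la the BUILD_BUG_ON. */
static int same_page(uintptr_t a, uintptr_t b)
{
	return ((a ^ b) & PAGE_MASK) == 0;
}

int main(void)
{
	assert(same_page(0x1010, 0x1078));	/* 104 bytes in one page: OK */
	assert(!same_page(0xff0, 0x1058));	/* crosses a boundary: would trip */
	return 0;
}
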
So, after TSS access begins, if - * part of the 104 bytes is not physically contiguous, the processor - * will access incorrect information without generating a page-fault - * exception. - * - * There are also a lot of errata involving the TSS spanning a page - * boundary. Assert that we're not doing that. - */ - BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ - offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); - BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss), - &per_cpu(cpu_tss_rw, cpu), - sizeof(struct tss_struct) / PAGE_SIZE, - tss_prot); - -#ifdef CONFIG_X86_32 - per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); -#endif - -#ifdef CONFIG_X86_64 - BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); - BUILD_BUG_ON(sizeof(exception_stacks) != - sizeof(((struct cpu_entry_area *)0)->exception_stacks)); - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks), - &per_cpu(exception_stacks, cpu), - sizeof(exception_stacks) / PAGE_SIZE, - PAGE_KERNEL); - - __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline), - __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); -#endif -} - -void __init setup_cpu_entry_areas(void) -{ - unsigned int cpu; - - for_each_possible_cpu(cpu) - setup_cpu_entry_area(cpu); -} - /* Load the original GDT from the per-cpu structure */ void load_direct_gdt(int cpu) { @@ -1016,6 +922,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) } setup_force_cpu_cap(X86_FEATURE_ALWAYS); + + if (c->x86_vendor != X86_VENDOR_AMD) + setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); + fpu__init_system(c); #ifdef CONFIG_X86_32 @@ -1348,7 +1258,7 @@ void enable_sep_cpu(void) tss->x86_tss.ss1 = __KERNEL_CS; wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0); - wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1), 0); + wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0); wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0); put_cpu(); @@ -1454,7 +1364,10 @@ void syscall_init(void) (entry_SYSCALL_64_trampoline - _entry_trampoline); wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS); - wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); + if (static_cpu_has(X86_FEATURE_PTI)) + wrmsrl(MSR_LSTAR, SYSCALL64_entry_trampoline); + else + wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64); #ifdef CONFIG_IA32_EMULATION wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat); @@ -1465,7 +1378,7 @@ void syscall_init(void) * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit). */ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS); - wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); + wrmsrl_safe(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1)); wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat); #else wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret); @@ -1680,7 +1593,7 @@ void cpu_init(void) */ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); load_TR_desc(); - load_sp0((unsigned long)(cpu_SYSENTER_stack(cpu) + 1)); + load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1)); load_mm_ldt(&init_mm); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 7dbcb7adf797..8ccdca6d3f9e 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -565,15 +565,6 @@ static void print_ucode(struct ucode_cpu_info *uci) } #else -/* - * Flush global tlb. 
We only do this in x86_64 where paging has been enabled - * already and PGE should be enabled as well. - */ -static inline void flush_tlb_early(void) -{ - __native_flush_tlb_global_irq_disabled(); -} - static inline void print_ucode(struct ucode_cpu_info *uci) { struct microcode_intel *mc; @@ -602,10 +593,6 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) if (rev != mc->hdr.rev) return -1; -#ifdef CONFIG_X86_64 - /* Flush global tlb. This is precaution. */ - flush_tlb_early(); -#endif uci->cpu_sig.rev = rev; if (early) diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index bbd6d986e2d0..afbecff161d1 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -18,6 +18,7 @@ #include <linux/nmi.h> #include <linux/sysfs.h> +#include <asm/cpu_entry_area.h> #include <asm/stacktrace.h> #include <asm/unwind.h> @@ -43,9 +44,9 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task, return true; } -bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) +bool in_entry_stack(unsigned long *stack, struct stack_info *info) { - struct SYSENTER_stack *ss = cpu_SYSENTER_stack(smp_processor_id()); + struct entry_stack *ss = cpu_entry_stack(smp_processor_id()); void *begin = ss; void *end = ss + 1; @@ -53,7 +54,7 @@ bool in_sysenter_stack(unsigned long *stack, struct stack_info *info) if ((void *)stack < begin || (void *)stack >= end) return false; - info->type = STACK_TYPE_SYSENTER; + info->type = STACK_TYPE_ENTRY; info->begin = begin; info->end = end; info->next_sp = NULL; @@ -75,12 +76,23 @@ void show_iret_regs(struct pt_regs *regs) regs->sp, regs->flags); } -static void show_regs_safe(struct stack_info *info, struct pt_regs *regs) +static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs, + bool partial) { - if (on_stack(info, regs, sizeof(*regs))) + /* + * These on_stack() checks aren't strictly necessary: the unwind code + * has already validated the 'regs' pointer. The checks are done for + * ordering reasons: if the registers are on the next stack, we don't + * want to print them out yet. Otherwise they'll be shown as part of + * the wrong stack. Later, when show_trace_log_lvl() switches to the + * next stack, this function will be called again with the same regs so + * they can be printed in the right context. + */ + if (!partial && on_stack(info, regs, sizeof(*regs))) { __show_regs(regs, 0); - else if (on_stack(info, (void *)regs + IRET_FRAME_OFFSET, - IRET_FRAME_SIZE)) { + + } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET, + IRET_FRAME_SIZE)) { /* * When an interrupt or exception occurs in entry code, the * full pt_regs might not have been saved yet. In that case @@ -97,11 +109,13 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, struct stack_info stack_info = {0}; unsigned long visit_mask = 0; int graph_idx = 0; + bool partial; printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); stack = stack ? : get_stack_pointer(task, regs); + regs = unwind_get_entry_regs(&state, &partial); /* * Iterate through the stacks, starting with the current stack pointer. 
@@ -111,15 +125,15 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 	 * - task stack
 	 * - interrupt stack
 	 * - HW exception stacks (double fault, nmi, debug, mce)
-	 * - SYSENTER stack
+	 * - entry stack
 	 *
 	 * x86-32 can have up to four stacks:
 	 * - task stack
 	 * - softirq stack
 	 * - hardirq stack
-	 * - SYSENTER stack
+	 * - entry stack
 	 */
-	for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
 		const char *stack_name;
 
 		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
@@ -139,7 +153,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 			printk("%s <%s>\n", log_lvl, stack_name);
 
 		if (regs)
-			show_regs_safe(&stack_info, regs);
+			show_regs_if_on_stack(&stack_info, regs, partial);
 
 		/*
 		 * Scan the stack, printing any text addresses we find.  At the
@@ -163,7 +177,7 @@
 
 			/*
 			 * Don't print regs->ip again if it was already printed
-			 * by show_regs_safe() below.
+			 * by show_regs_if_on_stack().
 			 */
 			if (regs && stack == &regs->ip)
 				goto next;
@@ -198,9 +212,9 @@ next:
 			unwind_next_frame(&state);
 
 			/* if the frame has entry regs, print them */
-			regs = unwind_get_entry_regs(&state);
+			regs = unwind_get_entry_regs(&state, &partial);
 			if (regs)
-				show_regs_safe(&stack_info, regs);
+				show_regs_if_on_stack(&stack_info, regs, partial);
 		}
 
 		if (stack_name)
@@ -296,11 +310,13 @@ int __die(const char *str, struct pt_regs *regs, long err)
 	unsigned long sp;
 #endif
 	printk(KERN_DEFAULT
-	       "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter,
+	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
 	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
 	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
 	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
-	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "");
+	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
+	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
+	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
 
 	if (notify_die(DIE_OOPS, str, regs, err,
 			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 5ff13a6b3680..04170f63e3a1 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -26,8 +26,8 @@ const char *stack_type_name(enum stack_type type)
 	if (type == STACK_TYPE_SOFTIRQ)
 		return "SOFTIRQ";
 
-	if (type == STACK_TYPE_SYSENTER)
-		return "SYSENTER";
+	if (type == STACK_TYPE_ENTRY)
+		return "ENTRY_TRAMPOLINE";
 
 	return NULL;
 }
@@ -96,7 +96,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
 	if (task != current)
 		goto unknown;
 
-	if (in_sysenter_stack(stack, info))
+	if (in_entry_stack(stack, info))
 		goto recursion_check;
 
 	if (in_hardirq_stack(stack, info))
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index abc828f8c297..563e28d14f2c 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -37,8 +37,14 @@ const char *stack_type_name(enum stack_type type)
 	if (type == STACK_TYPE_IRQ)
 		return "IRQ";
 
-	if (type == STACK_TYPE_SYSENTER)
-		return "SYSENTER";
+	if (type == STACK_TYPE_ENTRY) {
+		/*
+		 * On 64-bit, we have a generic entry stack that we
+		 * use for all the kernel entry points, including
+		 * SYSENTER.
+		 */
+		return "ENTRY_TRAMPOLINE";
+	}
 
 	if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
 		return exception_stack_names[type - STACK_TYPE_EXCEPTION];
@@ -118,7 +124,7 @@ int get_stack_info(unsigned long *stack, struct task_struct *task,
 	if (in_irq_stack(stack, info))
 		goto recursion_check;
 
-	if (in_sysenter_stack(stack, info))
+	if (in_entry_stack(stack, info))
 		goto recursion_check;
 
 	goto unknown;
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7dca675fe78d..04a625f0fcda 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -341,6 +341,27 @@ GLOBAL(early_recursion_flag)
 	.balign	PAGE_SIZE; \
 GLOBAL(name)
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+/*
+ * Each PGD needs to be 8k long and 8k aligned.  We do not
+ * ever go out to userspace with these, so we do not
+ * strictly *need* the second page, but this allows us to
+ * have a single set_pgd() implementation that does not
+ * need to worry about whether it has 4k or 8k to work
+ * with.
+ *
+ * This ensures PGDs are 8k long:
+ */
+#define PTI_USER_PGD_FILL	512
+/* This ensures they are 8k-aligned: */
+#define NEXT_PGD_PAGE(name) \
+	.balign 2 * PAGE_SIZE; \
+GLOBAL(name)
+#else
+#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define PTI_USER_PGD_FILL	0
+#endif
+
 /* Automate the creation of 1 to 1 mapping pmd entries */
 #define PMDS(START, PERM, COUNT)			\
 	i = 0 ;						\
@@ -350,13 +371,14 @@ GLOBAL(name)
 	.endr
 
 __INITDATA
-NEXT_PAGE(early_top_pgt)
+NEXT_PGD_PAGE(early_top_pgt)
 	.fill	511,8,0
 #ifdef CONFIG_X86_5LEVEL
 	.quad	level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #else
 	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #endif
+	.fill	PTI_USER_PGD_FILL,8,0
 
 NEXT_PAGE(early_dynamic_pgts)
 	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -364,13 +386,14 @@ NEXT_PAGE(early_dynamic_pgts)
 	.data
 
 #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org    init_top_pgt + PGD_PAGE_OFFSET*8, 0
 	.quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
 	.org    init_top_pgt + PGD_START_KERNEL*8, 0
 	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
 	.quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+	.fill	PTI_USER_PGD_FILL,8,0
 
 NEXT_PAGE(level3_ident_pgt)
 	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
@@ -381,8 +404,9 @@ NEXT_PAGE(level2_ident_pgt)
 	 */
 	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
 #else
-NEXT_PAGE(init_top_pgt)
+NEXT_PGD_PAGE(init_top_pgt)
 	.fill	512,8,0
+	.fill	PTI_USER_PGD_FILL,8,0
 #endif
 
 #ifdef CONFIG_X86_5LEVEL
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 1c1eae961340..26d713ecad34 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -5,6 +5,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ * Lock order:
+ *	context.ldt_usr_sem
+ *	  mmap_sem
+ *	    context.lock
  */
 
 #include <linux/errno.h>
@@ -19,6 +24,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/ldt.h>
+#include <asm/tlb.h>
 #include <asm/desc.h>
 #include <asm/mmu_context.h>
 #include <asm/syscalls.h>
@@ -42,17 +48,15 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
 	struct mm_struct *mm = __mm;
-	mm_context_t *pc;
 
 	if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
 		return;
 
-	pc = &mm->context;
-	set_ldt(pc->ldt->entries, pc->ldt->nr_entries);
+	load_mm_ldt(mm);
 
 	refresh_ldt_segments();
 }
@@ -89,25 +93,143 @@ static struct ldt_struct *alloc_ldt_struct(unsigned int num_entries)
 		return NULL;
 	}
 
+	/* The new LDT isn't aliased for PTI yet. */
+	new_ldt->slot = -1;
+
 	new_ldt->nr_entries = num_entries;
 	return new_ldt;
 }
 
+/*
+ * If PTI is enabled, this maps the LDT into the kernelmode and
+ * usermode tables for the given mm.
+ *
+ * There is no corresponding unmap function.  Even if the LDT is freed, we
+ * leave the PTEs around until the slot is reused or the mm is destroyed.
+ * This is harmless: the LDT is always in ordinary memory, and no one will
+ * access the freed slot.
+ *
+ * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
+ * it useful, and the flush would slow down modify_ldt().
+ */
+static int
+map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	bool is_vmalloc, had_top_level_entry;
+	unsigned long va;
+	spinlock_t *ptl;
+	pgd_t *pgd;
+	int i;
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return 0;
+
+	/*
+	 * Any given ldt_struct should have map_ldt_struct() called at most
+	 * once.
+	 */
+	WARN_ON(ldt->slot != -1);
+
+	/*
+	 * Did we already have the top level entry allocated?  We can't
+	 * use pgd_none() for this because it doesn't do anything on
+	 * 4-level page table kernels.
+	 */
+	pgd = pgd_offset(mm, LDT_BASE_ADDR);
+	had_top_level_entry = (pgd->pgd != 0);
+
+	is_vmalloc = is_vmalloc_addr(ldt->entries);
+
+	for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+		unsigned long offset = i << PAGE_SHIFT;
+		const void *src = (char *)ldt->entries + offset;
+		unsigned long pfn;
+		pte_t pte, *ptep;
+
+		va = (unsigned long)ldt_slot_va(slot) + offset;
+		pfn = is_vmalloc ? vmalloc_to_pfn(src) :
+			page_to_pfn(virt_to_page(src));
+		/*
+		 * Treat the PTI LDT range as a *userspace* range.
+		 * get_locked_pte() will allocate all needed pagetables
+		 * and account for them in this mm.
+		 */
+		ptep = get_locked_pte(mm, va, &ptl);
+		if (!ptep)
+			return -ENOMEM;
+		/*
+		 * Map it RO so the easy to find address is not a primary
+		 * target via some kernel interface which misses a
+		 * permission check.
+		 */
+		pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL));
+		set_pte_at(mm, va, ptep, pte);
+		pte_unmap_unlock(ptep, ptl);
+	}
+
+	if (mm->context.ldt) {
+		/*
+		 * We already had an LDT.  The top-level entry should already
+		 * have been allocated and synchronized with the usermode
+		 * tables.
+		 */
+		WARN_ON(!had_top_level_entry);
+		if (static_cpu_has(X86_FEATURE_PTI))
+			WARN_ON(!kernel_to_user_pgdp(pgd)->pgd);
+	} else {
+		/*
+		 * This is the first time we're mapping an LDT for this process.
+		 * Sync the pgd to the usermode tables.
+		 */
+		WARN_ON(had_top_level_entry);
+		if (static_cpu_has(X86_FEATURE_PTI)) {
+			WARN_ON(kernel_to_user_pgdp(pgd)->pgd);
+			set_pgd(kernel_to_user_pgdp(pgd), *pgd);
+		}
+	}
+
+	va = (unsigned long)ldt_slot_va(slot);
+	flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0);
+
+	ldt->slot = slot;
+#endif
+	return 0;
+}
+
+static void free_ldt_pgtables(struct mm_struct *mm)
+{
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+	struct mmu_gather tlb;
+	unsigned long start = LDT_BASE_ADDR;
+	unsigned long end = start + (1UL << PGDIR_SHIFT);
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	tlb_gather_mmu(&tlb, mm, start, end);
+	free_pgd_range(&tlb, start, end, start, end);
+	tlb_finish_mmu(&tlb, start, end);
+#endif
+}
+
 /* After calling this, the LDT is immutable. */
 static void finalize_ldt_struct(struct ldt_struct *ldt)
 {
 	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-			struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+	mutex_lock(&mm->context.lock);
+
 	/* Synchronizes with READ_ONCE in load_mm_ldt. */
-	smp_store_release(&current_mm->context.ldt, ldt);
+	smp_store_release(&mm->context.ldt, ldt);
 
-	/* Activate the LDT for all CPUs using current_mm. */
-	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+	/* Activate the LDT for all CPUs using current's mm. */
+	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+	mutex_unlock(&mm->context.lock);
 }
 
 static void free_ldt_struct(struct ldt_struct *ldt)
@@ -124,27 +246,20 @@ static void free_ldt_struct(struct ldt_struct *ldt)
 }
 
 /*
- * we do not have to muck with descriptors here, that is
- * done in switch_mm() as needed.
+ * Called on fork from arch_dup_mmap(). Just copy the current LDT state,
+ * the new task is not running, so nothing can be installed.
*/ -int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) +int ldt_dup_context(struct mm_struct *old_mm, struct mm_struct *mm) { struct ldt_struct *new_ldt; - struct mm_struct *old_mm; int retval = 0; - mutex_init(&mm->context.lock); - old_mm = current->mm; - if (!old_mm) { - mm->context.ldt = NULL; + if (!old_mm) return 0; - } mutex_lock(&old_mm->context.lock); - if (!old_mm->context.ldt) { - mm->context.ldt = NULL; + if (!old_mm->context.ldt) goto out_unlock; - } new_ldt = alloc_ldt_struct(old_mm->context.ldt->nr_entries); if (!new_ldt) { @@ -156,6 +271,12 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm) new_ldt->nr_entries * LDT_ENTRY_SIZE); finalize_ldt_struct(new_ldt); + retval = map_ldt_struct(mm, new_ldt, 0); + if (retval) { + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); + goto out_unlock; + } mm->context.ldt = new_ldt; out_unlock: @@ -174,13 +295,18 @@ void destroy_context_ldt(struct mm_struct *mm) mm->context.ldt = NULL; } +void ldt_arch_exit_mmap(struct mm_struct *mm) +{ + free_ldt_pgtables(mm); +} + static int read_ldt(void __user *ptr, unsigned long bytecount) { struct mm_struct *mm = current->mm; unsigned long entries_size; int retval; - mutex_lock(&mm->context.lock); + down_read(&mm->context.ldt_usr_sem); if (!mm->context.ldt) { retval = 0; @@ -209,7 +335,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount) retval = bytecount; out_unlock: - mutex_unlock(&mm->context.lock); + up_read(&mm->context.ldt_usr_sem); return retval; } @@ -269,7 +395,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) ldt.avl = 0; } - mutex_lock(&mm->context.lock); + if (down_write_killable(&mm->context.ldt_usr_sem)) + return -EINTR; old_ldt = mm->context.ldt; old_nr_entries = old_ldt ? old_ldt->nr_entries : 0; @@ -286,12 +413,31 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) new_ldt->entries[ldt_info.entry_number] = ldt; finalize_ldt_struct(new_ldt); + /* + * If we are using PTI, map the new LDT into the userspace pagetables. + * If there is already an LDT, use the other slot so that other CPUs + * will continue to use the old LDT until install_ldt() switches + * them over to the new LDT. + */ + error = map_ldt_struct(mm, new_ldt, old_ldt ? !old_ldt->slot : 0); + if (error) { + /* + * This only can fail for the first LDT setup. If an LDT is + * already installed then the PTE page is already + * populated. Mop up a half populated page table. + */ + if (!WARN_ON_ONCE(old_ldt)) + free_ldt_pgtables(mm); + free_ldt_struct(new_ldt); + goto out_unlock; + } + install_ldt(mm, new_ldt); free_ldt_struct(old_ldt); error = 0; out_unlock: - mutex_unlock(&mm->context.lock); + up_write(&mm->context.ldt_usr_sem); out: return error; } diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 00bc751c861c..edfede768688 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -48,8 +48,6 @@ static void load_segments(void) "\tmovl $"STR(__KERNEL_DS)",%%eax\n" "\tmovl %%eax,%%ds\n" "\tmovl %%eax,%%es\n" - "\tmovl %%eax,%%fs\n" - "\tmovl %%eax,%%gs\n" "\tmovl %%eax,%%ss\n" : : : "eax", "memory"); #undef STR @@ -232,8 +230,8 @@ void machine_kexec(struct kimage *image) * The gdt & idt are now invalid. * If you want to load them you must set up your own idt & gdt. 
*/ - set_gdt(phys_to_virt(0), 0); idt_invalidate(phys_to_virt(0)); + set_gdt(phys_to_virt(0), 0); /* now call it */ image->start = relocate_kernel_ptr((unsigned long)image->head, diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index aed9d94bd46f..832a6acd730f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -47,7 +47,7 @@ * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = { +__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = { .x86_tss = { /* * .sp0 is only used when entering ring 0 from a lower diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 8af2e8d0c0a1..145810b0edf6 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -906,9 +906,6 @@ void __init setup_arch(char **cmdline_p) set_bit(EFI_BOOT, &efi.flags); set_bit(EFI_64BIT, &efi.flags); } - - if (efi_enabled(EFI_BOOT)) - efi_memblock_x86_reserve_range(); #endif x86_init.oem.arch_setup(); @@ -962,6 +959,8 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + if (efi_enabled(EFI_BOOT)) + efi_memblock_x86_reserve_range(); #ifdef CONFIG_MEMORY_HOTPLUG /* * Memory used by the kernel cannot be hot-removed because Linux diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 35cb20994e32..ed556d50d7ed 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -126,14 +126,10 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) spin_lock_irqsave(&rtc_lock, flags); CMOS_WRITE(0xa, 0xf); spin_unlock_irqrestore(&rtc_lock, flags); - local_flush_tlb(); - pr_debug("1.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = start_eip >> 4; - pr_debug("2.\n"); *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = start_eip & 0xf; - pr_debug("3.\n"); } static inline void smpboot_restore_warm_reset_vector(void) @@ -141,11 +137,6 @@ static inline void smpboot_restore_warm_reset_vector(void) unsigned long flags; /* - * Install writable page 0 entry to set BIOS data area. - */ - local_flush_tlb(); - - /* * Paranoid: Set warm reset code and vector here back * to default values. */ @@ -932,12 +923,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle, initial_code = (unsigned long)start_secondary; initial_stack = idle->thread.sp; - /* - * Enable the espfix hack for this CPU - */ -#ifdef CONFIG_X86_ESPFIX64 + /* Enable the espfix hack for this CPU */ init_espfix_ap(cpu); -#endif /* So we see what's up */ announce_cpu(cpu, apicid); diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index 77835bc021c7..093f2ea5dd56 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c @@ -102,7 +102,7 @@ __save_stack_trace_reliable(struct stack_trace *trace, for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state); unwind_next_frame(&state)) { - regs = unwind_get_entry_regs(&state); + regs = unwind_get_entry_regs(&state, NULL); if (regs) { /* * Kernel mode registers on the stack indicate an @@ -164,8 +164,12 @@ int save_stack_trace_tsk_reliable(struct task_struct *tsk, { int ret; + /* + * If the task doesn't have a stack (e.g., a zombie), the stack is + * "reliably" empty. 
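+ * Returning 0 reports a valid, empty trace instead of an error.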
+ */ if (!try_get_task_stack(tsk)) - return -EINVAL; + return 0; ret = __save_stack_trace_reliable(trace, tsk); diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c index 9a9c9b076955..a5b802a12212 100644 --- a/arch/x86/kernel/tls.c +++ b/arch/x86/kernel/tls.c @@ -93,17 +93,10 @@ static void set_tls_desc(struct task_struct *p, int idx, cpu = get_cpu(); while (n-- > 0) { - if (LDT_empty(info) || LDT_zero(info)) { + if (LDT_empty(info) || LDT_zero(info)) memset(desc, 0, sizeof(*desc)); - } else { + else fill_ldt(desc, info); - - /* - * Always set the accessed bit so that the CPU - * doesn't try to write to the (read-only) GDT. - */ - desc->type |= 1; - } ++info; ++desc; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index e98f8b66a460..446c9ef8cfc3 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -51,6 +51,7 @@ #include <asm/traps.h> #include <asm/desc.h> #include <asm/fpu/internal.h> +#include <asm/cpu_entry_area.h> #include <asm/mce.h> #include <asm/fixmap.h> #include <asm/mach_traps.h> @@ -360,7 +361,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) * * No need for ist_enter here because we don't use RCU. */ - if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && + if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY && regs->cs == __KERNEL_CS && regs->ip == (unsigned long)native_irq_return_iret) { @@ -951,8 +952,9 @@ void __init trap_init(void) * "sidt" instruction will not leak the location of the kernel, and * to defend the IDT against arbitrary memory write vulnerabilities. * It will be reloaded in cpu_init() */ - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO); - idt_descr.address = fix_to_virt(FIX_RO_IDT); + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table), + PAGE_KERNEL_RO); + idt_descr.address = CPU_ENTRY_AREA_RO_IDT; /* * Should be a barrier for any external CPU state: diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d2a8b5a24a44..1e413a9326aa 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -61,11 +61,17 @@ jiffies_64 = jiffies; . = ALIGN(HPAGE_SIZE); \ __end_rodata_hpage_align = .; +#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE); +#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE); + #else #define X64_ALIGN_RODATA_BEGIN #define X64_ALIGN_RODATA_END +#define ALIGN_ENTRY_TEXT_BEGIN +#define ALIGN_ENTRY_TEXT_END + #endif PHDRS { @@ -102,8 +108,10 @@ SECTIONS CPUIDLE_TEXT LOCK_TEXT KPROBES_TEXT + ALIGN_ENTRY_TEXT_BEGIN ENTRY_TEXT IRQENTRY_TEXT + ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT *(.fixup) *(.gnu.warning) diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index eb714f1cdf7e..bb31c801f1fc 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4986,6 +4986,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r14, %c[r14](%[svm]) \n\t" "mov %%r15, %c[r15](%[svm]) \n\t" #endif + /* + * Clear host registers marked as clobbered to prevent + * speculative use. 
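+ * At this point the registers still hold guest values; zeroing them keeps guest-controlled data out of host speculation gadgets.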
+ */ + "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t" + "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t" + "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t" + "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t" + "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t" +#ifdef CONFIG_X86_64 + "xor %%r8, %%r8 \n\t" + "xor %%r9, %%r9 \n\t" + "xor %%r10, %%r10 \n\t" + "xor %%r11, %%r11 \n\t" + "xor %%r12, %%r12 \n\t" + "xor %%r13, %%r13 \n\t" + "xor %%r14, %%r14 \n\t" + "xor %%r15, %%r15 \n\t" +#endif "pop %%" _ASM_BP : : [svm]"a"(svm), diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 023afa0c8887..5c14d65f676a 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9415,6 +9415,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) /* Save guest registers, load host registers, keep flags */ "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t" "pop %0 \n\t" + "setbe %c[fail](%0)\n\t" "mov %%" _ASM_AX ", %c[rax](%0) \n\t" "mov %%" _ASM_BX ", %c[rbx](%0) \n\t" __ASM_SIZE(pop) " %c[rcx](%0) \n\t" @@ -9431,12 +9432,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) "mov %%r13, %c[r13](%0) \n\t" "mov %%r14, %c[r14](%0) \n\t" "mov %%r15, %c[r15](%0) \n\t" + "xor %%r8d, %%r8d \n\t" + "xor %%r9d, %%r9d \n\t" + "xor %%r10d, %%r10d \n\t" + "xor %%r11d, %%r11d \n\t" + "xor %%r12d, %%r12d \n\t" + "xor %%r13d, %%r13d \n\t" + "xor %%r14d, %%r14d \n\t" + "xor %%r15d, %%r15d \n\t" #endif "mov %%cr2, %%" _ASM_AX " \n\t" "mov %%" _ASM_AX ", %c[cr2](%0) \n\t" + "xor %%eax, %%eax \n\t" + "xor %%ebx, %%ebx \n\t" + "xor %%esi, %%esi \n\t" + "xor %%edi, %%edi \n\t" "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t" - "setbe %c[fail](%0) \n\t" ".pushsection .rodata \n\t" ".global vmx_return \n\t" "vmx_return: " _ASM_PTR " 2b \n\t" diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 8e13b8cc6bed..27e9e90a8d35 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -10,7 +10,7 @@ CFLAGS_REMOVE_mem_encrypt.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o physaddr.o setup_nx.o tlb.o + pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o # Make sure __phys_addr has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) @@ -41,9 +41,10 @@ obj-$(CONFIG_AMD_NUMA) += amdtopology.o obj-$(CONFIG_ACPI_NUMA) += srat.o obj-$(CONFIG_NUMA_EMU) += numa_emulation.o -obj-$(CONFIG_X86_INTEL_MPX) += mpx.o -obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o -obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_X86_INTEL_MPX) += mpx.o +obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o +obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o +obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c new file mode 100644 index 000000000000..b9283cc27622 --- /dev/null +++ b/arch/x86/mm/cpu_entry_area.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/spinlock.h> +#include <linux/percpu.h> + +#include <asm/cpu_entry_area.h> +#include <asm/pgtable.h> +#include <asm/fixmap.h> +#include <asm/desc.h> + +static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage); + +#ifdef CONFIG_X86_64 +static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks + [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]); +#endif + +struct cpu_entry_area *get_cpu_entry_area(int cpu) +{ + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE; + BUILD_BUG_ON(sizeof(struct 
cpu_entry_area) % PAGE_SIZE != 0); + + return (struct cpu_entry_area *) va; +} +EXPORT_SYMBOL(get_cpu_entry_area); + +void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags) +{ + unsigned long va = (unsigned long) cea_vaddr; + + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags)); +} + +static void __init +cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot) +{ + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE) + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot); +} + +static void percpu_setup_debug_store(int cpu) +{ +#ifdef CONFIG_CPU_SUP_INTEL + int npages; + void *cea; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return; + + cea = &get_cpu_entry_area(cpu)->cpu_debug_store; + npages = sizeof(struct debug_store) / PAGE_SIZE; + BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0); + cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, + PAGE_KERNEL); + + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers; + /* + * Force the population of PMDs for not yet allocated per cpu + * memory like debug store buffers. + */ + npages = sizeof(struct debug_store_buffers) / PAGE_SIZE; + for (; npages; npages--, cea += PAGE_SIZE) + cea_set_pte(cea, 0, PAGE_NONE); +#endif +} + +/* Setup the fixmap mappings only once per-processor */ +static void __init setup_cpu_entry_area(int cpu) +{ +#ifdef CONFIG_X86_64 + extern char _entry_trampoline[]; + + /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */ + pgprot_t gdt_prot = PAGE_KERNEL_RO; + pgprot_t tss_prot = PAGE_KERNEL_RO; +#else + /* + * On native 32-bit systems, the GDT cannot be read-only because + * our double fault handler uses a task gate, and entering through + * a task gate needs to change an available TSS to busy. If the + * GDT is read-only, that will triple fault. The TSS cannot be + * read-only because the CPU writes to it on task switches. + * + * On Xen PV, the GDT must be read-only because the hypervisor + * requires it. + */ + pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ? + PAGE_KERNEL_RO : PAGE_KERNEL; + pgprot_t tss_prot = PAGE_KERNEL; +#endif + + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu), + gdt_prot); + + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page, + per_cpu_ptr(&entry_stack_storage, cpu), 1, + PAGE_KERNEL); + + /* + * The Intel SDM says (Volume 3, 7.2.1): + * + * Avoid placing a page boundary in the part of the TSS that the + * processor reads during a task switch (the first 104 bytes). The + * processor may not correctly perform address translations if a + * boundary occurs in this area. During a task switch, the processor + * reads and writes into the first 104 bytes of each TSS (using + * contiguous physical addresses beginning with the physical address + * of the first byte of the TSS). So, after TSS access begins, if + * part of the 104 bytes is not physically contiguous, the processor + * will access incorrect information without generating a page-fault + * exception. + * + * There are also a lot of errata involving the TSS spanning a page + * boundary. Assert that we're not doing that. 
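+ * The BUILD_BUG_ON()s below enforce this at build time: x86_tss must not cross a page boundary and tss_struct must be a whole number of pages.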
+ */ + BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^ + offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK); + BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0); + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss, + &per_cpu(cpu_tss_rw, cpu), + sizeof(struct tss_struct) / PAGE_SIZE, tss_prot); + +#ifdef CONFIG_X86_32 + per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu); +#endif + +#ifdef CONFIG_X86_64 + BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0); + BUILD_BUG_ON(sizeof(exception_stacks) != + sizeof(((struct cpu_entry_area *)0)->exception_stacks)); + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks, + &per_cpu(exception_stacks, cpu), + sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL); + + cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline, + __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX); +#endif + percpu_setup_debug_store(cpu); +} + +static __init void setup_cpu_entry_area_ptes(void) +{ +#ifdef CONFIG_X86_32 + unsigned long start, end; + + BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE); + BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK); + + start = CPU_ENTRY_AREA_BASE; + end = start + CPU_ENTRY_AREA_MAP_SIZE; + + /* Careful here: start + PMD_SIZE might wrap around */ + for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE) + populate_extra_pte(start); +#endif +} + +void __init setup_cpu_entry_areas(void) +{ + unsigned int cpu; + + setup_cpu_entry_area_ptes(); + + for_each_possible_cpu(cpu) + setup_cpu_entry_area(cpu); +} diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index bfcffdf6c577..421f2664ffa0 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -5,7 +5,7 @@ static int ptdump_show(struct seq_file *m, void *v) { - ptdump_walk_pgd_level(m, NULL); + ptdump_walk_pgd_level_debugfs(m, NULL, false); return 0; } @@ -22,21 +22,89 @@ static const struct file_operations ptdump_fops = { .release = single_release, }; -static struct dentry *pe; +static int ptdump_show_curknl(struct seq_file *m, void *v) +{ + if (current->mm->pgd) { + down_read(&current->mm->mmap_sem); + ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false); + up_read(&current->mm->mmap_sem); + } + return 0; +} + +static int ptdump_open_curknl(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show_curknl, NULL); +} + +static const struct file_operations ptdump_curknl_fops = { + .owner = THIS_MODULE, + .open = ptdump_open_curknl, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +static struct dentry *pe_curusr; + +static int ptdump_show_curusr(struct seq_file *m, void *v) +{ + if (current->mm->pgd) { + down_read(&current->mm->mmap_sem); + ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true); + up_read(&current->mm->mmap_sem); + } + return 0; +} + +static int ptdump_open_curusr(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show_curusr, NULL); +} + +static const struct file_operations ptdump_curusr_fops = { + .owner = THIS_MODULE, + .open = ptdump_open_curusr, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +static struct dentry *dir, *pe_knl, *pe_curknl; static int __init pt_dump_debug_init(void) { - pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL, - &ptdump_fops); - if (!pe) + dir = debugfs_create_dir("page_tables", NULL); + if (!dir) return -ENOMEM; + pe_knl = debugfs_create_file("kernel", 0400, dir, NULL, + &ptdump_fops);
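+ /* debugfs_create_file() returns NULL on failure; unwind everything via err. */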
+ if (!pe_knl) + goto err; + + pe_curknl = debugfs_create_file("current_kernel", 0400, + dir, NULL, &ptdump_curknl_fops); + if (!pe_curknl) + goto err; + +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pe_curusr = debugfs_create_file("current_user", 0400, + dir, NULL, &ptdump_curusr_fops); + if (!pe_curusr) + goto err; +#endif return 0; +err: + debugfs_remove_recursive(dir); + return -ENOMEM; } static void __exit pt_dump_debug_exit(void) { - debugfs_remove_recursive(pe); + debugfs_remove_recursive(dir); } module_init(pt_dump_debug_init); diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 5e3ac6fe6c9e..2a4849e92831 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -44,68 +44,97 @@ struct addr_marker { unsigned long max_lines; }; -/* indices for address_markers; keep sync'd w/ address_markers below */ +/* Address space markers hints */ + +#ifdef CONFIG_X86_64 + enum address_markers_idx { USER_SPACE_NR = 0, -#ifdef CONFIG_X86_64 KERNEL_SPACE_NR, LOW_KERNEL_NR, +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL) + LDT_NR, +#endif VMALLOC_START_NR, VMEMMAP_START_NR, #ifdef CONFIG_KASAN KASAN_SHADOW_START_NR, KASAN_SHADOW_END_NR, #endif -# ifdef CONFIG_X86_ESPFIX64 + CPU_ENTRY_AREA_NR, +#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL) + LDT_NR, +#endif +#ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, -# endif +#endif +#ifdef CONFIG_EFI + EFI_END_NR, +#endif HIGH_KERNEL_NR, MODULES_VADDR_NR, MODULES_END_NR, -#else + FIXADDR_START_NR, + END_OF_SPACE_NR, +}; + +static struct addr_marker address_markers[] = { + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { (1UL << 63), "Kernel Space" }, + [LOW_KERNEL_NR] = { 0UL, "Low Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMEMMAP_START_NR] = { 0UL, "Vmemmap" }, +#ifdef CONFIG_KASAN + [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, + [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, +#endif +#ifdef CONFIG_MODIFY_LDT_SYSCALL + [LDT_NR] = { LDT_BASE_ADDR, "LDT remap" }, +#endif + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, +#ifdef CONFIG_X86_ESPFIX64 + [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, +#endif +#ifdef CONFIG_EFI + [EFI_END_NR] = { EFI_VA_END, "EFI Runtime Services" }, +#endif + [HIGH_KERNEL_NR] = { __START_KERNEL_map, "High Kernel Mapping" }, + [MODULES_VADDR_NR] = { MODULES_VADDR, "Modules" }, + [MODULES_END_NR] = { MODULES_END, "End Modules" }, + [FIXADDR_START_NR] = { FIXADDR_START, "Fixmap Area" }, + [END_OF_SPACE_NR] = { -1, NULL } +}; + +#else /* CONFIG_X86_64 */ + +enum address_markers_idx { + USER_SPACE_NR = 0, KERNEL_SPACE_NR, VMALLOC_START_NR, VMALLOC_END_NR, -# ifdef CONFIG_HIGHMEM +#ifdef CONFIG_HIGHMEM PKMAP_BASE_NR, -# endif - FIXADDR_START_NR, #endif + CPU_ENTRY_AREA_NR, + FIXADDR_START_NR, + END_OF_SPACE_NR, }; -/* Address space markers hints */ static struct addr_marker address_markers[] = { - { 0, "User Space" }, -#ifdef CONFIG_X86_64 - { 0x8000000000000000UL, "Kernel Space" }, - { 0/* PAGE_OFFSET */, "Low Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/* VMEMMAP_START */, "Vmemmap" }, -#ifdef CONFIG_KASAN - { KASAN_SHADOW_START, "KASAN shadow" }, - { KASAN_SHADOW_END, "KASAN shadow end" }, + [USER_SPACE_NR] = { 0, "User Space" }, + [KERNEL_SPACE_NR] = { PAGE_OFFSET, "Kernel Mapping" }, + [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, + [VMALLOC_END_NR] = { 0UL, "vmalloc() End" }, +#ifdef CONFIG_HIGHMEM + 
[PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" }, #endif -# ifdef CONFIG_X86_ESPFIX64 - { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, -# endif -# ifdef CONFIG_EFI - { EFI_VA_END, "EFI Runtime Services" }, -# endif - { __START_KERNEL_map, "High Kernel Mapping" }, - { MODULES_VADDR, "Modules" }, - { MODULES_END, "End Modules" }, -#else - { PAGE_OFFSET, "Kernel Mapping" }, - { 0/* VMALLOC_START */, "vmalloc() Area" }, - { 0/*VMALLOC_END*/, "vmalloc() End" }, -# ifdef CONFIG_HIGHMEM - { 0/*PKMAP_BASE*/, "Persistent kmap() Area" }, -# endif - { 0/*FIXADDR_START*/, "Fixmap Area" }, -#endif - { -1, NULL } /* End of list */ + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" }, + [FIXADDR_START_NR] = { 0UL, "Fixmap area" }, + [END_OF_SPACE_NR] = { -1, NULL } }; +#endif /* !CONFIG_X86_64 */ + /* Multipliers for offsets within the PTEs */ #define PTE_LEVEL_MULT (PAGE_SIZE) #define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT) @@ -140,7 +169,7 @@ static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg) static const char * const level_name[] = { "cr3", "pgd", "p4d", "pud", "pmd", "pte" }; - if (!pgprot_val(prot)) { + if (!(pr & _PAGE_PRESENT)) { /* Not present */ pt_dump_cont_printf(m, dmsg, " "); } else { @@ -447,7 +476,7 @@ static inline bool is_hypervisor_range(int idx) } static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, - bool checkwx) + bool checkwx, bool dmesg) { #ifdef CONFIG_X86_64 pgd_t *start = (pgd_t *) &init_top_pgt; @@ -460,7 +489,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, if (pgd) { start = pgd; - st.to_dmesg = true; + st.to_dmesg = dmesg; } st.check_wx = checkwx; @@ -498,13 +527,37 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd) { - ptdump_walk_pgd_level_core(m, pgd, false); + ptdump_walk_pgd_level_core(m, pgd, false, true); +} + +void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + if (user && static_cpu_has(X86_FEATURE_PTI)) + pgd = kernel_to_user_pgdp(pgd); +#endif + ptdump_walk_pgd_level_core(m, pgd, false, false); +} +EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs); + +static void ptdump_walk_user_pgd_level_checkwx(void) +{ +#ifdef CONFIG_PAGE_TABLE_ISOLATION + pgd_t *pgd = (pgd_t *) &init_top_pgt; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("x86/mm: Checking user space page tables\n"); + pgd = kernel_to_user_pgdp(pgd); + ptdump_walk_pgd_level_core(NULL, pgd, true, false); +#endif } -EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level); void ptdump_walk_pgd_level_checkwx(void) { - ptdump_walk_pgd_level_core(NULL, NULL, true); + ptdump_walk_pgd_level_core(NULL, NULL, true, false); + ptdump_walk_user_pgd_level_checkwx(); } static int __init pt_dump_init(void) @@ -525,8 +578,8 @@ static int __init pt_dump_init(void) address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE; # endif address_markers[FIXADDR_START_NR].start_address = FIXADDR_START; + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE; #endif - return 0; } __initcall(pt_dump_init); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 6fdf91ef130a..82f5252c723a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -20,6 +20,7 @@ #include <asm/kaslr.h> #include <asm/hypervisor.h> #include <asm/cpufeature.h> +#include <asm/pti.h> /* * We need to define the tracepoints somewhere, and tlb.c @@ -160,6 +161,12 @@ struct map_range { static int page_size_mask; +static 
void enable_global_pages(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + __supported_pte_mask |= _PAGE_GLOBAL; +} + static void __init probe_page_size_mask(void) { /* @@ -177,11 +184,11 @@ static void __init probe_page_size_mask(void) cr4_set_bits_and_update_boot(X86_CR4_PSE); /* Enable PGE if available */ + __supported_pte_mask &= ~_PAGE_GLOBAL; if (boot_cpu_has(X86_FEATURE_PGE)) { cr4_set_bits_and_update_boot(X86_CR4_PGE); - __supported_pte_mask |= _PAGE_GLOBAL; - } else - __supported_pte_mask &= ~_PAGE_GLOBAL; + enable_global_pages(); + } /* Enable 1 GB linear kernel mappings if available: */ if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) { @@ -194,34 +201,44 @@ static void setup_pcid(void) { -#ifdef CONFIG_X86_64 - if (boot_cpu_has(X86_FEATURE_PCID)) { - if (boot_cpu_has(X86_FEATURE_PGE)) { - /* - * This can't be cr4_set_bits_and_update_boot() -- - * the trampoline code can't handle CR4.PCIDE and - * it wouldn't do any good anyway. Despite the name, - * cr4_set_bits_and_update_boot() doesn't actually - * cause the bits in question to remain set all the - * way through the secondary boot asm. - * - * Instead, we brute-force it and set CR4.PCIDE - * manually in start_secondary(). - */ - cr4_set_bits(X86_CR4_PCIDE); - } else { - /* - * flush_tlb_all(), as currently implemented, won't - * work if PCID is on but PGE is not. Since that - * combination doesn't exist on real hardware, there's - * no reason to try to fully support it, but it's - * polite to avoid corrupting data if we're on - * an improperly configured VM. - */ - setup_clear_cpu_cap(X86_FEATURE_PCID); - } + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + if (!boot_cpu_has(X86_FEATURE_PCID)) + return; + + if (boot_cpu_has(X86_FEATURE_PGE)) { + /* + * This can't be cr4_set_bits_and_update_boot() -- the + * trampoline code can't handle CR4.PCIDE and it wouldn't + * do any good anyway. Despite the name, + * cr4_set_bits_and_update_boot() doesn't actually cause + * the bits in question to remain set all the way through + * the secondary boot asm. + * + * Instead, we brute-force it and set CR4.PCIDE manually in + * start_secondary(). + */ + cr4_set_bits(X86_CR4_PCIDE); + + /* + * INVPCID's single-context modes (2/3) only work if we set + * X86_CR4_PCIDE, *and* we have INVPCID support. It's unusable + * on systems that have X86_CR4_PCIDE clear, or that have + * no INVPCID support at all. + */ + if (boot_cpu_has(X86_FEATURE_INVPCID)) + setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE); + } else { + /* + * flush_tlb_all(), as currently implemented, won't work if + * PCID is on but PGE is not. Since that combination + * doesn't exist on real hardware, there's no reason to try + * to fully support it, but it's polite to avoid corrupting + * data if we're on an improperly configured VM.
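+ * Clearing the feature bit makes boot_cpu_has(X86_FEATURE_PCID) report false everywhere else in the kernel.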
+ */ + setup_clear_cpu_cap(X86_FEATURE_PCID); } -#endif } #ifdef CONFIG_X86_32 @@ -622,6 +639,7 @@ void __init init_mem_mapping(void) { unsigned long end; + pti_check_boottime_disable(); probe_page_size_mask(); setup_pcid(); @@ -845,12 +863,12 @@ void __init zone_sizes_init(void) free_area_init_nodes(max_zone_pfns); } -DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { +__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { .loaded_mm = &init_mm, .next_asid = 1, .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */ }; -EXPORT_SYMBOL_GPL(cpu_tlbstate); +EXPORT_PER_CPU_SYMBOL(cpu_tlbstate); void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache) { diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 8a64a6f2848d..135c9a7898c7 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -50,6 +50,7 @@ #include <asm/setup.h> #include <asm/set_memory.h> #include <asm/page_types.h> +#include <asm/cpu_entry_area.h> #include <asm/init.h> #include "mm_internal.h" @@ -766,6 +767,7 @@ void __init mem_init(void) mem_init_print_info(NULL); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif @@ -777,6 +779,10 @@ void __init mem_init(void) FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, + CPU_ENTRY_AREA_BASE, + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE, + CPU_ENTRY_AREA_MAP_SIZE >> 10, + #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 9ec70d780f1f..47388f0c0e59 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -15,6 +15,7 @@ #include <asm/tlbflush.h> #include <asm/sections.h> #include <asm/pgtable.h> +#include <asm/cpu_entry_area.h> extern struct range pfn_mapped[E820_MAX_ENTRIES]; @@ -322,31 +323,33 @@ void __init kasan_init(void) map_range(&pfn_mapped[i]); } - kasan_populate_zero_shadow( - kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), - kasan_mem_to_shadow((void *)__START_KERNEL_map)); - - kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext), - (unsigned long)kasan_mem_to_shadow(_end), - early_pfn_to_nid(__pa(_stext))); - - shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM); + shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE; shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin); shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin, PAGE_SIZE); - shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE); + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE + + CPU_ENTRY_AREA_MAP_SIZE); shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end); shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end, PAGE_SIZE); - kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), - shadow_cpu_entry_begin); + kasan_populate_zero_shadow( + kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM), + shadow_cpu_entry_begin); kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin, (unsigned long)shadow_cpu_entry_end, 0); - kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END); + kasan_populate_zero_shadow(shadow_cpu_entry_end, + kasan_mem_to_shadow((void *)__START_KERNEL_map)); + + kasan_populate_shadow((unsigned 
long)kasan_mem_to_shadow(_stext), + (unsigned long)kasan_mem_to_shadow(_end), + early_pfn_to_nid(__pa(_stext))); + + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END), + (void *)KASAN_SHADOW_END); load_cr3(init_top_pgt); __flush_tlb_all(); diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 879ef930e2c2..aedebd2ebf1e 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -34,25 +34,14 @@ #define TB_SHIFT 40 /* - * Virtual address start and end range for randomization. The end changes base - * on configuration to have the highest amount of space for randomization. - * It increases the possible random position for each randomized region. + * Virtual address start and end range for randomization. * - * You need to add an if/def entry if you introduce a new memory region - * compatible with KASLR. Your entry must be in logical order with memory - * layout. For example, ESPFIX is before EFI because its virtual address is - * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory() to - * ensure that this order is correct and won't be changed. + * The end address could depend on more configuration options to make the + * highest amount of space for randomization available, but that's too hard + * to keep straight and caused issues already. */ static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; - -#if defined(CONFIG_X86_ESPFIX64) -static const unsigned long vaddr_end = ESPFIX_BASE_ADDR; -#elif defined(CONFIG_EFI) -static const unsigned long vaddr_end = EFI_VA_END; -#else -static const unsigned long vaddr_end = __START_KERNEL_map; -#endif +static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; /* Default values */ unsigned long page_offset_base = __PAGE_OFFSET_BASE; @@ -101,15 +90,12 @@ void __init kernel_randomize_memory(void) unsigned long remain_entropy; /* - * All these BUILD_BUG_ON checks ensures the memory layout is - * consistent with the vaddr_start/vaddr_end variables. + * These BUILD_BUG_ON checks ensure the memory layout is consistent + * with the vaddr_start/vaddr_end variables. These checks are very + * limited.... 
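+ * They only prove that the randomization range ends exactly at the CPU entry area and never extends past the start of the kernel text mapping.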
*/ BUILD_BUG_ON(vaddr_start >= vaddr_end); - BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) && - vaddr_end >= EFI_VA_END); - BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) || - IS_ENABLED(CONFIG_EFI)) && - vaddr_end >= __START_KERNEL_map); + BUILD_BUG_ON(vaddr_end != CPU_ENTRY_AREA_BASE); BUILD_BUG_ON(vaddr_end > __START_KERNEL_map); if (!kaslr_memory_enabled()) diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index d9a9e9fc75dd..391b13402e40 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -405,13 +405,13 @@ bool sme_active(void) { return sme_me_mask && !sev_enabled; } -EXPORT_SYMBOL_GPL(sme_active); +EXPORT_SYMBOL(sme_active); bool sev_active(void) { return sme_me_mask && sev_enabled; } -EXPORT_SYMBOL_GPL(sev_active); +EXPORT_SYMBOL(sev_active); static const struct dma_map_ops sev_dma_ops = { .alloc = sev_alloc, diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 96d456a94b03..004abf9ebf12 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -355,14 +355,15 @@ static inline void _pgd_free(pgd_t *pgd) kmem_cache_free(pgd_cache, pgd); } #else + static inline pgd_t *_pgd_alloc(void) { - return (pgd_t *)__get_free_page(PGALLOC_GFP); + return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER); } static inline void _pgd_free(pgd_t *pgd) { - free_page((unsigned long)pgd); + free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); } #endif /* CONFIG_X86_PAE */ diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 6b9bf023a700..c3c5274410a9 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c @@ -10,6 +10,7 @@ #include <linux/pagemap.h> #include <linux/spinlock.h> +#include <asm/cpu_entry_area.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/fixmap.h> diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c new file mode 100644 index 000000000000..43d4a4a29037 --- /dev/null +++ b/arch/x86/mm/pti.c @@ -0,0 +1,388 @@ +/* + * Copyright(c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * This code is based in part on work published here: + * + * https://github.com/IAIK/KAISER + * + * The original work was written by, and signed off for the Linux + * kernel by: + * + * Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at> + * Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at> + * Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at> + * Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at> + * + * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com> + * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and + * Andy Lutomirski <luto@amacapital.net> + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/uaccess.h> + +#include <asm/cpufeature.h> +#include <asm/hypervisor.h> +#include <asm/vsyscall.h> +#include <asm/cmdline.h> +#include <asm/pti.h> +#include <asm/pgtable.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#include <asm/desc.h> + +#undef pr_fmt +#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt + +/* Backporting helper */ +#ifndef __GFP_NOTRACK +#define __GFP_NOTRACK 0 +#endif + +static void __init pti_print_if_insecure(const char *reason) +{ + if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + pr_info("%s\n", reason); +} + +static void __init pti_print_if_secure(const char *reason) +{ + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + pr_info("%s\n", reason); +} + +void __init pti_check_boottime_disable(void) +{ + char arg[5]; + int ret; + + if (hypervisor_is_type(X86_HYPER_XEN_PV)) { + pti_print_if_insecure("disabled on XEN PV."); + return; + } + + ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg)); + if (ret > 0) { + if (ret == 3 && !strncmp(arg, "off", 3)) { + pti_print_if_insecure("disabled on command line."); + return; + } + if (ret == 2 && !strncmp(arg, "on", 2)) { + pti_print_if_secure("force enabled on command line."); + goto enable; + } + if (ret == 4 && !strncmp(arg, "auto", 4)) + goto autosel; + } + + if (cmdline_find_option_bool(boot_command_line, "nopti")) { + pti_print_if_insecure("disabled on command line."); + return; + } + +autosel: + if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) + return; +enable: + setup_force_cpu_cap(X86_FEATURE_PTI); +} + +pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) +{ + /* + * Changes to the high (kernel) portion of the kernelmode page + * tables are not automatically propagated to the usermode tables. + * + * Users should keep in mind that, unlike the kernelmode tables, + * there is no vmalloc_fault equivalent for the usermode tables. + * Top-level entries added to init_mm's usermode pgd after boot + * will not be automatically propagated to other mms. + */ + if (!pgdp_maps_userspace(pgdp)) + return pgd; + + /* + * The user page tables get the full PGD, accessible from + * userspace: + */ + kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd; + + /* + * If this is normal user memory, make it NX in the kernel + * pagetables so that, if we somehow screw up and return to + * usermode with the kernel CR3 loaded, we'll get a page fault + * instead of allowing user code to execute with the wrong CR3. + * + * As exceptions, we don't set NX if: + * - _PAGE_USER is not set. This could be an executable + * EFI runtime mapping or something similar, and the kernel + * may execute from it + * - we don't have NX support + * - we're clearing the PGD (i.e.
the new pgd is not present). + */ + if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) && + (__supported_pte_mask & _PAGE_NX)) + pgd.pgd |= _PAGE_NX; + + /* return the copy of the PGD we want the kernel to use: */ + return pgd; +} + +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a P4D on success, or NULL on failure. + */ +static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) +{ + pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + + if (address < PAGE_OFFSET) { + WARN_ONCE(1, "attempt to walk user address\n"); + return NULL; + } + + if (pgd_none(*pgd)) { + unsigned long new_p4d_page = __get_free_page(gfp); + if (!new_p4d_page) + return NULL; + + if (pgd_none(*pgd)) { + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); + new_p4d_page = 0; + } + if (new_p4d_page) + free_page(new_p4d_page); + } + BUILD_BUG_ON(pgd_large(*pgd) != 0); + + return p4d_offset(pgd, address); +} + +/* + * Walk the user copy of the page tables (optionally) trying to allocate + * page table pages on the way down. + * + * Returns a pointer to a PMD on success, or NULL on failure. + */ +static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + p4d_t *p4d = pti_user_pagetable_walk_p4d(address); + pud_t *pud; + + BUILD_BUG_ON(p4d_large(*p4d) != 0); + if (p4d_none(*p4d)) { + unsigned long new_pud_page = __get_free_page(gfp); + if (!new_pud_page) + return NULL; + + if (p4d_none(*p4d)) { + set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); + new_pud_page = 0; + } + if (new_pud_page) + free_page(new_pud_page); + } + + pud = pud_offset(p4d, address); + /* The user page tables do not use large mappings: */ + if (pud_large(*pud)) { + WARN_ON(1); + return NULL; + } + if (pud_none(*pud)) { + unsigned long new_pmd_page = __get_free_page(gfp); + if (!new_pmd_page) + return NULL; + + if (pud_none(*pud)) { + set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); + new_pmd_page = 0; + } + if (new_pmd_page) + free_page(new_pmd_page); + } + + return pmd_offset(pud, address); +} + +#ifdef CONFIG_X86_VSYSCALL_EMULATION +/* + * Walk the shadow copy of the page tables (optionally) trying to allocate + * page table pages on the way down. Does not support large pages. + * + * Note: this is only used when mapping *new* kernel data into the + * user/shadow page tables. It is never used for userspace data. + * + * Returns a pointer to a PTE on success, or NULL on failure. + */ +static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) +{ + gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); + pmd_t *pmd = pti_user_pagetable_walk_pmd(address); + pte_t *pte; + + /* We can't do anything sensible if we hit a large mapping. 
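+ * Warn and bail out rather than trying to split the mapping.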
*/ + if (pmd_large(*pmd)) { + WARN_ON(1); + return NULL; + } + + if (pmd_none(*pmd)) { + unsigned long new_pte_page = __get_free_page(gfp); + if (!new_pte_page) + return NULL; + + if (pmd_none(*pmd)) { + set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); + new_pte_page = 0; + } + if (new_pte_page) + free_page(new_pte_page); + } + + pte = pte_offset_kernel(pmd, address); + if (pte_flags(*pte) & _PAGE_USER) { + WARN_ONCE(1, "attempt to walk to user pte\n"); + return NULL; + } + return pte; +} + +static void __init pti_setup_vsyscall(void) +{ + pte_t *pte, *target_pte; + unsigned int level; + + pte = lookup_address(VSYSCALL_ADDR, &level); + if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte)) + return; + + target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR); + if (WARN_ON(!target_pte)) + return; + + *target_pte = *pte; + set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir)); +} +#else +static void __init pti_setup_vsyscall(void) { } +#endif + +static void __init +pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear) +{ + unsigned long addr; + + /* + * Clone the populated PMDs which cover start to end. These PMD areas + * can have holes. + */ + for (addr = start; addr < end; addr += PMD_SIZE) { + pmd_t *pmd, *target_pmd; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + + pgd = pgd_offset_k(addr); + if (WARN_ON(pgd_none(*pgd))) + return; + p4d = p4d_offset(pgd, addr); + if (WARN_ON(p4d_none(*p4d))) + return; + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + continue; + pmd = pmd_offset(pud, addr); + if (pmd_none(*pmd)) + continue; + + target_pmd = pti_user_pagetable_walk_pmd(addr); + if (WARN_ON(!target_pmd)) + return; + + /* + * Copy the PMD. That is, the kernelmode and usermode + * tables will share the last-level page tables of this + * address range. + */ + *target_pmd = pmd_clear_flags(*pmd, clear); + } +} + +/* + * Clone a single p4d (i.e. a top-level entry on 4-level systems and a + * next-level entry on 5-level systems). + */ +static void __init pti_clone_p4d(unsigned long addr) +{ + p4d_t *kernel_p4d, *user_p4d; + pgd_t *kernel_pgd; + + user_p4d = pti_user_pagetable_walk_p4d(addr); + kernel_pgd = pgd_offset_k(addr); + kernel_p4d = p4d_offset(kernel_pgd, addr); + *user_p4d = *kernel_p4d; +} + +/* + * Clone the CPU_ENTRY_AREA into the user space visible page table. + */ +static void __init pti_clone_user_shared(void) +{ + pti_clone_p4d(CPU_ENTRY_AREA_BASE); +} + +/* + * Clone the ESPFIX P4D into the user space visible page table + */ +static void __init pti_setup_espfix64(void) +{ +#ifdef CONFIG_X86_ESPFIX64 + pti_clone_p4d(ESPFIX_BASE_ADDR); +#endif +} + +/* + * Clone the populated PMDs of the entry and irqentry text and force them RO. + */ +static void __init pti_clone_entry_text(void) +{ + pti_clone_pmds((unsigned long) __entry_text_start, + (unsigned long) __irqentry_text_end, + _PAGE_RW | _PAGE_GLOBAL); +} + +/* + * Initialize kernel page table isolation + */ +void __init pti_init(void) +{ + if (!static_cpu_has(X86_FEATURE_PTI)) + return; + + pr_info("enabled\n"); + + pti_clone_user_shared(); + pti_clone_entry_text(); + pti_setup_espfix64(); + pti_setup_vsyscall(); +} diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 3118392cdf75..a1561957dccb 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -28,6 +28,38 @@ * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi */ +/* + * We get here when we do something requiring a TLB invalidation + * but cannot immediately invalidate all of the contexts.
We do the + * necessary invalidation by clearing out the 'ctx_id' which + * forces a TLB flush when the context is loaded. + */ +void clear_asid_other(void) +{ + u16 asid; + + /* + * This is only expected to be set if we have disabled + * kernel _PAGE_GLOBAL pages. + */ + if (!static_cpu_has(X86_FEATURE_PTI)) { + WARN_ON_ONCE(1); + return; + } + + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { + /* Do not need to flush the current asid */ + if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) + continue; + /* + * Make sure the next time we go to switch to + * this asid, we do a flush: + */ + this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0); + } + this_cpu_write(cpu_tlbstate.invalidate_other, false); +} + atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); @@ -42,6 +74,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, return; } + if (this_cpu_read(cpu_tlbstate.invalidate_other)) + clear_asid_other(); + for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) { if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != next->context.ctx_id) @@ -65,6 +100,25 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, *need_flush = true; } +static void load_new_mm_cr3(pgd_t *pgdir, u16 new_asid, bool need_flush) +{ + unsigned long new_mm_cr3; + + if (need_flush) { + invalidate_user_asid(new_asid); + new_mm_cr3 = build_cr3(pgdir, new_asid); + } else { + new_mm_cr3 = build_cr3_noflush(pgdir, new_asid); + } + + /* + * Caution: many callers of this function expect + * that load_cr3() is serializing and orders TLB + * fills with respect to the mm_cpumask writes. + */ + write_cr3(new_mm_cr3); +} + void leave_mm(int cpu) { struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm); @@ -128,7 +182,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, * isn't free. */ #ifdef CONFIG_DEBUG_VM - if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev, prev_asid))) { + if (WARN_ON_ONCE(__read_cr3() != build_cr3(real_prev->pgd, prev_asid))) { /* * If we were to BUG here, we'd be very likely to kill * the system so hard that we don't see the call trace. @@ -195,7 +249,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, if (need_flush) { this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id); this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); - write_cr3(build_cr3(next, new_asid)); + load_new_mm_cr3(next->pgd, new_asid, true); /* * NB: This gets called via leave_mm() in the idle path @@ -208,7 +262,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); } else { /* The new ASID is already up to date. */ - write_cr3(build_cr3_noflush(next, new_asid)); + load_new_mm_cr3(next->pgd, new_asid, false); /* See above wrt _rcuidle. */ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); @@ -288,7 +342,7 @@ void initialize_tlbstate_and_flush(void) !(cr4_read_shadow() & X86_CR4_PCIDE)); /* Force ASID 0 and force a TLB flush. */ - write_cr3(build_cr3(mm, 0)); + write_cr3(build_cr3(mm->pgd, 0)); /* Reinitialize tlbstate. 
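 * The write_cr3() above forced ASID 0 and a full flush; bring the software copy back in sync with that.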
*/ this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); @@ -551,7 +605,7 @@ static void do_kernel_range_flush(void *info) /* flush range by one by one 'invlpg' */ for (addr = f->start; addr < f->end; addr += PAGE_SIZE) - __flush_tlb_single(addr); + __flush_tlb_one(addr); } void flush_tlb_kernel_range(unsigned long start, unsigned long end) diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6a151ce70e86..d87ac96e37ed 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -196,6 +196,9 @@ static pgd_t *efi_pgd; * because we want to avoid inserting EFI region mappings (EFI_VA_END * to EFI_VA_START) into the standard kernel page tables. Everything * else can be shared, see efi_sync_low_kernel_mappings(). + * + * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the + * allocation. */ int __init efi_alloc_page_tables(void) { @@ -208,7 +211,7 @@ int __init efi_alloc_page_tables(void) return 0; gfp_mask = GFP_KERNEL | __GFP_ZERO; - efi_pgd = (pgd_t *)__get_free_page(gfp_mask); + efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER); if (!efi_pgd) return -ENOMEM; diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 8a99a2e96537..5b513ccffde4 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -592,7 +592,18 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff, /* * Update the first page pointer to skip over the CSH header. */ - cap_info->pages[0] += csh->headersize; + cap_info->phys[0] += csh->headersize; + + /* + * cap_info->capsule should point at a virtual mapping of the entire + * capsule, starting at the capsule header. Our image has the Quark + * security header prepended, so we cannot rely on the default vmap() + * mapping created by the generic capsule code. + * Given that the Quark firmware does not appear to care about the + * virtual mapping, let's just point cap_info->capsule at our copy + * of the capsule header. + */ + cap_info->capsule = &cap_info->header; return 1; } diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index f44c0bc95aa2..8538a6723171 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -299,7 +299,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, local_flush_tlb(); stat->d_alltlb++; } else { - __flush_tlb_one(msg->address); + __flush_tlb_single(msg->address); stat->d_onetlb++; } stat->d_requestee++; diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c index 5f6fd860820a..e4cb9f4cde8a 100644 --- a/arch/x86/platform/uv/uv_irq.c +++ b/arch/x86/platform/uv/uv_irq.c @@ -128,7 +128,7 @@ static void uv_domain_free(struct irq_domain *domain, unsigned int virq, * on the specified blade to allow the sending of MSIs to the specified CPU. 
*/ static int uv_domain_activate(struct irq_domain *domain, - struct irq_data *irq_data, bool early) + struct irq_data *irq_data, bool reserve) { uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data); return 0; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index d669e9d89001..c9081c6671f0 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1,8 +1,12 @@ +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +#include <linux/bootmem.h> +#endif #include <linux/cpu.h> #include <linux/kexec.h> #include <xen/features.h> #include <xen/page.h> +#include <xen/interface/memory.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> @@ -331,3 +335,80 @@ void xen_arch_unregister_cpu(int num) } EXPORT_SYMBOL(xen_arch_unregister_cpu); #endif + +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +void __init arch_xen_balloon_init(struct resource *hostmem_resource) +{ + struct xen_memory_map memmap; + int rc; + unsigned int i, last_guest_ram; + phys_addr_t max_addr = PFN_PHYS(max_pfn); + struct e820_table *xen_e820_table; + const struct e820_entry *entry; + struct resource *res; + + if (!xen_initial_domain()) + return; + + xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL); + if (!xen_e820_table) + return; + + memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries); + set_xen_guest_handle(memmap.buffer, xen_e820_table->entries); + rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap); + if (rc) { + pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc); + goto out; + } + + last_guest_ram = 0; + for (i = 0; i < memmap.nr_entries; i++) { + if (xen_e820_table->entries[i].addr >= max_addr) + break; + if (xen_e820_table->entries[i].type == E820_TYPE_RAM) + last_guest_ram = i; + } + + entry = &xen_e820_table->entries[last_guest_ram]; + if (max_addr >= entry->addr + entry->size) + goto out; /* No unallocated host RAM. */ + + hostmem_resource->start = max_addr; + hostmem_resource->end = entry->addr + entry->size; + + /* + * Mark non-RAM regions between the end of dom0 RAM and end of host RAM + * as unavailable. The rest of that region can be used for hotplug-based + * ballooning. + */ + for (; i < memmap.nr_entries; i++) { + entry = &xen_e820_table->entries[i]; + + if (entry->type == E820_TYPE_RAM) + continue; + + if (entry->addr >= hostmem_resource->end) + break; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + goto out; + + res->name = "Unavailable host RAM"; + res->start = entry->addr; + res->end = (entry->addr + entry->size < hostmem_resource->end) ? 
+ entry->addr + entry->size : hostmem_resource->end; + rc = insert_resource(hostmem_resource, res); + if (rc) { + pr_warn("%s: Can't insert [%llx - %llx) (%d)\n", + __func__, res->start, res->end, rc); + kfree(res); + goto out; + } + } + + out: + kfree(xen_e820_table); +} +#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 7beeee1443b3..c047f42552e1 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -88,6 +88,8 @@ #include "multicalls.h" #include "pmu.h" +#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */ + void *xen_initial_gdt; static int xen_cpu_up_prepare_pv(unsigned int cpu); @@ -1258,6 +1260,7 @@ asmlinkage __visible void __init xen_start_kernel(void) __userpte_alloc_gfp &= ~__GFP_HIGHMEM; /* Work out if we support NX */ + get_cpu_cap(&boot_cpu_data); x86_configure_nx(); /* Get mfn list */ diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 6cf801ca1142..4d62c071b166 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1902,6 +1902,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* Graft it onto L4[511][510] */ copy_page(level2_kernel_pgt, l2); + /* + * Zap execute permission from the ident map. Due to the sharing of + * L1 entries we need to do this in the L2. + */ + if (__supported_pte_mask & _PAGE_NX) { + for (i = 0; i < PTRS_PER_PMD; ++i) { + if (pmd_none(level2_ident_pgt[i])) + continue; + level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX); + } + } + /* Copy the initial P->M table mappings if necessary. */ i = pgd_index(xen_start_info->mfn_list); if (i && i < pgd_index(__START_KERNEL_map)) @@ -2261,7 +2273,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) switch (idx) { case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: - case FIX_RO_IDT: #ifdef CONFIG_X86_32 case FIX_WP_TEST: # ifdef CONFIG_HIGHMEM @@ -2272,7 +2283,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) #endif case FIX_TEXT_POKE0: case FIX_TEXT_POKE1: - case FIX_CPU_ENTRY_AREA_TOP ... 
FIX_CPU_ENTRY_AREA_BOTTOM: /* All local page mappings */ pte = pfn_pte(phys, prot); break; diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index c114ca767b3b..6e0d2086eacb 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -808,7 +808,6 @@ char * __init xen_memory_setup(void) addr = xen_e820_table.entries[0].addr; size = xen_e820_table.entries[0].size; while (i < xen_e820_table.nr_entries) { - bool discard = false; chunk_size = size; type = xen_e820_table.entries[i].type; @@ -824,11 +823,10 @@ char * __init xen_memory_setup(void) xen_add_extra_mem(pfn_s, n_pfns); xen_max_p2m_pfn = pfn_s + n_pfns; } else - discard = true; + type = E820_TYPE_UNUSABLE; } - if (!discard) - xen_align_and_add_e820_region(addr, chunk_size, type); + xen_align_and_add_e820_region(addr, chunk_size, type); addr += chunk_size; size -= chunk_size; diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 415a54ced4d6..35d4dcea381f 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -664,7 +664,7 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) unsigned int i; list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { - ctx->rcvused -= rsgl->sg_num_bytes; + atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused); af_alg_free_sg(&rsgl->sgl); list_del(&rsgl->list); if (rsgl != &areq->first_rsgl) @@ -1138,12 +1138,6 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, if (!af_alg_readable(sk)) break; - if (!ctx->used) { - err = af_alg_wait_for_data(sk, flags); - if (err) - return err; - } - seglen = min_t(size_t, (maxsize - len), msg_data_left(msg)); @@ -1169,7 +1163,7 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, areq->last_rsgl = rsgl; len += err; - ctx->rcvused += err; + atomic_add(err, &ctx->rcvused); rsgl->sg_num_bytes = err; iov_iter_advance(&msg->msg_iter, err); } diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 48b34e9c6834..e9885a35ef6e 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -111,6 +111,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t usedpages = 0; /* [in] RX bufs to be used from user */ size_t processed = 0; /* [in] TX bufs to be consumed */ + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* * Data length provided by caller via sendmsg/sendpage that has not * yet been processed. @@ -285,6 +291,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, /* AIO operation */ sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. */ + areq->outlen = outlen; + aead_request_set_callback(&areq->cra_u.aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_async_cb, areq); @@ -292,12 +302,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg, crypto_aead_decrypt(&areq->cra_u.aead_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) { - /* Remember output size that will be generated. 
*/ - areq->outlen = outlen; - + if (err == -EINPROGRESS || err == -EBUSY) return -EIOCBQUEUED; - } sock_put(sk); } else { @@ -565,7 +571,7 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; - ctx->rcvused = 0; + atomic_set(&ctx->rcvused, 0); ctx->more = 0; ctx->merge = 0; ctx->enc = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 30cff827dd8f..c5c47b680152 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -72,6 +72,12 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, int err = 0; size_t len = 0; + if (!ctx->used) { + err = af_alg_wait_for_data(sk, flags); + if (err) + return err; + } + /* Allocate cipher request for current operation. */ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) + crypto_skcipher_reqsize(tfm)); @@ -119,6 +125,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, /* AIO operation */ sock_hold(sk); areq->iocb = msg->msg_iocb; + + /* Remember output size that will be generated. */ + areq->outlen = len; + skcipher_request_set_callback(&areq->cra_u.skcipher_req, CRYPTO_TFM_REQ_MAY_SLEEP, af_alg_async_cb, areq); @@ -127,12 +137,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, crypto_skcipher_decrypt(&areq->cra_u.skcipher_req); /* AIO operation in progress */ - if (err == -EINPROGRESS || err == -EBUSY) { - /* Remember output size that will be generated. */ - areq->outlen = len; - + if (err == -EINPROGRESS || err == -EBUSY) return -EIOCBQUEUED; - } sock_put(sk); } else { @@ -384,7 +390,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) INIT_LIST_HEAD(&ctx->tsgl_list); ctx->len = len; ctx->used = 0; - ctx->rcvused = 0; + atomic_set(&ctx->rcvused, 0); ctx->more = 0; ctx->merge = 0; ctx->enc = 0; diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c index db1bc3147bc4..600afa99941f 100644 --- a/crypto/chacha20poly1305.c +++ b/crypto/chacha20poly1305.c @@ -610,6 +610,11 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, algt->mask)); if (IS_ERR(poly)) return PTR_ERR(poly); + poly_hash = __crypto_hash_alg_common(poly); + + err = -EINVAL; + if (poly_hash->digestsize != POLY1305_DIGEST_SIZE) + goto out_put_poly; err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); @@ -618,7 +623,6 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, ctx = aead_instance_ctx(inst); ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; - poly_hash = __crypto_hash_alg_common(poly); err = crypto_init_ahash_spawn(&ctx->poly, poly_hash, aead_crypto_instance(inst)); if (err) diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c index 4e6472658852..eca04d3729b3 100644 --- a/crypto/mcryptd.c +++ b/crypto/mcryptd.c @@ -81,6 +81,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue, pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); INIT_WORK(&cpu_queue->work, mcryptd_queue_worker); + spin_lock_init(&cpu_queue->q_lock); } return 0; } @@ -104,15 +105,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue, int cpu, err; struct mcryptd_cpu_queue *cpu_queue; - cpu = get_cpu(); - cpu_queue = this_cpu_ptr(queue->cpu_queue); - rctx->tag.cpu = cpu; + cpu_queue = raw_cpu_ptr(queue->cpu_queue); + spin_lock(&cpu_queue->q_lock); + cpu = smp_processor_id(); + rctx->tag.cpu = smp_processor_id(); err = 
crypto_enqueue_request(&cpu_queue->queue, request); pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n", cpu, cpu_queue, request); + spin_unlock(&cpu_queue->q_lock); queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); - put_cpu(); return err; } @@ -161,16 +163,11 @@ static void mcryptd_queue_worker(struct work_struct *work) cpu_queue = container_of(work, struct mcryptd_cpu_queue, work); i = 0; while (i < MCRYPTD_BATCH || single_task_running()) { - /* - * preempt_disable/enable is used to prevent - * being preempted by mcryptd_enqueue_request() - */ - local_bh_disable(); - preempt_disable(); + + spin_lock_bh(&cpu_queue->q_lock); backlog = crypto_get_backlog(&cpu_queue->queue); req = crypto_dequeue_request(&cpu_queue->queue); - preempt_enable(); - local_bh_enable(); + spin_unlock_bh(&cpu_queue->q_lock); if (!req) { mcryptd_opportunistic_flush(); @@ -185,7 +182,7 @@ static void mcryptd_queue_worker(struct work_struct *work) ++i; } if (cpu_queue->queue.qlen) - queue_work(kcrypto_wq, &cpu_queue->work); + queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work); } void mcryptd_flusher(struct work_struct *__work) diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index ee9cfb99fe25..f8ec3d4ba4a8 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -254,6 +254,14 @@ static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) crypto_free_aead(ctx->child); } +static void pcrypt_free(struct aead_instance *inst) +{ + struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); + + crypto_drop_aead(&ctx->spawn); + kfree(inst); +} + static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) { @@ -319,6 +327,8 @@ static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, inst->alg.encrypt = pcrypt_aead_encrypt; inst->alg.decrypt = pcrypt_aead_decrypt; + inst->free = pcrypt_free; + err = aead_register_instance(tmpl, inst); if (err) goto out_drop_aead; @@ -349,14 +359,6 @@ static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) return -EINVAL; } -static void pcrypt_free(struct crypto_instance *inst) -{ - struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); - - crypto_drop_aead(&ctx->spawn); - kfree(inst); -} - static int pcrypt_cpumask_change_notify(struct notifier_block *self, unsigned long val, void *data) { @@ -469,7 +471,6 @@ static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) static struct crypto_template pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, - .free = pcrypt_free, .module = THIS_MODULE, }; diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 778e0ff42bfa..11af5fd6a443 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c @@ -449,6 +449,8 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, walk->total = req->cryptlen; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -456,9 +458,6 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, scatterwalk_start(&walk->in, req->src); scatterwalk_start(&walk->out, req->dst); - walk->iv = req->iv; - walk->oiv = req->iv; - walk->flags &= ~SKCIPHER_WALK_SLEEP; walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
SKCIPHER_WALK_SLEEP : 0; @@ -510,6 +509,8 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, int err; walk->nbytes = 0; + walk->iv = req->iv; + walk->oiv = req->iv; if (unlikely(!walk->total)) return 0; @@ -525,9 +526,6 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, scatterwalk_done(&walk->in, 0, walk->total); scatterwalk_done(&walk->out, 0, walk->total); - walk->iv = req->iv; - walk->oiv = req->iv; - if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) walk->flags |= SKCIPHER_WALK_SLEEP; else diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index ff2580e7611d..abeb4df4f22e 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -1670,6 +1670,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, dev_name(&adev_dimm->dev)); return -ENXIO; } + /* + * Record nfit_mem for the notification path to track back to + * the nfit sysfs attributes for this dimm device object. + */ + dev_set_drvdata(&adev_dimm->dev, nfit_mem); /* * Until standardization materializes we need to consider 4 @@ -1752,9 +1757,11 @@ static void shutdown_dimm_notify(void *data) sysfs_put(nfit_mem->flags_attr); nfit_mem->flags_attr = NULL; } - if (adev_dimm) + if (adev_dimm) { acpi_remove_notify_handler(adev_dimm->handle, ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify); + dev_set_drvdata(&adev_dimm->dev, NULL); + } } mutex_unlock(&acpi_desc->init_mutex); } diff --git a/drivers/android/binder.c b/drivers/android/binder.c index bccec9de0533..a7ecfde66b7b 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -482,7 +482,8 @@ enum binder_deferred_state { * @tsk task_struct for group_leader of process * (invariant after initialized) * @files files_struct for process - * (invariant after initialized) + * (protected by @files_lock) + * @files_lock mutex to protect @files * @deferred_work_node: element for binder_deferred_list * (protected by binder_deferred_lock) * @deferred_work: bitmap of deferred work to perform @@ -530,6 +531,7 @@ struct binder_proc { int pid; struct task_struct *tsk; struct files_struct *files; + struct mutex files_lock; struct hlist_node deferred_work_node; int deferred_work; bool is_dead; @@ -877,20 +879,26 @@ static void binder_inc_node_tmpref_ilocked(struct binder_node *node); static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) { - struct files_struct *files = proc->files; unsigned long rlim_cur; unsigned long irqs; + int ret; - if (files == NULL) - return -ESRCH; - - if (!lock_task_sighand(proc->tsk, &irqs)) - return -EMFILE; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + ret = -ESRCH; + goto err; + } + if (!lock_task_sighand(proc->tsk, &irqs)) { + ret = -EMFILE; + goto err; + } rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE); unlock_task_sighand(proc->tsk, &irqs); - return __alloc_fd(files, 0, rlim_cur, flags); + ret = __alloc_fd(proc->files, 0, rlim_cur, flags); +err: + mutex_unlock(&proc->files_lock); + return ret; } /* @@ -899,8 +907,10 @@ static int task_get_unused_fd_flags(struct binder_proc *proc, int flags) static void task_fd_install( struct binder_proc *proc, unsigned int fd, struct file *file) { + mutex_lock(&proc->files_lock); if (proc->files) __fd_install(proc->files, fd, file); + mutex_unlock(&proc->files_lock); } /* @@ -910,9 +920,11 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) { int retval; - if (proc->files == NULL) - return -ESRCH; - + mutex_lock(&proc->files_lock); + if (proc->files == NULL) { + retval = -ESRCH; + goto err; + } 
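/*
 * [Editorial aside, not part of the diff] The binder hunks in this
 * commit all apply one pattern: proc->files used to be checked and then
 * dereferenced with no synchronization against the deferred-teardown
 * path that NULLs it, so every access now happens under a dedicated
 * proc->files_lock mutex. A minimal sketch of that check-then-use
 * shape follows; the demo_* names are illustrative, not the driver's.
 */
#include <linux/errno.h>
#include <linux/fdtable.h>
#include <linux/mutex.h>

struct demo_proc {
	struct mutex files_lock;	/* protects @files */
	struct files_struct *files;	/* NULLed on teardown, under the lock */
};

static int demo_close_fd(struct demo_proc *proc, unsigned int fd)
{
	int ret;

	mutex_lock(&proc->files_lock);
	if (!proc->files) {
		/* raced with teardown; same -ESRCH the driver returns */
		ret = -ESRCH;
		goto out;
	}
	ret = __close_fd(proc->files, fd);
out:
	mutex_unlock(&proc->files_lock);
	return ret;
}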
retval = __close_fd(proc->files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || @@ -920,7 +932,8 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd) retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; - +err: + mutex_unlock(&proc->files_lock); return retval; } @@ -4627,7 +4640,9 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) ret = binder_alloc_mmap_handler(&proc->alloc, vma); if (ret) return ret; + mutex_lock(&proc->files_lock); proc->files = get_files_struct(current); + mutex_unlock(&proc->files_lock); return 0; err_bad_arg: @@ -4651,6 +4666,7 @@ static int binder_open(struct inode *nodp, struct file *filp) spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; + mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); proc->default_priority = task_nice(current); binder_dev = container_of(filp->private_data, struct binder_device, @@ -4903,9 +4919,11 @@ static void binder_deferred_func(struct work_struct *work) files = NULL; if (defer & BINDER_DEFERRED_PUT_FILES) { + mutex_lock(&proc->files_lock); files = proc->files; if (files) proc->files = NULL; + mutex_unlock(&proc->files_lock); } if (defer & BINDER_DEFERRED_FLUSH) diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index eb3af2739537..07532d83be0b 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -186,6 +186,11 @@ static void cache_associativity(struct cacheinfo *this_leaf) this_leaf->ways_of_associativity = (size / nr_sets) / line_size; } +static bool cache_node_is_unified(struct cacheinfo *this_leaf) +{ + return of_property_read_bool(this_leaf->of_node, "cache-unified"); +} + static void cache_of_override_properties(unsigned int cpu) { int index; @@ -194,6 +199,14 @@ static void cache_of_override_properties(unsigned int cpu) for (index = 0; index < cache_leaves(cpu); index++) { this_leaf = this_cpu_ci->info_list + index; + /* + * init_cache_level must setup the cache level correctly + * overriding the architecturally specified levels, so + * if type is NONE at this stage, it should be unified + */ + if (this_leaf->type == CACHE_TYPE_NOCACHE && + cache_node_is_unified(this_leaf)) + this_leaf->type = CACHE_TYPE_UNIFIED; cache_size(this_leaf); cache_get_line_size(this_leaf); cache_nr_sets(this_leaf); diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c index 328ca93781cf..1b76d9585902 100644 --- a/drivers/bus/sunxi-rsb.c +++ b/drivers/bus/sunxi-rsb.c @@ -178,6 +178,7 @@ static struct bus_type sunxi_rsb_bus = { .match = sunxi_rsb_device_match, .probe = sunxi_rsb_device_probe, .remove = sunxi_rsb_device_remove, + .uevent = of_device_uevent_modalias, }; static void sunxi_rsb_dev_release(struct device *dev) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 647d056df88c..b56c11f51baf 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -220,7 +220,8 @@ static bool clk_core_is_enabled(struct clk_core *core) ret = core->ops->is_enabled(core->hw); done: - clk_pm_runtime_put(core); + if (core->dev) + pm_runtime_put(core->dev); return ret; } @@ -1564,6 +1565,9 @@ static void clk_change_rate(struct clk_core *core) best_parent_rate = core->parent->rate; } + if (clk_pm_runtime_get(core)) + return; + if (core->flags & CLK_SET_RATE_UNGATE) { unsigned long flags; @@ -1634,6 +1638,8 @@ static void clk_change_rate(struct clk_core *core) /* handle the new child who might not be in core->children yet 
*/ if (core->new_child) clk_change_rate(core->new_child); + + clk_pm_runtime_put(core); } static int clk_core_set_rate_nolock(struct clk_core *core, diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c index a1a634253d6f..f00d8758ba24 100644 --- a/drivers/clk/sunxi/clk-sun9i-mmc.c +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -16,6 +16,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/delay.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> @@ -83,9 +84,20 @@ static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev, return 0; } +static int sun9i_mmc_reset_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + sun9i_mmc_reset_assert(rcdev, id); + udelay(10); + sun9i_mmc_reset_deassert(rcdev, id); + + return 0; +} + static const struct reset_control_ops sun9i_mmc_reset_ops = { .assert = sun9i_mmc_reset_assert, .deassert = sun9i_mmc_reset_deassert, + .reset = sun9i_mmc_reset_reset, }; static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig index 3e104f5aa0c2..b56b3f711d94 100644 --- a/drivers/crypto/chelsio/Kconfig +++ b/drivers/crypto/chelsio/Kconfig @@ -5,6 +5,7 @@ config CRYPTO_DEV_CHELSIO select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_AUTHENC + select CRYPTO_GF128MUL ---help--- The Chelsio Crypto Co-processor driver for T6 adapters. diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c index 89ba9e85c0f3..4bcef78a08aa 100644 --- a/drivers/crypto/inside-secure/safexcel.c +++ b/drivers/crypto/inside-secure/safexcel.c @@ -607,6 +607,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv ndesc = ctx->handle_result(priv, ring, sreq->req, &should_complete, &ret); if (ndesc < 0) { + kfree(sreq); dev_err(priv->dev, "failed to handle result (%d)", ndesc); return; } diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 5438552bc6d7..fcc0a606d748 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -14,6 +14,7 @@ #include <crypto/aes.h> #include <crypto/skcipher.h> +#include <crypto/internal/skcipher.h> #include "safexcel.h" @@ -33,6 +34,10 @@ struct safexcel_cipher_ctx { unsigned int key_len; }; +struct safexcel_cipher_req { + bool needs_inv; +}; + static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, struct crypto_async_request *async, struct safexcel_command_desc *cdesc, @@ -126,9 +131,9 @@ static int safexcel_context_control(struct safexcel_cipher_ctx *ctx, return 0; } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, - bool *should_complete, int *ret) +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) { struct skcipher_request *req = skcipher_request_cast(async); struct safexcel_result_desc *rdesc; @@ -265,7 +270,6 @@ static int safexcel_aes_send(struct crypto_async_request *async, spin_unlock_bh(&priv->ring[ring].egress_lock); request->req = &req->base; - ctx->base.handle_result = safexcel_handle_result; *commands = n_cdesc; *results = n_rdesc; @@ -341,8 +345,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ring = safexcel_select_ring(priv); ctx->base.ring = ring; - 
ctx->base.needs_inv = false; - ctx->base.send = safexcel_aes_send; spin_lock_bh(&priv->ring[ring].queue_lock); enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); @@ -359,6 +361,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, return ndesc; } +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); + int err; + + if (sreq->needs_inv) { + sreq->needs_inv = false; + err = safexcel_handle_inv_result(priv, ring, async, + should_complete, ret); + } else { + err = safexcel_handle_req_result(priv, ring, async, + should_complete, ret); + } + + return err; +} + static int safexcel_cipher_send_inv(struct crypto_async_request *async, int ring, struct safexcel_request *request, int *commands, int *results) @@ -368,8 +390,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, struct safexcel_crypto_priv *priv = ctx->priv; int ret; - ctx->base.handle_result = safexcel_handle_inv_result; - ret = safexcel_invalidate_cache(async, &ctx->base, priv, ctx->base.ctxr_dma, ring, request); if (unlikely(ret)) @@ -381,28 +401,46 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *async, return 0; } +static int safexcel_send(struct crypto_async_request *async, + int ring, struct safexcel_request *request, + int *commands, int *results) +{ + struct skcipher_request *req = skcipher_request_cast(async); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); + int ret; + + if (sreq->needs_inv) + ret = safexcel_cipher_send_inv(async, ring, request, + commands, results); + else + ret = safexcel_aes_send(async, ring, request, + commands, results); + return ret; +} + static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - struct skcipher_request req; + SKCIPHER_REQUEST_ON_STACK(req, __crypto_skcipher_cast(tfm)); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); struct safexcel_inv_result result = {}; int ring = ctx->base.ring; - memset(&req, 0, sizeof(struct skcipher_request)); + memset(req, 0, sizeof(struct skcipher_request)); /* create invalidation request */ init_completion(&result.completion); - skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, - safexcel_inv_complete, &result); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + safexcel_inv_complete, &result); - skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm)); - ctx = crypto_tfm_ctx(req.base.tfm); + skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm)); + ctx = crypto_tfm_ctx(req->base.tfm); ctx->base.exit_inv = true; - ctx->base.send = safexcel_cipher_send_inv; + sreq->needs_inv = true; spin_lock_bh(&priv->ring[ring].queue_lock); - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); spin_unlock_bh(&priv->ring[ring].queue_lock); if (!priv->ring[ring].need_dequeue) @@ -424,19 +462,21 @@ static int safexcel_aes(struct skcipher_request *req, enum safexcel_cipher_direction dir, u32 mode) { struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct safexcel_cipher_req *sreq = skcipher_request_ctx(req); struct safexcel_crypto_priv *priv = ctx->priv; int ret, ring; + sreq->needs_inv = false; ctx->direction = dir; 
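/*
 * [Editorial aside, not part of the diff] The safexcel rework above and
 * below stops rewriting the tfm-wide ctx->base.send / handle_result
 * pointers per request -- racy once two requests for the same tfm are
 * in flight -- and instead records "needs invalidation" in per-request
 * state, with a single dispatcher installed once at cra_init time.
 * A minimal sketch of that dispatch shape, assuming the surrounding
 * driver declarations; demo_* names are illustrative:
 */
struct demo_cipher_req {
	bool needs_inv;			/* request-local, never shared */
};

static int demo_send(struct crypto_async_request *async, int ring,
		     struct safexcel_request *request,
		     int *commands, int *results)
{
	struct skcipher_request *req = skcipher_request_cast(async);
	struct demo_cipher_req *sreq = skcipher_request_ctx(req);

	/* Branch on per-request state instead of mutating the tfm ctx. */
	if (sreq->needs_inv)
		return safexcel_cipher_send_inv(async, ring, request,
						commands, results);
	return safexcel_aes_send(async, ring, request, commands, results);
}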
ctx->mode = mode; if (ctx->base.ctxr) { - if (ctx->base.needs_inv) - ctx->base.send = safexcel_cipher_send_inv; + if (ctx->base.needs_inv) { + sreq->needs_inv = true; + ctx->base.needs_inv = false; + } } else { ctx->base.ring = safexcel_select_ring(priv); - ctx->base.send = safexcel_aes_send; - ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, EIP197_GFP_FLAGS(req->base), &ctx->base.ctxr_dma); @@ -476,6 +516,11 @@ static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm) alg.skcipher.base); ctx->priv = tmpl->priv; + ctx->base.send = safexcel_send; + ctx->base.handle_result = safexcel_handle_result; + + crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), + sizeof(struct safexcel_cipher_req)); return 0; } diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 74feb6227101..0c5a5820b06e 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -32,9 +32,10 @@ struct safexcel_ahash_req { bool last_req; bool finish; bool hmac; + bool needs_inv; u8 state_sz; /* expected sate size, only set once */ - u32 state[SHA256_DIGEST_SIZE / sizeof(u32)]; + u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32)); u64 len; u64 processed; @@ -119,15 +120,15 @@ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx, } } -static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, - struct crypto_async_request *async, - bool *should_complete, int *ret) +static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) { struct safexcel_result_desc *rdesc; struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); struct safexcel_ahash_req *sreq = ahash_request_ctx(areq); - int cache_len, result_sz = sreq->state_sz; + int cache_len; *ret = 0; @@ -148,8 +149,8 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, spin_unlock_bh(&priv->ring[ring].egress_lock); if (sreq->finish) - result_sz = crypto_ahash_digestsize(ahash); - memcpy(sreq->state, areq->result, result_sz); + memcpy(areq->result, sreq->state, + crypto_ahash_digestsize(ahash)); dma_unmap_sg(priv->dev, areq->src, sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE); @@ -165,9 +166,9 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, return 1; } -static int safexcel_ahash_send(struct crypto_async_request *async, int ring, - struct safexcel_request *request, int *commands, - int *results) +static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, + struct safexcel_request *request, + int *commands, int *results) { struct ahash_request *areq = ahash_request_cast(async); struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); @@ -273,7 +274,7 @@ send_command: /* Add the token */ safexcel_hash_token(first_cdesc, len, req->state_sz); - ctx->base.result_dma = dma_map_single(priv->dev, areq->result, + ctx->base.result_dma = dma_map_single(priv->dev, req->state, req->state_sz, DMA_FROM_DEVICE); if (dma_mapping_error(priv->dev, ctx->base.result_dma)) { ret = -EINVAL; @@ -292,7 +293,6 @@ send_command: req->processed += len; request->req = &areq->base; - ctx->base.handle_result = safexcel_handle_result; *commands = n_cdesc; *results = 1; @@ -374,8 +374,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, ring = safexcel_select_ring(priv); ctx->base.ring 
= ring; - ctx->base.needs_inv = false; - ctx->base.send = safexcel_ahash_send; spin_lock_bh(&priv->ring[ring].queue_lock); enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async); @@ -392,6 +390,26 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, return 1; } +static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, + struct crypto_async_request *async, + bool *should_complete, int *ret) +{ + struct ahash_request *areq = ahash_request_cast(async); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + int err; + + if (req->needs_inv) { + req->needs_inv = false; + err = safexcel_handle_inv_result(priv, ring, async, + should_complete, ret); + } else { + err = safexcel_handle_req_result(priv, ring, async, + should_complete, ret); + } + + return err; +} + static int safexcel_ahash_send_inv(struct crypto_async_request *async, int ring, struct safexcel_request *request, int *commands, int *results) @@ -400,7 +418,6 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq)); int ret; - ctx->base.handle_result = safexcel_handle_inv_result; ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv, ctx->base.ctxr_dma, ring, request); if (unlikely(ret)) @@ -412,28 +429,46 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async, return 0; } +static int safexcel_ahash_send(struct crypto_async_request *async, + int ring, struct safexcel_request *request, + int *commands, int *results) +{ + struct ahash_request *areq = ahash_request_cast(async); + struct safexcel_ahash_req *req = ahash_request_ctx(areq); + int ret; + + if (req->needs_inv) + ret = safexcel_ahash_send_inv(async, ring, request, + commands, results); + else + ret = safexcel_ahash_send_req(async, ring, request, + commands, results); + return ret; +} + static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm) { struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); struct safexcel_crypto_priv *priv = ctx->priv; - struct ahash_request req; + AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm)); + struct safexcel_ahash_req *rctx = ahash_request_ctx(req); struct safexcel_inv_result result = {}; int ring = ctx->base.ring; - memset(&req, 0, sizeof(struct ahash_request)); + memset(req, 0, sizeof(struct ahash_request)); /* create invalidation request */ init_completion(&result.completion); - ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG, + ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, safexcel_inv_complete, &result); - ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm)); - ctx = crypto_tfm_ctx(req.base.tfm); + ahash_request_set_tfm(req, __crypto_ahash_cast(tfm)); + ctx = crypto_tfm_ctx(req->base.tfm); ctx->base.exit_inv = true; - ctx->base.send = safexcel_ahash_send_inv; + rctx->needs_inv = true; spin_lock_bh(&priv->ring[ring].queue_lock); - crypto_enqueue_request(&priv->ring[ring].queue, &req.base); + crypto_enqueue_request(&priv->ring[ring].queue, &req->base); spin_unlock_bh(&priv->ring[ring].queue_lock); if (!priv->ring[ring].need_dequeue) @@ -481,14 +516,16 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq) struct safexcel_crypto_priv *priv = ctx->priv; int ret, ring; - ctx->base.send = safexcel_ahash_send; + req->needs_inv = false; if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq); if (ctx->base.ctxr) { - if (ctx->base.needs_inv) - ctx->base.send = 
safexcel_ahash_send_inv; + if (ctx->base.needs_inv) { + ctx->base.needs_inv = false; + req->needs_inv = true; + } } else { ctx->base.ring = safexcel_select_ring(priv); ctx->base.ctxr = dma_pool_zalloc(priv->context_pool, @@ -622,6 +659,8 @@ static int safexcel_ahash_cra_init(struct crypto_tfm *tfm) struct safexcel_alg_template, alg.ahash); ctx->priv = tmpl->priv; + ctx->base.send = safexcel_ahash_send; + ctx->base.handle_result = safexcel_handle_result; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct safexcel_ahash_req)); diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 48de52cf2ecc..662e709812cc 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1625,6 +1625,7 @@ static int queue_cache_init(void) CWQ_ENTRY_SIZE, 0, NULL); if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; return -ENOMEM; } return 0; @@ -1634,6 +1635,8 @@ static void queue_cache_destroy(void) { kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); + queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; + queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; } static long spu_queue_register_workfn(void *arg) diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c index ec8ac5c4dd84..055e2e8f985a 100644 --- a/drivers/firmware/efi/capsule-loader.c +++ b/drivers/firmware/efi/capsule-loader.c @@ -20,10 +20,6 @@ #define NO_FURTHER_WRITE_ACTION -1 -#ifndef phys_to_page -#define phys_to_page(x) pfn_to_page((x) >> PAGE_SHIFT) -#endif - /** * efi_free_all_buff_pages - free all previous allocated buffer pages * @cap_info: pointer to current instance of capsule_info structure @@ -35,7 +31,7 @@ static void efi_free_all_buff_pages(struct capsule_info *cap_info) { while (cap_info->index > 0) - __free_page(phys_to_page(cap_info->pages[--cap_info->index])); + __free_page(cap_info->pages[--cap_info->index]); cap_info->index = NO_FURTHER_WRITE_ACTION; } @@ -71,6 +67,14 @@ int __efi_capsule_setup_info(struct capsule_info *cap_info) cap_info->pages = temp_page; + temp_page = krealloc(cap_info->phys, + pages_needed * sizeof(phys_addr_t *), + GFP_KERNEL | __GFP_ZERO); + if (!temp_page) + return -ENOMEM; + + cap_info->phys = temp_page; + return 0; } @@ -105,9 +109,24 @@ int __weak efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff, **/ static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info) { + bool do_vunmap = false; int ret; - ret = efi_capsule_update(&cap_info->header, cap_info->pages); + /* + * cap_info->capsule may have been assigned already by a quirk + * handler, so only overwrite it if it is NULL + */ + if (!cap_info->capsule) { + cap_info->capsule = vmap(cap_info->pages, cap_info->index, + VM_MAP, PAGE_KERNEL); + if (!cap_info->capsule) + return -ENOMEM; + do_vunmap = true; + } + + ret = efi_capsule_update(cap_info->capsule, cap_info->phys); + if (do_vunmap) + vunmap(cap_info->capsule); if (ret) { pr_err("capsule update failed\n"); return ret; @@ -165,10 +184,12 @@ static ssize_t efi_capsule_write(struct file *file, const char __user *buff, goto failed; } - cap_info->pages[cap_info->index++] = page_to_phys(page); + cap_info->pages[cap_info->index] = page; + cap_info->phys[cap_info->index] = page_to_phys(page); cap_info->page_bytes_remain = PAGE_SIZE; + cap_info->index++; } else { - page = phys_to_page(cap_info->pages[cap_info->index - 1]); + page = cap_info->pages[cap_info->index - 1]; } kbuff 
= kmap(page); @@ -252,6 +273,7 @@ static int efi_capsule_release(struct inode *inode, struct file *file) struct capsule_info *cap_info = file->private_data; kfree(cap_info->pages); + kfree(cap_info->phys); kfree(file->private_data); file->private_data = NULL; return 0; @@ -281,6 +303,13 @@ static int efi_capsule_open(struct inode *inode, struct file *file) return -ENOMEM; } + cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL); + if (!cap_info->phys) { + kfree(cap_info->pages); + kfree(cap_info); + return -ENOMEM; + } + file->private_data = cap_info; return 0; diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index dfcf56ee3c61..76861a00bb92 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -522,6 +522,7 @@ static struct of_device_id const bcm_kona_gpio_of_match[] = { * category than their parents, so it won't report false recursion. */ static struct lock_class_key gpio_lock_class; +static struct lock_class_key gpio_request_class; static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) @@ -531,7 +532,7 @@ static int bcm_kona_gpio_irq_map(struct irq_domain *d, unsigned int irq, ret = irq_set_chip_data(irq, d->host_data); if (ret < 0) return ret; - irq_set_lockdep_class(irq, &gpio_lock_class); + irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class); irq_set_chip_and_handler(irq, &bcm_gpio_irq_chip, handle_simple_irq); irq_set_noprobe(irq); diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 545d43a587b7..bb4f8cf18bd9 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -327,6 +327,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank( * category than their parents, so it won't report false recursion. */ static struct lock_class_key brcmstb_gpio_irq_lock_class; +static struct lock_class_key brcmstb_gpio_irq_request_class; static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq, @@ -346,7 +347,8 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq, ret = irq_set_chip_data(irq, &bank->gc); if (ret < 0) return ret; - irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class); + irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class, + &brcmstb_gpio_irq_request_class); irq_set_chip_and_handler(irq, &priv->irq_chip, handle_level_irq); irq_set_noprobe(irq); return 0; diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c index 23e771dba4c1..e85903eddc68 100644 --- a/drivers/gpio/gpio-reg.c +++ b/drivers/gpio/gpio-reg.c @@ -103,8 +103,8 @@ static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset) struct gpio_reg *r = to_gpio_reg(gc); int irq = r->irqs[offset]; - if (irq >= 0 && r->irq.domain) - irq = irq_find_mapping(r->irq.domain, irq); + if (irq >= 0 && r->irqdomain) + irq = irq_find_mapping(r->irqdomain, irq); return irq; } diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 8db47f671708..02fa8fe2292a 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -565,6 +565,7 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = { * than their parents, so it won't report false recursion. 
*/ static struct lock_class_key gpio_lock_class; +static struct lock_class_key gpio_request_class; static int tegra_gpio_probe(struct platform_device *pdev) { @@ -670,7 +671,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) bank = &tgi->bank_info[GPIO_BANK(gpio)]; - irq_set_lockdep_class(irq, &gpio_lock_class); + irq_set_lockdep_class(irq, &gpio_lock_class, + &gpio_request_class); irq_set_chip_data(irq, bank); irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq); } diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c index 2313af82fad3..acd59113e08b 100644 --- a/drivers/gpio/gpio-xgene-sb.c +++ b/drivers/gpio/gpio-xgene-sb.c @@ -139,7 +139,7 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio) static int xgene_gpio_sb_domain_activate(struct irq_domain *d, struct irq_data *irq_data, - bool early) + bool reserve) { struct xgene_gpio_sb *priv = d->host_data; u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index eb4528c87c0b..d6f3d9ee1350 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1074,7 +1074,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip) } if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, dev_fwnode(chip->parent)); acpi_gpiochip_request_regions(acpi_gpio); acpi_gpiochip_scan_gpios(acpi_gpio); diff --git a/drivers/gpio/gpiolib-devprop.c b/drivers/gpio/gpiolib-devprop.c index 27f383bda7d9..f748aa3e77f7 100644 --- a/drivers/gpio/gpiolib-devprop.c +++ b/drivers/gpio/gpiolib-devprop.c @@ -19,30 +19,27 @@ /** * devprop_gpiochip_set_names - Set GPIO line names using device properties * @chip: GPIO chip whose lines should be named, if possible + * @fwnode: Property Node containing the gpio-line-names property * * Looks for device property "gpio-line-names" and if it exists assigns * GPIO line names for the chip. The memory allocated for the assigned * names belong to the underlying firmware node and should not be released * by the caller. 
*/ -void devprop_gpiochip_set_names(struct gpio_chip *chip) +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode) { struct gpio_device *gdev = chip->gpiodev; const char **names; int ret, i; - if (!chip->parent) { - dev_warn(&gdev->dev, "GPIO chip parent is NULL\n"); - return; - } - - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", NULL, 0); if (ret < 0) return; if (ret != gdev->ngpio) { - dev_warn(chip->parent, + dev_warn(&gdev->dev, "names %d do not match number of GPIOs %d\n", ret, gdev->ngpio); return; @@ -52,10 +49,10 @@ void devprop_gpiochip_set_names(struct gpio_chip *chip) if (!names) return; - ret = device_property_read_string_array(chip->parent, "gpio-line-names", + ret = fwnode_property_read_string_array(fwnode, "gpio-line-names", names, gdev->ngpio); if (ret < 0) { - dev_warn(chip->parent, "failed to read GPIO line names\n"); + dev_warn(&gdev->dev, "failed to read GPIO line names\n"); kfree(names); return; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index e0d59e61b52f..72a0695d2ac3 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -493,7 +493,8 @@ int of_gpiochip_add(struct gpio_chip *chip) /* If the chip defines names itself, these take precedence */ if (!chip->names) - devprop_gpiochip_set_names(chip); + devprop_gpiochip_set_names(chip, + of_fwnode_handle(chip->of_node)); of_node_get(chip->of_node); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index aad84a6306c4..44332b793718 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -73,7 +73,8 @@ LIST_HEAD(gpio_devices); static void gpiochip_free_hogs(struct gpio_chip *chip); static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, - struct lock_class_key *key); + struct lock_class_key *lock_key, + struct lock_class_key *request_key); static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip); static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip); @@ -1100,7 +1101,8 @@ static void gpiochip_setup_devs(void) } int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, - struct lock_class_key *key) + struct lock_class_key *lock_key, + struct lock_class_key *request_key) { unsigned long flags; int status = 0; @@ -1246,7 +1248,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (status) goto err_remove_from_list; - status = gpiochip_add_irqchip(chip, key); + status = gpiochip_add_irqchip(chip, lock_key, request_key); if (status) goto err_remove_chip; @@ -1632,7 +1634,7 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, * This lock class tells lockdep that GPIO irqs are in a different * category than their parents, so it won't report false recursion. 
*/ - irq_set_lockdep_class(irq, chip->irq.lock_key); + irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key); irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler); /* Chips that use nested thread handlers have them marked */ if (chip->irq.threaded) @@ -1712,10 +1714,12 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) /** * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip * @gpiochip: the GPIO chip to add the IRQ chip to - * @lock_key: lockdep class + * @lock_key: lockdep class for IRQ lock + * @request_key: lockdep class for IRQ request */ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, - struct lock_class_key *lock_key) + struct lock_class_key *lock_key, + struct lock_class_key *request_key) { struct irq_chip *irqchip = gpiochip->irq.chip; const struct irq_domain_ops *ops; @@ -1753,6 +1757,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, gpiochip->to_irq = gpiochip_to_irq; gpiochip->irq.default_type = type; gpiochip->irq.lock_key = lock_key; + gpiochip->irq.request_key = request_key; if (gpiochip->irq.domain_ops) ops = gpiochip->irq.domain_ops; @@ -1850,7 +1855,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE * to have the core avoid setting up any default type in the hardware. * @threaded: whether this irqchip uses a nested thread handler - * @lock_key: lockdep class + * @lock_key: lockdep class for IRQ lock + * @request_key: lockdep class for IRQ request * * This function closely associates a certain irqchip with a certain * gpiochip, providing an irq domain to translate the local IRQs to @@ -1872,7 +1878,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, irq_flow_handler_t handler, unsigned int type, bool threaded, - struct lock_class_key *lock_key) + struct lock_class_key *lock_key, + struct lock_class_key *request_key) { struct device_node *of_node; @@ -1913,6 +1920,7 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, gpiochip->irq.default_type = type; gpiochip->to_irq = gpiochip_to_irq; gpiochip->irq.lock_key = lock_key; + gpiochip->irq.request_key = request_key; gpiochip->irq.domain = irq_domain_add_simple(of_node, gpiochip->ngpio, first_irq, &gpiochip_domain_ops, gpiochip); @@ -1940,7 +1948,8 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key); #else /* CONFIG_GPIOLIB_IRQCHIP */ static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip, - struct lock_class_key *key) + struct lock_class_key *lock_key, + struct lock_class_key *request_key) { return 0; } diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index af48322839c3..6c44d1652139 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -228,7 +228,8 @@ static inline int gpio_chip_hwgpio(const struct gpio_desc *desc) return desc - &desc->gdev->descs[0]; } -void devprop_gpiochip_set_names(struct gpio_chip *chip); +void devprop_gpiochip_set_names(struct gpio_chip *chip, + const struct fwnode_handle *fwnode); /* With descriptor prefix */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index da43813d67a4..5aeb5f8816f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev) PACKET3_MAP_QUEUES_PIPE(ring->pipe) | PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 
0 : 1)) | PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ - PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */ + PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index)); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f71fe6d2ddda..bb5fa895fb64 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct dm_connector_state *dm_state) { struct drm_display_mode *preferred_mode = NULL; - const struct drm_connector *drm_connector; + struct drm_connector *drm_connector; struct dc_stream_state *stream = NULL; struct drm_display_mode mode = *drm_mode; bool native_mode_found = false; @@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (!aconnector->dc_sink) { /* - * Exclude MST from creating fake_sink - * TODO: need to enable MST into fake_sink feature + * Create dc_sink when necessary to MST + * Don't apply fake_sink to MST */ - if (aconnector->mst_port) - goto stream_create_fail; + if (aconnector->mst_port) { + dm_dp_mst_dc_sink_create(drm_connector); + goto mst_dc_sink_create_done; + } if (create_fake_sink(aconnector)) goto stream_create_fail; @@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream_create_fail: dm_state_null: drm_connector_null: +mst_dc_sink_create_done: return stream; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 117521c6a6ed..0230250a1164 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -189,6 +189,8 @@ struct amdgpu_dm_connector { struct mutex hpd_lock; bool fake_enable; + + bool mst_connected; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index f8efb98b1fa7..638c2c2b5cd7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector, return ret; } +void dm_dp_mst_dc_sink_create(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct edid *edid; + struct dc_sink *dc_sink; + struct dc_sink_init_data init_params = { + .link = aconnector->dc_link, + .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + + edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); + + if (!edid) { + drm_mode_connector_update_edid_property( + &aconnector->base, + NULL); + return; + } + + aconnector->edid = edid; + + dc_sink = dc_link_add_remote_sink( + aconnector->dc_link, + (uint8_t *)aconnector->edid, + (aconnector->edid->extensions + 1) * EDID_LENGTH, + &init_params); + + dc_sink->priv = aconnector; + aconnector->dc_sink = dc_sink; + + amdgpu_dm_add_sink_to_freesync_module( + connector, aconnector->edid); + + drm_mode_connector_update_edid_property( + 
&aconnector->base, aconnector->edid); +} + static int dm_dp_mst_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); @@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, drm_mode_connector_set_path_property(connector, pathprop); drm_connector_list_iter_end(&conn_iter); + aconnector->mst_connected = true; return &aconnector->base; } } @@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, */ amdgpu_dm_connector_funcs_reset(connector); + aconnector->mst_connected = true; + DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n", aconnector, connector->base.id, aconnector->mst_port); @@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, drm_mode_connector_update_edid_property( &aconnector->base, NULL); + + aconnector->mst_connected = false; } static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) @@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr) drm_kms_helper_hotplug_event(dev); } +static void dm_dp_mst_link_status_reset(struct drm_connector *connector) +{ + mutex_lock(&connector->dev->mode_config.mutex); + drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); +} + static void dm_dp_mst_register_connector(struct drm_connector *connector) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); if (adev->mode_info.rfbdev) drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector); @@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector) drm_connector_register(connector); + if (aconnector->mst_connected) + dm_dp_mst_link_status_reset(connector); } static const struct drm_dp_mst_topology_cbs dm_mst_cbs = { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 2da851b40042..8cf51da26657 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -31,5 +31,6 @@ struct amdgpu_dm_connector; void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector); +void dm_dp_mst_dc_sink_create(struct drm_connector *connector); #endif diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 3dce35e66b09..b142629a1058 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -900,6 +900,15 @@ bool dcn_validate_bandwidth( v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps; v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c; v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c; + /* + * Spreadsheet doesn't handle taps_c is one properly, + * need to force Chroma to always be scaled to pass + * bandwidth validation. + */ + if (v->override_hta_pschroma[input_idx] == 1) + v->override_hta_pschroma[input_idx] = 2; + if (v->override_vta_pschroma[input_idx] == 1) + v->override_vta_pschroma[input_idx] = 2; v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? 
dcn_bw_vert : dcn_bw_hor; } if (v->is_line_buffer_bpp_fixed == dcn_bw_yes) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index e27ed4a45265..42a111b9505d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal) link->link_enc->funcs->disable_output(link->link_enc, signal, link); } -bool dp_active_dongle_validate_timing( +static bool dp_active_dongle_validate_timing( const struct dc_crtc_timing *timing, const struct dc_dongle_caps *dongle_caps) { @@ -1833,6 +1833,8 @@ bool dp_active_dongle_validate_timing( /* Check Color Depth and Pixel Clock */ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) required_pix_clk /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + required_pix_clk = required_pix_clk * 2 / 3; switch (timing->display_color_depth) { case COLOR_DEPTH_666: diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 07ff8d2faf3f..d844fadcd56f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface( int num_planes, struct dc_state *context) { - int i, be_idx; + int i; if (num_planes == 0) return; - be_idx = -1; for (i = 0; i < dc->res_pool->pipe_count; i++) { - if (stream == context->res_ctx.pipe_ctx[i].stream) { - be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst; - break; + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (stream == pipe_ctx->stream) { + if (!pipe_ctx->top_pipe && + (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) + dc->hwss.pipe_control_lock(dc, pipe_ctx, true); } } @@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface( context->stream_count); dce110_program_front_end_for_pipe(dc, pipe_ctx); + + dc->hwss.update_plane_addr(dc, pipe_ctx); + program_surface_visibility(dc, pipe_ctx); } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + + if ((stream == pipe_ctx->stream) && + (!pipe_ctx->top_pipe) && + (pipe_ctx->plane_state || old_pipe_ctx->plane_state)) + dc->hwss.pipe_control_lock(dc, pipe_ctx, false); + } } static void dce110_power_down_fe(struct dc *dc, int fe_idx) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 74e7c82bdc76..a9d55d0dd69e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps( scl_data->taps.h_taps = 1; if (IDENTITY_RATIO(scl_data->ratios.vert)) scl_data->taps.v_taps = 1; - /* - * Spreadsheet doesn't handle taps_c is one properly, - * need to force Chroma to always be scaled to pass - * bandwidth validation. 
- */ + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index a9782b1aba47..34daf895f848 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1360,7 +1360,7 @@ void dpp1_cm_set_output_csc_adjustment( void dpp1_cm_set_output_csc_default( struct dpp *dpp_base, - const struct default_adjustment *default_adjust); + enum dc_color_space colorspace); void dpp1_cm_set_gamut_remap( struct dpp *dpp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 40627c244bf5..ed1216b53465 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -225,14 +225,13 @@ void dpp1_cm_set_gamut_remap( void dpp1_cm_set_output_csc_default( struct dpp *dpp_base, - const struct default_adjustment *default_adjust) + enum dc_color_space colorspace) { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); uint32_t ocsc_mode = 0; - if (default_adjust != NULL) { - switch (default_adjust->out_color_space) { + switch (colorspace) { case COLOR_SPACE_SRGB: case COLOR_SPACE_2020_RGB_FULLRANGE: ocsc_mode = 0; @@ -253,7 +252,6 @@ void dpp1_cm_set_output_csc_default( case COLOR_SPACE_UNKNOWN: default: break; - } } REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 961ad5c3b454..05dc01e54531 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2097,6 +2097,8 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx, tbl_entry.color_space = color_space; //tbl_entry.regval = matrix; pipe_ctx->plane_res.dpp->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.dpp, &tbl_entry); + } else { + pipe_ctx->plane_res.dpp->funcs->opp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace); } } static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 83a68460edcd..9420dfb94d39 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -64,7 +64,7 @@ struct dpp_funcs { void (*opp_set_csc_default)( struct dpp *dpp, - const struct default_adjustment *default_adjust); + enum dc_color_space colorspace); void (*opp_set_csc_adjustment)( struct dpp *dpp, diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 2e065facdce7..a0f4d2a2a481 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -168,16 +168,23 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc) void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb, int x, int y) { + const struct drm_format_info *format = fb->format; + unsigned int num_planes = format->num_planes; u32 addr = drm_fb_obj(fb)->dev_addr; - int num_planes = fb->format->num_planes; int i; if (num_planes > 3) num_planes = 3; - for (i = 0; i < num_planes; i++) + addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] + + x * format->cpp[0]; + + y /= format->vsub; + x /= format->hsub; + + for (i = 1; i < num_planes; i++) 
addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] + - x * fb->format->cpp[i]; + x * format->cpp[i]; for (; i < 3; i++) addrs[i] = 0; } @@ -744,15 +751,14 @@ void armada_drm_crtc_plane_disable(struct armada_crtc *dcrtc, if (plane->fb) drm_framebuffer_put(plane->fb); - /* Power down the Y/U/V FIFOs */ - sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66; - /* Power down most RAMs and FIFOs if this is the primary plane */ if (plane->type == DRM_PLANE_TYPE_PRIMARY) { - sram_para1 |= CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 | - CFG_PDWN32x32 | CFG_PDWN64x66; + sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 | + CFG_PDWN32x32 | CFG_PDWN64x66; dma_ctrl0_mask = CFG_GRA_ENA; } else { + /* Power down the Y/U/V FIFOs */ + sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66; dma_ctrl0_mask = CFG_DMA_ENA; } @@ -1225,17 +1231,13 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", dcrtc); - if (ret < 0) { - kfree(dcrtc); - return ret; - } + if (ret < 0) + goto err_crtc; if (dcrtc->variant->init) { ret = dcrtc->variant->init(dcrtc, dev); - if (ret) { - kfree(dcrtc); - return ret; - } + if (ret) + goto err_crtc; } /* Ensure AXI pipeline is enabled */ @@ -1246,13 +1248,15 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, dcrtc->crtc.port = port; primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (!primary) - return -ENOMEM; + if (!primary) { + ret = -ENOMEM; + goto err_crtc; + } ret = armada_drm_plane_init(primary); if (ret) { kfree(primary); - return ret; + goto err_crtc; } ret = drm_universal_plane_init(drm, &primary->base, 0, @@ -1263,7 +1267,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) { kfree(primary); - return ret; + goto err_crtc; } ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL, @@ -1282,6 +1286,9 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev, err_crtc_init: primary->base.funcs->destroy(&primary->base); +err_crtc: + kfree(dcrtc); + return ret; } diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h index bab11f483575..bfd3514fbe9b 100644 --- a/drivers/gpu/drm/armada/armada_crtc.h +++ b/drivers/gpu/drm/armada/armada_crtc.h @@ -42,6 +42,8 @@ struct armada_plane_work { }; struct armada_plane_state { + u16 src_x; + u16 src_y; u32 src_hw; u32 dst_hw; u32 dst_yx; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index b411b608821a..aba947696178 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -99,6 +99,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, { struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane); struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); + const struct drm_format_info *format; struct drm_rect src = { .x1 = src_x, .y1 = src_y, @@ -117,7 +118,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, }; uint32_t val, ctrl0; unsigned idx = 0; - bool visible; + bool visible, fb_changed; int ret; trace_armada_ovl_plane_update(plane, crtc, fb, @@ -138,6 +139,18 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, if (!visible) ctrl0 &= ~CFG_DMA_ENA; + /* + * Shifting a YUV packed format image by one pixel causes the U/V + * planes to swap. Compensate for it by also toggling the UV swap. 
+ */ + format = fb->format; + if (format->num_planes == 1 && src.x1 >> 16 & (format->hsub - 1)) + ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV); + + fb_changed = plane->fb != fb || + dplane->base.state.src_x != src.x1 >> 16 || + dplane->base.state.src_y != src.y1 >> 16; + if (!dcrtc->plane) { dcrtc->plane = plane; armada_ovl_update_attr(&dplane->prop, dcrtc); @@ -145,7 +158,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, /* FIXME: overlay on an interlaced display */ /* Just updating the position/size? */ - if (plane->fb == fb && dplane->base.state.ctrl0 == ctrl0) { + if (!fb_changed && dplane->base.state.ctrl0 == ctrl0) { val = (drm_rect_height(&src) & 0xffff0000) | drm_rect_width(&src) >> 16; dplane->base.state.src_hw = val; @@ -169,9 +182,8 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0) armada_drm_plane_work_cancel(dcrtc, &dplane->base); - if (plane->fb != fb) { - u32 addrs[3], pixel_format; - int num_planes, hsub; + if (fb_changed) { + u32 addrs[3]; /* * Take a reference on the new framebuffer - we want to @@ -182,23 +194,11 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, if (plane->fb) armada_ovl_retire_fb(dplane, plane->fb); - src_y = src.y1 >> 16; - src_x = src.x1 >> 16; + dplane->base.state.src_y = src_y = src.y1 >> 16; + dplane->base.state.src_x = src_x = src.x1 >> 16; armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y); - pixel_format = fb->format->format; - hsub = drm_format_horz_chroma_subsampling(pixel_format); - num_planes = fb->format->num_planes; - - /* - * Annoyingly, shifting a YUYV-format image by one pixel - * causes the U/V planes to toggle. Toggle the UV swap. - * (Unfortunately, this causes momentary colour flickering.) 
- */ - if (src_x & (hsub - 1) && num_planes == 1) - ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV); - armada_reg_queue_set(dplane->vbl.regs, idx, addrs[0], LCD_SPU_DMA_START_ADDR_Y0); armada_reg_queue_set(dplane->vbl.regs, idx, addrs[1], diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 59849f02e2ad..1402c0e71b03 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c @@ -220,17 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr mutex_lock(&dev->mode_config.idr_mutex); - /* Insert the new lessee into the tree */ - id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL); - if (id < 0) { - error = id; - goto out_lessee; - } - - lessee->lessee_id = id; - lessee->lessor = drm_master_get(lessor); - list_add_tail(&lessee->lessee_list, &lessor->lessees); - idr_for_each_entry(leases, entry, object) { error = 0; if (!idr_find(&dev->mode_config.crtc_idr, object)) @@ -246,6 +235,17 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr } } + /* Insert the new lessee into the tree */ + id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL); + if (id < 0) { + error = id; + goto out_lessee; + } + + lessee->lessee_id = id; + lessee->lessor = drm_master_get(lessor); + list_add_tail(&lessee->lessee_list, &lessor->lessees); + /* Move the leases over */ lessee->leases = *leases; DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor); diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 37a93cdffb4a..2c90519576a3 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -558,11 +558,10 @@ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format) } /* - * setplane_internal - setplane handler for internal callers + * __setplane_internal - setplane handler for internal callers * - * Note that we assume an extra reference has already been taken on fb. If the - * update fails, this reference will be dropped before return; if it succeeds, - * the previous framebuffer (if any) will be unreferenced instead. + * This function will take a reference on the new fb for the plane + * on success. * * src_{x,y,w,h} are provided in 16.16 fixed point format */ @@ -630,14 +629,12 @@ static int __setplane_internal(struct drm_plane *plane, if (!ret) { plane->crtc = crtc; plane->fb = fb; - fb = NULL; + drm_framebuffer_get(plane->fb); } else { plane->old_fb = NULL; } out: - if (fb) - drm_framebuffer_put(fb); if (plane->old_fb) drm_framebuffer_put(plane->old_fb); plane->old_fb = NULL; @@ -685,6 +682,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data, struct drm_plane *plane; struct drm_crtc *crtc = NULL; struct drm_framebuffer *fb = NULL; + int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; @@ -717,15 +715,16 @@ int drm_mode_setplane(struct drm_device *dev, void *data, } } - /* - * setplane_internal will take care of deref'ing either the old or new - * framebuffer depending on success. 
- */ - return setplane_internal(plane, crtc, fb, - plane_req->crtc_x, plane_req->crtc_y, - plane_req->crtc_w, plane_req->crtc_h, - plane_req->src_x, plane_req->src_y, - plane_req->src_w, plane_req->src_h); + ret = setplane_internal(plane, crtc, fb, + plane_req->crtc_x, plane_req->crtc_y, + plane_req->crtc_w, plane_req->crtc_h, + plane_req->src_x, plane_req->src_y, + plane_req->src_w, plane_req->src_h); + + if (fb) + drm_framebuffer_put(fb); + + return ret; } static int drm_mode_cursor_universal(struct drm_crtc *crtc, @@ -788,13 +787,12 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, src_h = fb->height << 16; } - /* - * setplane_internal will take care of deref'ing either the old or new - * framebuffer depending on success. - */ ret = __setplane_internal(crtc->cursor, crtc, fb, - crtc_x, crtc_y, crtc_w, crtc_h, - 0, 0, src_w, src_h, ctx); + crtc_x, crtc_y, crtc_w, crtc_h, + 0, 0, src_w, src_h, ctx); + + if (fb) + drm_framebuffer_put(fb); /* Update successful; save new cursor position, if necessary */ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index f776fc1cc543..cb4d09c70fd4 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -369,40 +369,26 @@ static const struct file_operations drm_syncobj_file_fops = { .release = drm_syncobj_file_release, }; -static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj) -{ - struct file *file = anon_inode_getfile("syncobj_file", - &drm_syncobj_file_fops, - syncobj, 0); - if (IS_ERR(file)) - return PTR_ERR(file); - - drm_syncobj_get(syncobj); - if (cmpxchg(&syncobj->file, NULL, file)) { - /* lost the race */ - fput(file); - } - - return 0; -} - int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd) { - int ret; + struct file *file; int fd; fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) return fd; - if (!syncobj->file) { - ret = drm_syncobj_alloc_file(syncobj); - if (ret) { - put_unused_fd(fd); - return ret; - } + file = anon_inode_getfile("syncobj_file", + &drm_syncobj_file_fops, + syncobj, 0); + if (IS_ERR(file)) { + put_unused_fd(fd); + return PTR_ERR(file); } - fd_install(fd, syncobj->file); + + drm_syncobj_get(syncobj); + fd_install(fd, file); + *p_fd = fd; return 0; } @@ -422,31 +408,24 @@ static int drm_syncobj_handle_to_fd(struct drm_file *file_private, return ret; } -static struct drm_syncobj *drm_syncobj_fdget(int fd) -{ - struct file *file = fget(fd); - - if (!file) - return NULL; - if (file->f_op != &drm_syncobj_file_fops) - goto err; - - return file->private_data; -err: - fput(file); - return NULL; -}; - static int drm_syncobj_fd_to_handle(struct drm_file *file_private, int fd, u32 *handle) { - struct drm_syncobj *syncobj = drm_syncobj_fdget(fd); + struct drm_syncobj *syncobj; + struct file *file; int ret; - if (!syncobj) + file = fget(fd); + if (!file) return -EINVAL; + if (file->f_op != &drm_syncobj_file_fops) { + fput(file); + return -EINVAL; + } + /* take a reference to put in the idr */ + syncobj = file->private_data; drm_syncobj_get(syncobj); idr_preload(GFP_KERNEL); @@ -455,12 +434,14 @@ static int drm_syncobj_fd_to_handle(struct drm_file *file_private, spin_unlock(&file_private->syncobj_table_lock); idr_preload_end(); - if (ret < 0) { - fput(syncobj->file); - return ret; - } - *handle = ret; - return 0; + if (ret > 0) { + *handle = ret; + ret = 0; + } else + drm_syncobj_put(syncobj); + + fput(file); + return ret; } static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private, diff 
--git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 355120865efd..309f3fa6794a 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -266,6 +266,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) /* Clear host CRT status, so guest couldn't detect this host CRT. */ if (IS_BROADWELL(dev_priv)) vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; + + vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; } static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) @@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, int type, unsigned int resolution) { - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); if (WARN_ON(resolution >= GVT_EDID_NUM)) @@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, port->type = type; emulate_monitor_status_change(vgpu); - vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; + return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 54b5d4c582b6..e143004e66d5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2368,6 +2368,9 @@ struct drm_i915_private { */ struct workqueue_struct *wq; + /* ordered wq for modesets */ + struct workqueue_struct *modeset_wq; + /* Display functions */ struct drm_i915_display_funcs display; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ad4050f7ab3b..18de6569d04a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -330,17 +330,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj) * must wait for all rendering to complete to the object (as unbinding * must anyway), and retire the requests. 
*/ - ret = i915_gem_object_wait(obj, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED | - I915_WAIT_ALL, - MAX_SCHEDULE_TIMEOUT, - NULL); + ret = i915_gem_object_set_to_cpu_domain(obj, false); if (ret) return ret; - i915_gem_retire_requests(to_i915(obj->base.dev)); - while ((vma = list_first_entry_or_null(&obj->vma_list, struct i915_vma, obj_link))) { diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3866c49bc390..333f40bc03bb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6977,6 +6977,7 @@ enum { #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) +#define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30) #define MASK_WAKEMEM (1<<13) #define SKL_DFSM _MMIO(0x51000) @@ -8522,6 +8523,7 @@ enum skl_power_gate { #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) +#define CDCLK_DIVMUX_CD_OVERRIDE (1<<19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index e8ca67a129d2..ac236b88c99c 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -367,6 +367,7 @@ struct i915_sw_dma_fence_cb { struct dma_fence *dma; struct timer_list timer; struct irq_work work; + struct rcu_head rcu; }; static void timer_i915_sw_fence_wake(struct timer_list *t) @@ -406,7 +407,7 @@ static void irq_i915_sw_fence_work(struct irq_work *wrk) del_timer_sync(&cb->timer); dma_fence_put(cb->dma); - kfree(cb); + kfree_rcu(cb, rcu); } int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence, diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index 5f8b9f1f40f1..bcbc7abe6693 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c @@ -186,7 +186,7 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) struct intel_wait *wait, *n, *first; if (!b->irq_armed) - return; + goto wakeup_signaler; /* We only disarm the irq when we are idle (all requests completed), * so if the bottom-half remains asleep, it missed the request @@ -208,6 +208,14 @@ void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine) b->waiters = RB_ROOT; spin_unlock_irq(&b->rb_lock); + + /* + * The signaling thread may be asleep holding a reference to a request + * that had its signaling cancelled prior to being preempted. We need + * to kick the signaler, just in case, to release any such reference.
+ */ +wakeup_signaler: + wake_up_process(b->signaler); } static bool use_fake_irq(const struct intel_breadcrumbs *b) @@ -651,23 +659,15 @@ static int intel_breadcrumbs_signaler(void *arg) } if (unlikely(do_schedule)) { - DEFINE_WAIT(exec); - if (kthread_should_park()) kthread_parkme(); - if (kthread_should_stop()) { - GEM_BUG_ON(request); + if (unlikely(kthread_should_stop())) { + i915_gem_request_put(request); break; } - if (request) - add_wait_queue(&request->execute, &exec); - schedule(); - - if (request) - remove_wait_queue(&request->execute, &exec); } i915_gem_request_put(request); } while (1); diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index b2a6d62b71c0..60cf4e58389a 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -860,16 +860,10 @@ static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco) { - int min_cdclk = skl_calc_cdclk(0, vco); u32 val; WARN_ON(vco != 8100000 && vco != 8640000); - /* select the minimum CDCLK before enabling DPLL 0 */ - val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk); - I915_WRITE(CDCLK_CTL, val); - POSTING_READ(CDCLK_CTL); - /* * We always enable DPLL0 with the lowest link rate possible, but still * taking into account the VCO required to operate the eDP panel at the @@ -923,7 +917,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, { int cdclk = cdclk_state->cdclk; int vco = cdclk_state->vco; - u32 freq_select, pcu_ack; + u32 freq_select, pcu_ack, cdclk_ctl; int ret; WARN_ON((cdclk == 24000) != (vco == 0)); @@ -940,7 +934,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, return; } - /* set CDCLK_CTL */ + /* Choose frequency for this cdclk */ switch (cdclk) { case 450000: case 432000: @@ -968,10 +962,33 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, dev_priv->cdclk.hw.vco != vco) skl_dpll0_disable(dev_priv); + cdclk_ctl = I915_READ(CDCLK_CTL); + + if (dev_priv->cdclk.hw.vco != vco) { + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + } + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); + POSTING_READ(CDCLK_CTL); + if (dev_priv->cdclk.hw.vco != vco) skl_dpll0_enable(dev_priv, vco); - I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk)); + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk); + I915_WRITE(CDCLK_CTL, cdclk_ctl); + + /* Wa Display #1183: skl,kbl,cfl */ + cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE; + I915_WRITE(CDCLK_CTL, cdclk_ctl); POSTING_READ(CDCLK_CTL); /* inform PCU of the change */ diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e0843bb99169..58a3755544b2 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -2128,6 +2128,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, if (WARN_ON(!pll)) return; + mutex_lock(&dev_priv->dpll_lock); + if (IS_CANNONLAKE(dev_priv)) { /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. 
*/ val = I915_READ(DPCLKA_CFGCR0); @@ -2157,6 +2159,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, } else if (INTEL_INFO(dev_priv)->gen < 9) { I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); } + + mutex_unlock(&dev_priv->dpll_lock); } static void intel_ddi_clk_disable(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e8ccf89cb17b..123585eeb87d 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9944,11 +9944,10 @@ found: } ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); + drm_framebuffer_put(fb); if (ret) goto fail; - drm_framebuffer_put(fb); - ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); if (ret) goto fail; @@ -12545,11 +12544,15 @@ static int intel_atomic_commit(struct drm_device *dev, INIT_WORK(&state->commit_work, intel_atomic_commit_work); i915_sw_fence_commit(&intel_state->commit_ready); - if (nonblock) + if (nonblock && intel_state->modeset) { + queue_work(dev_priv->modeset_wq, &state->commit_work); + } else if (nonblock) { queue_work(system_unbound_wq, &state->commit_work); - else + } else { + if (intel_state->modeset) + flush_workqueue(dev_priv->modeset_wq); intel_atomic_commit_tail(state); - + } return 0; } @@ -13195,7 +13198,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 10) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); modifiers = skl_format_modifiers_ccs; @@ -14463,6 +14466,8 @@ int intel_modeset_init(struct drm_device *dev) enum pipe pipe; struct intel_crtc *crtc; + dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); + drm_mode_config_init(dev); dev->mode_config.min_width = 0; @@ -15271,6 +15276,8 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_cleanup_gt_powersave(dev_priv); intel_teardown_gmbus(dev_priv); + + destroy_workqueue(dev_priv->modeset_wq); } void intel_connector_attach_encoder(struct intel_connector *connector, diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 3bf65288ffff..5809b29044fc 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c @@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv) }; if (!pci_dev_present(atom_hdaudio_ids)) { - DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n"); + DRM_INFO("HDaudio controller not detected, using LPE audio instead\n"); lpe_present = true; } } diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 6e3b430fccdc..55ea5eb3b7df 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = to_i915(dev); if (dev_priv->psr.active) { - i915_reg_t psr_ctl; + i915_reg_t psr_status; u32 psr_status_mask; if (dev_priv->psr.aux_frame_sync) @@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp, 0); if (dev_priv->psr.psr2_support) { - psr_ctl = EDP_PSR2_CTL; + psr_status = EDP_PSR2_STATUS_CTL; psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; - I915_WRITE(psr_ctl, - I915_READ(psr_ctl) & + I915_WRITE(EDP_PSR2_CTL, + 
I915_READ(EDP_PSR2_CTL) & ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE)); } else { - psr_ctl = EDP_PSR_STATUS_CTL; + psr_status = EDP_PSR_STATUS_CTL; psr_status_mask = EDP_PSR_STATUS_STATE_MASK; - I915_WRITE(psr_ctl, - I915_READ(psr_ctl) & ~EDP_PSR_ENABLE); + I915_WRITE(EDP_PSR_CTL, + I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); } /* Wait till PSR is idle */ if (intel_wait_for_register(dev_priv, - psr_ctl, psr_status_mask, 0, + psr_status, psr_status_mask, 0, 2000)) DRM_ERROR("Timed out waiting for PSR Idle State\n"); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 8af286c63d3b..7e115f3927f6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -598,6 +598,11 @@ void gen9_enable_dc5(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling DC5\n"); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); } @@ -625,6 +630,11 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv) { DRM_DEBUG_KMS("Disabling DC6\n"); + /* Wa Display #1183: skl,kbl,cfl */ + if (IS_GEN9_BC(dev_priv)) + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | + SKL_SELECT_ALTERNATE_DC_EXIT); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } @@ -1786,6 +1796,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_GMBUS) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2615912430cc..435ff8662cfa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -224,7 +224,7 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, /* Determine if we can get a cache-coherent map, forcing * uncached mapping if we can't. 
*/ - if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED) + if (!nouveau_drm_use_coherent_gpu_mapping(drm)) nvbo->force_coherent = true; } @@ -262,7 +262,8 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram) continue; - if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host) + if ((flags & TTM_PL_FLAG_TT) && + (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) continue; /* Select this page size if it's the first that supports diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 8d4a5be3b913..56fe261b6268 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -152,9 +152,9 @@ nouveau_cli_work_queue(struct nouveau_cli *cli, struct dma_fence *fence, work->cli = cli; mutex_lock(&cli->lock); list_add_tail(&work->head, &cli->worker); - mutex_unlock(&cli->lock); if (dma_fence_add_callback(fence, &work->cb, nouveau_cli_work_fence)) nouveau_cli_work_fence(fence, &work->cb); + mutex_unlock(&cli->lock); } static void diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 3331e82ae9e7..96f6bd8aee5d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -157,8 +157,8 @@ struct nouveau_drm { struct nvif_object copy; int mtrr; int type_vram; - int type_host; - int type_ncoh; + int type_host[2]; + int type_ncoh[2]; } ttm; /* GEM interface support */ @@ -217,6 +217,13 @@ nouveau_drm(struct drm_device *dev) return dev->dev_private; } +static inline bool +nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm) +{ + struct nvif_mmu *mmu = &drm->client.mmu; + return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED); +} + int nouveau_pmops_suspend(struct device *); int nouveau_pmops_resume(struct device *); bool nouveau_pmops_runtime(void); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index c533d8e04afc..be7357bf2246 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -429,7 +429,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon) drm_fb_helper_unregister_fbi(&fbcon->helper); drm_fb_helper_fini(&fbcon->helper); - if (nouveau_fb->nvbo) { + if (nouveau_fb && nouveau_fb->nvbo) { nouveau_vma_del(&nouveau_fb->vma); nouveau_bo_unmap(nouveau_fb->nvbo); nouveau_bo_unpin(nouveau_fb->nvbo); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 589a9621db76..c002f8968507 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -103,10 +103,10 @@ nouveau_mem_host(struct ttm_mem_reg *reg, struct ttm_dma_tt *tt) u8 type; int ret; - if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED) - type = drm->ttm.type_ncoh; + if (!nouveau_drm_use_coherent_gpu_mapping(drm)) + type = drm->ttm.type_ncoh[!!mem->kind]; else - type = drm->ttm.type_host; + type = drm->ttm.type_host[0]; if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND)) mem->comp = mem->kind = 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 08b974b30482..dff51a0ee028 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -235,27 +235,46 @@ nouveau_ttm_global_release(struct nouveau_drm *drm) drm->ttm.mem_global_ref.release = NULL; } -int 
-nouveau_ttm_init(struct nouveau_drm *drm) +static int +nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind) { - struct nvkm_device *device = nvxx_device(&drm->client.device); - struct nvkm_pci *pci = device->pci; struct nvif_mmu *mmu = &drm->client.mmu; - struct drm_device *dev = drm->dev; - int typei, ret; + int typei; typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | - NVIF_MEM_COHERENT); + kind | NVIF_MEM_COHERENT); if (typei < 0) return -ENOSYS; - drm->ttm.type_host = typei; + drm->ttm.type_host[!!kind] = typei; - typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE); + typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind); if (typei < 0) return -ENOSYS; - drm->ttm.type_ncoh = typei; + drm->ttm.type_ncoh[!!kind] = typei; + return 0; +} + +int +nouveau_ttm_init(struct nouveau_drm *drm) +{ + struct nvkm_device *device = nvxx_device(&drm->client.device); + struct nvkm_pci *pci = device->pci; + struct nvif_mmu *mmu = &drm->client.mmu; + struct drm_device *dev = drm->dev; + int typei, ret; + + ret = nouveau_ttm_init_host(drm, 0); + if (ret) + return ret; + + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && + drm->client.device.info.chipset != 0x50) { + ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND); + if (ret) + return ret; + } if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC && drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.c b/drivers/gpu/drm/nouveau/nouveau_vmm.c index 9e2628dd8e4d..f5371d96b003 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_vmm.c @@ -67,8 +67,8 @@ nouveau_vma_del(struct nouveau_vma **pvma) nvif_vmm_put(&vma->vmm->vmm, &tmp); } list_del(&vma->head); - *pvma = NULL; kfree(*pvma); + *pvma = NULL; } } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index e14643615698..00eeaaffeae5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2369,7 +2369,7 @@ nv13b_chipset = { .imem = gk20a_instmem_new, .ltc = gp100_ltc_new, .mc = gp10b_mc_new, - .mmu = gf100_mmu_new, + .mmu = gp10b_mmu_new, .secboot = gp10b_secboot_new, .pmu = gm20b_pmu_new, .timer = gk20a_timer_new, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c index 972370ed36f0..7c7efa4ea0d0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c @@ -36,6 +36,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) if (data) { *ver = nvbios_rd08(bios, data + 0x00); switch (*ver) { + case 0x20: case 0x21: case 0x30: case 0x40: @@ -63,6 +64,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx, if (data && idx < *cnt) { u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len); switch (*ver * !!outp) { + case 0x20: case 0x21: case 0x30: *hdr = nvbios_rd08(bios, data + 0x04); @@ -96,12 +98,16 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx, info->type = nvbios_rd16(bios, data + 0x00); info->mask = nvbios_rd16(bios, data + 0x02); switch (*ver) { + case 0x20: + info->mask |= 0x00c0; /* match any link */ + /* fall-through */ case 0x21: case 0x30: info->flags = nvbios_rd08(bios, data + 0x05); info->script[0] = nvbios_rd16(bios, data + 0x06); info->script[1] = nvbios_rd16(bios, data + 0x08); - info->lnkcmp = nvbios_rd16(bios, data + 0x0a); + if (*len >= 0x0c) + info->lnkcmp = 
nvbios_rd16(bios, data + 0x0a); if (*len >= 0x0f) { info->script[2] = nvbios_rd16(bios, data + 0x0c); info->script[3] = nvbios_rd16(bios, data + 0x0e); @@ -170,6 +176,7 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, memset(info, 0x00, sizeof(*info)); if (data) { switch (*ver) { + case 0x20: case 0x21: info->dc = nvbios_rd08(bios, data + 0x02); info->pe = nvbios_rd08(bios, data + 0x03); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c index 1ba7289684aa..db48a1daca0c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c @@ -249,7 +249,7 @@ nv50_instobj_acquire(struct nvkm_memory *memory) iobj->base.memory.ptrs = &nv50_instobj_fast; else iobj->base.memory.ptrs = &nv50_instobj_slow; - refcount_inc(&iobj->maps); + refcount_set(&iobj->maps, 1); } mutex_unlock(&imem->subdev.mutex); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index b1b1f3626b96..deb96de54b00 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -136,6 +136,13 @@ nvkm_pci_init(struct nvkm_subdev *subdev) return ret; pci->irq = pdev->irq; + + /* Ensure MSI interrupts are armed, for the case where there are + * already interrupts pending (for whatever reason) at load time. + */ + if (pci->msi) + pci->func->msi_rearm(pci); + return ret; } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c index e626eddf24d5..23db74ae1826 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c @@ -78,6 +78,8 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core) /* then read the message */ msg.len = cnt & 0xf; + if (msg.len > CEC_MAX_MSG_SIZE - 2) + msg.len = CEC_MAX_MSG_SIZE - 2; msg.msg[0] = hdmi_read_reg(core->base, HDMI_CEC_RX_CMD_HEADER); msg.msg[1] = hdmi_read_reg(core->base, @@ -104,26 +106,6 @@ static void hdmi_cec_received_msg(struct hdmi_core_data *core) } } -static void hdmi_cec_transmit_fifo_empty(struct hdmi_core_data *core, u32 stat1) -{ - if (stat1 & 2) { - u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3); - - cec_transmit_done(core->adap, - CEC_TX_STATUS_NACK | - CEC_TX_STATUS_MAX_RETRIES, - 0, (dbg3 >> 4) & 7, 0, 0); - } else if (stat1 & 1) { - cec_transmit_done(core->adap, - CEC_TX_STATUS_ARB_LOST | - CEC_TX_STATUS_MAX_RETRIES, - 0, 0, 0, 0); - } else if (stat1 == 0) { - cec_transmit_done(core->adap, CEC_TX_STATUS_OK, - 0, 0, 0, 0); - } -} - void hdmi4_cec_irq(struct hdmi_core_data *core) { u32 stat0 = hdmi_read_reg(core->base, HDMI_CEC_INT_STATUS_0); @@ -132,27 +114,21 @@ void hdmi4_cec_irq(struct hdmi_core_data *core) hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_0, stat0); hdmi_write_reg(core->base, HDMI_CEC_INT_STATUS_1, stat1); - if (stat0 & 0x40) + if (stat0 & 0x20) { + cec_transmit_done(core->adap, CEC_TX_STATUS_OK, + 0, 0, 0, 0); REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7); - else if (stat0 & 0x24) - hdmi_cec_transmit_fifo_empty(core, stat1); - if (stat1 & 2) { + } else if (stat1 & 0x02) { u32 dbg3 = hdmi_read_reg(core->base, HDMI_CEC_DBG_3); cec_transmit_done(core->adap, CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES, 0, (dbg3 >> 4) & 7, 0, 0); - } else if (stat1 & 1) { - cec_transmit_done(core->adap, - CEC_TX_STATUS_ARB_LOST | - CEC_TX_STATUS_MAX_RETRIES, - 0, 0, 0, 0); + REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7); } if (stat0 & 0x02) 
hdmi_cec_received_msg(core); - if (stat1 & 0x3) - REG_FLD_MOD(core->base, HDMI_CEC_DBG_3, 0x1, 7, 7); } static bool hdmi_cec_clear_tx_fifo(struct cec_adapter *adap) @@ -231,18 +207,14 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable) /* * Enable CEC interrupts: * Transmit Buffer Full/Empty Change event - * Transmitter FIFO Empty event * Receiver FIFO Not Empty event */ - hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x26); + hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_0, 0x22); /* * Enable CEC interrupts: - * RX FIFO Overrun Error event - * Short Pulse Detected event * Frame Retransmit Count Exceeded event - * Start Bit Irregularity event */ - hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x0f); + hdmi_write_reg(core->base, HDMI_CEC_INT_ENABLE_1, 0x02); /* cec calibration enable (self clearing) */ hdmi_write_reg(core->base, HDMI_CEC_SETUP, 0x03); diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index dda904ec0534..500b6fb3e028 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -175,11 +175,31 @@ static void sun4i_hdmi_mode_set(struct drm_encoder *encoder, writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG); } +static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder, + const struct drm_display_mode *mode) +{ + struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder); + unsigned long rate = mode->clock * 1000; + unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */ + long rounded_rate; + + /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */ + if (rate > 165000000) + return MODE_CLOCK_HIGH; + rounded_rate = clk_round_rate(hdmi->tmds_clk, rate); + if (rounded_rate > 0 && + max_t(unsigned long, rounded_rate, rate) - + min_t(unsigned long, rounded_rate, rate) < diff) + return MODE_OK; + return MODE_NOCLOCK; +} + static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = { .atomic_check = sun4i_hdmi_atomic_check, .disable = sun4i_hdmi_disable, .enable = sun4i_hdmi_enable, .mode_set = sun4i_hdmi_mode_set, + .mode_valid = sun4i_hdmi_mode_valid, }; static const struct drm_encoder_funcs sun4i_hdmi_funcs = { diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index e122f5b2a395..f4284b51bdca 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -724,12 +724,12 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, if (IS_ERR(tcon->crtc)) { dev_err(dev, "Couldn't create our CRTC\n"); ret = PTR_ERR(tcon->crtc); - goto err_free_clocks; + goto err_free_dotclock; } ret = sun4i_rgb_init(drm, tcon); if (ret < 0) - goto err_free_clocks; + goto err_free_dotclock; if (tcon->quirks->needs_de_be_mux) { /* diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 44343a2bf55c..5d252fb27a82 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) freed += (nr_free_pool - shrink_pages) << pool->order; if (freed >= sc->nr_to_scan) break; + shrink_pages <<= pool->order; } mutex_unlock(&lock); return freed; @@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, int r = 0; unsigned i, j, cpages; unsigned npages = 1 << order; - unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC); + unsigned max_cpages = min(count << order, 
(unsigned)NUM_PAGES_TO_ALLOC); /* allocate array for page caching change */ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); @@ -1006,6 +1007,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) pr_info("Initializing pool allocator\n"); _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); + if (!_manager) + return -ENOMEM; ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index f3fcb836a1f9..0c3f608131cf 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -551,7 +551,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) ret = hid_add_field(parser, HID_FEATURE_REPORT, data); break; default: - hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag); + hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag); ret = 0; } diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 68cdc962265b..271f31461da4 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -696,8 +696,16 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, (u8 *)&word, 2); break; case I2C_SMBUS_I2C_BLOCK_DATA: - size = I2C_SMBUS_BLOCK_DATA; - /* fallthrough */ + if (read_write == I2C_SMBUS_READ) { + read_length = data->block[0]; + count = cp2112_write_read_req(buf, addr, read_length, + command, NULL, 0); + } else { + count = cp2112_write_req(buf, addr, command, + data->block + 1, + data->block[0]); + } + break; case I2C_SMBUS_BLOCK_DATA: if (I2C_SMBUS_READ == read_write) { count = cp2112_write_read_req(buf, addr, @@ -785,6 +793,9 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, case I2C_SMBUS_WORD_DATA: data->word = le16_to_cpup((__le16 *)buf); break; + case I2C_SMBUS_I2C_BLOCK_DATA: + memcpy(data->block + 1, buf, read_length); + break; case I2C_SMBUS_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c index 9325545fc3ae..edc0f64bb584 100644 --- a/drivers/hid/hid-holtekff.c +++ b/drivers/hid/hid-holtekff.c @@ -32,10 +32,6 @@ #ifdef CONFIG_HOLTEK_FF -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>"); -MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices"); - /* * These commands and parameters are currently known: * @@ -223,3 +219,7 @@ static struct hid_driver holtek_driver = { .probe = holtek_probe, }; module_hid_driver(holtek_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>"); +MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices"); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 76ed9a216f10..610223f0e945 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1378,6 +1378,8 @@ void vmbus_device_unregister(struct hv_device *device_obj) pr_debug("child device %s unregistered\n", dev_name(&device_obj->device)); + kset_unregister(device_obj->channels_kset); + /* * Kick off the process of unregistering the device. 
* This will call vmbus_remove() and eventually vmbus_device_release() diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index c9790e2c3440..af5123042990 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -143,6 +143,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, struct hwmon_device *hwdev, int index) { struct hwmon_thermal_data *tdata; + struct thermal_zone_device *tzd; tdata = devm_kzalloc(dev, sizeof(*tdata), GFP_KERNEL); if (!tdata) @@ -151,8 +152,14 @@ static int hwmon_thermal_add_sensor(struct device *dev, tdata->hwdev = hwdev; tdata->index = index; - devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata, - &hwmon_thermal_ops); + tzd = devm_thermal_zone_of_sensor_register(&hwdev->dev, index, tdata, + &hwmon_thermal_ops); + /* + * If CONFIG_THERMAL_OF is disabled, this returns -ENODEV, + * so ignore that error but forward any other error. + */ + if (IS_ERR(tzd) && (PTR_ERR(tzd) != -ENODEV)) + return PTR_ERR(tzd); return 0; } @@ -621,14 +628,20 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata, if (!chip->ops->is_visible(drvdata, hwmon_temp, hwmon_temp_input, j)) continue; - if (info[i]->config[j] & HWMON_T_INPUT) - hwmon_thermal_add_sensor(dev, hwdev, j); + if (info[i]->config[j] & HWMON_T_INPUT) { + err = hwmon_thermal_add_sensor(dev, + hwdev, j); + if (err) + goto free_device; + } } } } return hdev; +free_device: + device_unregister(hdev); free_hwmon: kfree(hwdev); ida_remove: diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index a1d687a664f8..66f0268f37a6 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, } #endif -struct ib_device *__ib_device_get_by_index(u32 ifindex); +struct ib_device *ib_device_get_by_index(u32 ifindex); /* RDMA device netlink */ void nldev_init(void); void nldev_exit(void); diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 30914f3baa5f..465520627e4b 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device) return 0; } -struct ib_device *__ib_device_get_by_index(u32 index) +static struct ib_device *__ib_device_get_by_index(u32 index) { struct ib_device *device; @@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index) return NULL; } +/* + * Caller is responsible for returning the reference count by calling put_device() + */ +struct ib_device *ib_device_get_by_index(u32 index) +{ + struct ib_device *device; + + down_read(&lists_rwsem); + device = __ib_device_get_by_index(index); + if (device) + get_device(&device->dev); + + up_read(&lists_rwsem); + return device; +} + static struct ib_device *__ib_device_get_by_name(const char *name) { struct ib_device *device; diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 9a05245a1acf..0dcd1aa6f683 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(index); + device = ib_device_get_by_index(index); if (!device) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + if (!msg) { + err = -ENOMEM; + goto err; + } nlh = nlmsg_put(msg,
NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); err = fill_dev_info(msg, device); - if (err) { - nlmsg_free(msg); - return err; - } + if (err) + goto err_free; nlmsg_end(msg, nlh); + put_device(&device->dev); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + +err_free: + nlmsg_free(msg); +err: + put_device(&device->dev); + return err; } static int _nldev_get_dumpit(struct ib_device *device, @@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh, return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(index); + device = ib_device_get_by_index(index); if (!device) return -EINVAL; port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); - if (!rdma_is_port_valid(device, port)) - return -EINVAL; + if (!rdma_is_port_valid(device, port)) { + err = -EINVAL; + goto err; + } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; + if (!msg) { + err = -ENOMEM; + goto err; + } nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET), 0, 0); err = fill_port_info(msg, device, port); - if (err) { - nlmsg_free(msg); - return err; - } + if (err) + goto err_free; nlmsg_end(msg, nlh); + put_device(&device->dev); return rdma_nl_unicast(msg, NETLINK_CB(skb).portid); + +err_free: + nlmsg_free(msg); +err: + put_device(&device->dev); + return err; } static int nldev_port_get_dumpit(struct sk_buff *skb, @@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, return -EINVAL; ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); - device = __ib_device_get_by_index(ifindex); + device = ib_device_get_by_index(ifindex); if (!device) return -EINVAL; @@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, nlmsg_end(skb, nlh); } -out: cb->args[0] = idx; +out: + put_device(&device->dev); + cb->args[0] = idx; return skb->len; } diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index feafdb961c48..59b2f96d986a 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c @@ -386,6 +386,9 @@ int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) if (ret) return ret; + if (!qp->qp_sec) + return 0; + mutex_lock(&real_qp->qp_sec->mutex); ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, qp->qp_sec); diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index d0202bb176a4..840b24096690 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2074,8 +2074,8 @@ int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file, return -EOPNOTSUPP; if (ucore->inlen > sizeof(cmd)) { - if (ib_is_udata_cleared(ucore, sizeof(cmd), - ucore->inlen - sizeof(cmd))) + if (!ib_is_udata_cleared(ucore, sizeof(cmd), + ucore->inlen - sizeof(cmd))) return -EOPNOTSUPP; } diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 3fb8fb6cc824..e36d27ed4daa 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1438,7 +1438,8 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); - ib_close_shared_qp_security(qp->qp_sec); + if (qp->qp_sec) + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 
b7bfc536e00f..6f2b26126c64 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -395,7 +395,7 @@ next_cqe: static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) { - if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) { + if (DRAIN_CQE(cqe)) { WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); return 0; } @@ -494,7 +494,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, /* * Special cqe for drain WR completions... */ - if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { + if (DRAIN_CQE(hw_cqe)) { *cookie = CQE_DRAIN_COOKIE(hw_cqe); *cqe = *hw_cqe; goto skip_cqe; @@ -571,10 +571,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, ret = -EAGAIN; goto skip_cqe; } - if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { + if (unlikely(!CQE_STATUS(hw_cqe) && + CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) { t4_set_wq_in_error(wq); - hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN)); - goto proc_cqe; + hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN)); } goto proc_cqe; } @@ -748,9 +748,6 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) c4iw_invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); break; - case C4IW_DRAIN_OPCODE: - wc->opcode = IB_WC_SEND; - break; default: pr_err("Unexpected opcode %d in the CQE received for QPID=0x%0x\n", CQE_OPCODE(&cqe), CQE_QPID(&cqe)); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 470f97a79ebb..65dd3726ca02 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -693,8 +693,6 @@ static inline int to_ib_qp_state(int c4iw_qp_state) return IB_QPS_ERR; } -#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN - static inline u32 c4iw_ib_to_tpt_access(int a) { return (a & IB_ACCESS_REMOTE_WRITE ? 
FW_RI_MEM_ACCESS_REM_WRITE : 0) | diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 38bddd02a943..d5c92fc520d6 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -790,21 +790,57 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc) return 0; } -static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) +static int ib_to_fw_opcode(int ib_opcode) +{ + int opcode; + + switch (ib_opcode) { + case IB_WR_SEND_WITH_INV: + opcode = FW_RI_SEND_WITH_INV; + break; + case IB_WR_SEND: + opcode = FW_RI_SEND; + break; + case IB_WR_RDMA_WRITE: + opcode = FW_RI_RDMA_WRITE; + break; + case IB_WR_RDMA_READ: + case IB_WR_RDMA_READ_WITH_INV: + opcode = FW_RI_READ_REQ; + break; + case IB_WR_REG_MR: + opcode = FW_RI_FAST_REGISTER; + break; + case IB_WR_LOCAL_INV: + opcode = FW_RI_LOCAL_INV; + break; + default: + opcode = -EINVAL; + } + return opcode; +} + +static int complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) { struct t4_cqe cqe = {}; struct c4iw_cq *schp; unsigned long flag; struct t4_cq *cq; + int opcode; schp = to_c4iw_cq(qhp->ibqp.send_cq); cq = &schp->cq; + opcode = ib_to_fw_opcode(wr->opcode); + if (opcode < 0) + return opcode; + cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(opcode) | CQE_TYPE_V(1) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&schp->lock, flag); @@ -819,6 +855,23 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) schp->ibcq.cq_context); spin_unlock_irqrestore(&schp->comp_handler_lock, flag); } + return 0; +} + +static int complete_sq_drain_wrs(struct c4iw_qp *qhp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + int ret = 0; + + while (wr) { + ret = complete_sq_drain_wr(qhp, wr); + if (ret) { + *bad_wr = wr; + break; + } + wr = wr->next; + } + return ret; } static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) @@ -833,9 +886,10 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) cqe.u.drain_cookie = wr->wr_id; cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | - CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | + CQE_OPCODE_V(FW_RI_SEND) | CQE_TYPE_V(0) | CQE_SWCQE_V(1) | + CQE_DRAIN_V(1) | CQE_QPID_V(qhp->wq.sq.qid)); spin_lock_irqsave(&rchp->lock, flag); @@ -852,6 +906,14 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) } } +static void complete_rq_drain_wrs(struct c4iw_qp *qhp, struct ib_recv_wr *wr) +{ + while (wr) { + complete_rq_drain_wr(qhp, wr); + wr = wr->next; + } +} + int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { @@ -875,7 +937,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, */ if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_sq_drain_wr(qhp, wr); + err = complete_sq_drain_wrs(qhp, wr, bad_wr); return err; } num_wrs = t4_sq_avail(&qhp->wq); @@ -1023,7 +1085,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, */ if (qhp->wq.flushed) { spin_unlock_irqrestore(&qhp->lock, flag); - complete_rq_drain_wr(qhp, wr); + complete_rq_drain_wrs(qhp, wr); return err; } num_wrs = t4_rq_avail(&qhp->wq); diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index e9ea94268d51..79e8ee12c391 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -197,6 +197,11 @@ struct 
t4_cqe { #define CQE_SWCQE_G(x) ((((x) >> CQE_SWCQE_S)) & CQE_SWCQE_M) #define CQE_SWCQE_V(x) ((x)<<CQE_SWCQE_S) +#define CQE_DRAIN_S 10 +#define CQE_DRAIN_M 0x1 +#define CQE_DRAIN_G(x) ((((x) >> CQE_DRAIN_S)) & CQE_DRAIN_M) +#define CQE_DRAIN_V(x) ((x)<<CQE_DRAIN_S) + #define CQE_STATUS_S 5 #define CQE_STATUS_M 0x1F #define CQE_STATUS_G(x) ((((x) >> CQE_STATUS_S)) & CQE_STATUS_M) @@ -213,6 +218,7 @@ struct t4_cqe { #define CQE_OPCODE_V(x) ((x)<<CQE_OPCODE_S) #define SW_CQE(x) (CQE_SWCQE_G(be32_to_cpu((x)->header))) +#define DRAIN_CQE(x) (CQE_DRAIN_G(be32_to_cpu((x)->header))) #define CQE_QPID(x) (CQE_QPID_G(be32_to_cpu((x)->header))) #define CQE_TYPE(x) (CQE_TYPE_G(be32_to_cpu((x)->header))) #define SQ_TYPE(x) (CQE_TYPE((x))) diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 4a9b4d7efe63..8ce9118d4a7f 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1131,7 +1131,6 @@ struct hfi1_devdata { u16 pcie_lnkctl; u16 pcie_devctl2; u32 pci_msix0; - u32 pci_lnkctl3; u32 pci_tph2; /* diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 09e50fd2a08f..8c7e7a60b715 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -411,15 +411,12 @@ int restore_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_write_config_dword(dd->pcidev, PCIE_CFG_TPH2, + dd->pci_tph2); + if (ret) + goto error; + } return 0; error: @@ -469,15 +466,12 @@ int save_pci_variables(struct hfi1_devdata *dd) if (ret) goto error; - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_SPCIE1, - &dd->pci_lnkctl3); - if (ret) - goto error; - - ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, &dd->pci_tph2); - if (ret) - goto error; - + if (pci_find_ext_capability(dd->pcidev, PCI_EXT_CAP_ID_TPH)) { + ret = pci_read_config_dword(dd->pcidev, PCIE_CFG_TPH2, + &dd->pci_tph2); + if (ret) + goto error; + } return 0; error: diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 313bfb9ccb71..4975f3e6596e 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, goto err_free_mr; mr->max_pages = max_num_sg; - err = mlx4_mr_enable(dev->dev, &mr->mmr); if (err) goto err_free_pl; @@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, return &mr->ibmr; err_free_pl: + mr->ibmr.device = pd->device; mlx4_free_priv_pages(mr); err_free_mr: (void) mlx4_mr_free(dev->dev, &mr->mmr); diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index 470995fa38d2..6f6712f87a73 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -47,17 +47,6 @@ int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey) return err; } -int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, - bool reset, void *out, int out_size) -{ - u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { }; - - MLX5_SET(query_cong_statistics_in, in, opcode, - MLX5_CMD_OP_QUERY_CONG_STATISTICS); - MLX5_SET(query_cong_statistics_in, in, clear, reset); - return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); -} - int mlx5_cmd_query_cong_params(struct mlx5_core_dev 
*dev, int cong_point, void *out, int out_size) { diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index af4c24596274..78ffded7cc2c 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -37,8 +37,6 @@ #include <linux/mlx5/driver.h> int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey); -int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, - bool reset, void *out, int out_size); int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point, void *out, int out_size); int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev, diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 543d0a4c8bf3..8ac50de2b242 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1463,6 +1463,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, } INIT_LIST_HEAD(&context->vma_private_list); + mutex_init(&context->vma_private_list_mutex); INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); @@ -1624,7 +1625,9 @@ static void mlx5_ib_vma_close(struct vm_area_struct *area) * mlx5_ib_disassociate_ucontext(). */ mlx5_ib_vma_priv_data->vma = NULL; + mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex); list_del(&mlx5_ib_vma_priv_data->list); + mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex); kfree(mlx5_ib_vma_priv_data); } @@ -1644,10 +1647,13 @@ static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, return -ENOMEM; vma_prv->vma = vma; + vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex; vma->vm_private_data = vma_prv; vma->vm_ops = &mlx5_ib_vm_ops; + mutex_lock(&ctx->vma_private_list_mutex); list_add(&vma_prv->list, vma_head); + mutex_unlock(&ctx->vma_private_list_mutex); return 0; } @@ -1690,6 +1696,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) * mlx5_ib_vma_close. 
*/ down_write(&owning_mm->mmap_sem); + mutex_lock(&context->vma_private_list_mutex); list_for_each_entry_safe(vma_private, n, &context->vma_private_list, list) { vma = vma_private->vma; @@ -1704,6 +1711,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) list_del(&vma_private->list); kfree(vma_private); } + mutex_unlock(&context->vma_private_list_mutex); up_write(&owning_mm->mmap_sem); mmput(owning_mm); put_task_struct(owning_process); @@ -3737,34 +3745,6 @@ free: return ret; } -static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev, - struct mlx5_ib_port *port, - struct rdma_hw_stats *stats) -{ - int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out); - void *out; - int ret, i; - int offset = port->cnts.num_q_counters; - - out = kvzalloc(outlen, GFP_KERNEL); - if (!out) - return -ENOMEM; - - ret = mlx5_cmd_query_cong_counter(dev->mdev, false, out, outlen); - if (ret) - goto free; - - for (i = 0; i < port->cnts.num_cong_counters; i++) { - stats->value[i + offset] = - be64_to_cpup((__be64 *)(out + - port->cnts.offsets[i + offset])); - } - -free: - kvfree(out); - return ret; -} - static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u8 port_num, int index) @@ -3782,7 +3762,12 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, num_counters = port->cnts.num_q_counters; if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { - ret = mlx5_ib_query_cong_counters(dev, port, stats); + ret = mlx5_lag_query_cong_counters(dev->mdev, + stats->value + + port->cnts.num_q_counters, + port->cnts.num_cong_counters, + port->cnts.offsets + + port->cnts.num_q_counters); if (ret) return ret; num_counters += port->cnts.num_cong_counters; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 6dd8cac78de2..2c5f3533bbc9 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -115,6 +115,8 @@ enum { struct mlx5_ib_vma_private_data { struct list_head list; struct vm_area_struct *vma; + /* protect vma_private_list add/del */ + struct mutex *vma_private_list_mutex; }; struct mlx5_ib_ucontext { @@ -129,6 +131,8 @@ struct mlx5_ib_ucontext { /* Transport Domain number */ u32 tdn; struct list_head vma_private_list; + /* protect vma_private_list add/del */ + struct mutex vma_private_list_mutex; unsigned long upd_xlt_page; /* protect ODP/KSM */ diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index ee0ee1f9994b..d109fe8290a7 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1637,6 +1637,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, MLX5_SET(mkc, mkc, access_mode, mr->access_mode); MLX5_SET(mkc, mkc, umr_en, 1); + mr->ibmr.device = pd->device; err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); if (err) goto err_destroy_psv; diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 50812b33291b..a9c3378bca38 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev) static int qedr_alloc_mem_sb(struct qedr_dev *dev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block *sb_virt; + struct status_block_e4 *sb_virt; dma_addr_t sb_phys; int rc; diff --git a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h index b7587f10e7de..78b49002fbd2 100644 --- a/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h +++ 
b/drivers/infiniband/hw/qedr/qedr_hsi_rdma.h @@ -164,6 +164,13 @@ struct rdma_srq_sge { __le32 l_key; }; +/* Rdma doorbell data for flags update */ +struct rdma_pwm_flags_data { + __le16 icid; /* internal CID */ + u8 agg_flags; /* aggregative flags */ + u8 reserved; +}; + /* Rdma doorbell data for SQ and RQ */ struct rdma_pwm_val16_data { __le16 icid; @@ -180,12 +187,16 @@ struct rdma_pwm_val32_data { __le16 icid; u8 agg_flags; u8 params; -#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3 -#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0 -#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1 -#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2 -#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F -#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3 +#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3 +#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0 +#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1 +#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2 +#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1 +#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3 +#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1 +#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4 +#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7 +#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5 __le32 value; }; @@ -478,23 +489,25 @@ struct rdma_sq_fmr_wqe { __le16 dif_app_tag_mask; __le16 dif_runt_crc_value; __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF -#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7 - __le32 Reserved5; +#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6 +#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1 +#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7 +#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF +#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8 + __le32 reserved5; }; /* First element (16 bytes) of fmr wqe */ @@ -558,23 +571,25 @@ struct rdma_sq_fmr_wqe_3rd { __le16 dif_app_tag_mask; __le16 dif_runt_crc_value; __le16 dif_flags; -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1 -#define 
RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 -#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 -#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 -#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF -#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7 - __le32 Reserved5; +#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0 +#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5 +#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1 +#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7 +#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF +#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 8 + __le32 reserved5; }; struct rdma_sq_local_inv_wqe { @@ -606,20 +621,22 @@ struct rdma_sq_rdma_wqe { __le32 xrc_srq; u8 req_type; u8 flags; -#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0 -#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1 -#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2 -#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3 -#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4 -#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1 -#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 -#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3 -#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6 +#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT 0 +#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT 1 +#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT 2 +#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT 3 +#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4 +#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5 +#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6 +#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1 +#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7 u8 wqe_size; u8 prev_wqe_size; struct regpair remote_va; diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 63bc2efc34eb..4f7bd3b6a315 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -94,7 +94,7 @@ struct pvrdma_cq { u32 cq_handle; bool
is_kernel; atomic_t refcnt; - wait_queue_head_t wait; + struct completion free; }; struct pvrdma_id_table { @@ -175,7 +175,7 @@ struct pvrdma_srq { u32 srq_handle; int npages; refcount_t refcnt; - wait_queue_head_t wait; + struct completion free; }; struct pvrdma_qp { @@ -197,7 +197,7 @@ struct pvrdma_qp { bool is_kernel; struct mutex mutex; /* QP state mutex. */ atomic_t refcnt; - wait_queue_head_t wait; + struct completion free; }; struct pvrdma_dev { diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index 3562c0c30492..e529622cefad 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev, pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0); atomic_set(&cq->refcnt, 1); - init_waitqueue_head(&cq->wait); + init_completion(&cq->free); spin_lock_init(&cq->cq_lock); memset(cmd, 0, sizeof(*cmd)); @@ -230,8 +230,9 @@ err_cq: static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq) { - atomic_dec(&cq->refcnt); - wait_event(cq->wait, !atomic_read(&cq->refcnt)); + if (atomic_dec_and_test(&cq->refcnt)) + complete(&cq->free); + wait_for_completion(&cq->free); if (!cq->is_kernel) ib_umem_release(cq->umem); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 1f4e18717a00..e92681878c93 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type) ibqp->event_handler(&e, ibqp->qp_context); } if (qp) { - atomic_dec(&qp->refcnt); - if (atomic_read(&qp->refcnt) == 0) - wake_up(&qp->wait); + if (atomic_dec_and_test(&qp->refcnt)) + complete(&qp->free); } } @@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type) ibcq->event_handler(&e, ibcq->cq_context); } if (cq) { - atomic_dec(&cq->refcnt); - if (atomic_read(&cq->refcnt) == 0) - wake_up(&cq->wait); + if (atomic_dec_and_test(&cq->refcnt)) + complete(&cq->free); } } @@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type) } if (srq) { if (refcount_dec_and_test(&srq->refcnt)) - wake_up(&srq->wait); + complete(&srq->free); } } @@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id) if (cq && cq->ibcq.comp_handler) cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); if (cq) { - atomic_dec(&cq->refcnt); - if (atomic_read(&cq->refcnt)) - wake_up(&cq->wait); + if (atomic_dec_and_test(&cq->refcnt)) + complete(&cq->free); } pvrdma_idx_ring_inc(&ring->cons_head, ring_slots); } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index 10420a18d02f..4059308e1454 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c @@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, spin_lock_init(&qp->rq.lock); mutex_init(&qp->mutex); atomic_set(&qp->refcnt, 1); - init_waitqueue_head(&qp->wait); + init_completion(&qp->free); qp->state = IB_QPS_RESET; @@ -428,8 +428,16 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp) pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags); - atomic_dec(&qp->refcnt); - wait_event(qp->wait, !atomic_read(&qp->refcnt)); + if (atomic_dec_and_test(&qp->refcnt)) + complete(&qp->free); + wait_for_completion(&qp->free); + + if (!qp->is_kernel) { 
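+ /* Last reference is gone: release any user memory regions pinned at create time. */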
+ if (qp->rumem) + ib_umem_release(qp->rumem); + if (qp->sumem) + ib_umem_release(qp->sumem); + } pvrdma_page_dir_cleanup(dev, &qp->pdir); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c index 826ccb864596..5acebb1ef631 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c @@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd, spin_lock_init(&srq->lock); refcount_set(&srq->refcnt, 1); - init_waitqueue_head(&srq->wait); + init_completion(&srq->free); dev_dbg(&dev->pdev->dev, "create shared receive queue from user space\n"); @@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq) dev->srq_tbl[srq->srq_handle] = NULL; spin_unlock_irqrestore(&dev->srq_tbl_lock, flags); - refcount_dec(&srq->refcnt); - wait_event(srq->wait, !refcount_read(&srq->refcnt)); + if (refcount_dec_and_test(&srq->refcnt)) + complete(&srq->free); + wait_for_completion(&srq->free); /* There is no support for kernel clients, so this is safe. */ ib_umem_release(srq->umem); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 3b96cdaf9a83..e6151a29c412 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1236,13 +1236,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, ipoib_ib_dev_down(dev); if (level == IPOIB_FLUSH_HEAVY) { - rtnl_lock(); if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) ipoib_ib_dev_stop(dev); - result = ipoib_ib_dev_open(dev); - rtnl_unlock(); - if (result) + if (ipoib_ib_dev_open(dev)) return; if (netif_queue_stopped(dev)) @@ -1282,7 +1279,9 @@ void ipoib_ib_dev_flush_heavy(struct work_struct *work) struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); + rtnl_lock(); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0); + rtnl_unlock(); } void ipoib_ib_dev_cleanup(struct net_device *dev) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 12b7f911f0e5..8880351df179 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev, return 0; } -static void neigh_add_path(struct sk_buff *skb, u8 *daddr, - struct net_device *dev) +static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr, + struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); struct rdma_netdev *rn = netdev_priv(dev); @@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); - return; + return NULL; + } + + /* To avoid race condition, make sure that the + * neigh will be added only once. 
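+ * A neigh already on a list was linked by a concurrent caller, so hand it back unmodified.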
+ */ + if (unlikely(!list_empty(&neigh->list))) { + spin_unlock_irqrestore(&priv->lock, flags); + return neigh; } path = __path_find(dev, daddr + 4); @@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, path->ah->last_send = rn->send(dev, skb, path->ah->ah, IPOIB_QPN(daddr)); ipoib_neigh_put(neigh); - return; + return NULL; } } else { neigh->ah = NULL; @@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, spin_unlock_irqrestore(&priv->lock, flags); ipoib_neigh_put(neigh); - return; + return NULL; err_path: ipoib_neigh_free(neigh); @@ -983,6 +991,8 @@ err_drop: spin_unlock_irqrestore(&priv->lock, flags); ipoib_neigh_put(neigh); + + return NULL; } static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, @@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) case htons(ETH_P_TIPC): neigh = ipoib_neigh_get(dev, phdr->hwaddr); if (unlikely(!neigh)) { - neigh_add_path(skb, phdr->hwaddr, dev); - return NETDEV_TX_OK; + neigh = neigh_add_path(skb, phdr->hwaddr, dev); + if (likely(!neigh)) + return NETDEV_TX_OK; } break; case htons(ETH_P_ARP): diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 93e149efc1f5..9b3f47ae2016 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) spin_lock_irqsave(&priv->lock, flags); if (!neigh) { neigh = ipoib_neigh_alloc(daddr, dev); - if (neigh) { + /* Make sure that the neigh will be added only + * once to mcast list. + */ + if (neigh && list_empty(&neigh->list)) { kref_get(&mcast->ah->ref); neigh->ah = mcast->ah; list_add_tail(&neigh->list, &mcast->neigh_list); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 8a1bd354b1cc..bfa576aa9f03 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp) return -ENOMEM; attr->qp_state = IB_QPS_INIT; - attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | - IB_ACCESS_REMOTE_WRITE; + attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE; attr->port_num = ch->sport->port; attr->pkey_index = 0; @@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, goto destroy_ib; } - guid = (__be16 *)¶m->primary_path->sgid.global.interface_id; + guid = (__be16 *)¶m->primary_path->dgid.global.interface_id; snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x", be16_to_cpu(guid[0]), be16_to_cpu(guid[1]), be16_to_cpu(guid[2]), be16_to_cpu(guid[3])); diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 3d8ff09eba57..c868a878c84f 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c @@ -163,7 +163,7 @@ static unsigned int get_time_pit(void) #define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "TSC" -#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE) +#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE) #define GET_TIME(x) do { x = get_cycles(); } while (0) #define DELTA(x,y) ((y)-(x)) #define TIME_NAME "get_cycles" diff --git 
a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index ae473123583b..3d51175c4d72 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf) return union_desc; dev_err(&intf->dev, - "Union descriptor to short (%d vs %zd\n)", + "Union descriptor too short (%d vs %zd)\n", union_desc->bLength, sizeof(*union_desc)); return NULL; } diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index 6bf56bb5f8d9..d91f3b1c5375 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev, 0, width, 0, 0); input_set_abs_params(mtouch, ABS_MT_POSITION_Y, 0, height, 0, 0); - input_set_abs_params(mtouch, ABS_MT_PRESSURE, - 0, 255, 0, 0); ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); if (ret) { diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index b84cd978fce2..a4aaa748e987 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd) case 5: etd->hw_version = 3; break; - case 6 ... 14: + case 6 ... 15: etd->hw_version = 4; break; default: diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index e102d7764bc2..a458e5ec9e41 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -27,6 +27,7 @@ #include <linux/module.h> #include <linux/input.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/platform_device.h> #include <linux/async.h> #include <linux/i2c.h> @@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client, } /* - * Systems using device tree should set up interrupt via DTS, - * the rest will use the default falling edge interrupts. + * Platform code (ACPI, DTS) should normally set up the interrupt + * for us, but in case it did not, let's fall back to using falling + * edge to be compatible with older Chromebooks. */ - irqflags = client->dev.of_node ?
0 : IRQF_TRIGGER_FALLING; + irqflags = irq_get_trigger_type(client->irq); + if (!irqflags) + irqflags = IRQF_TRIGGER_FALLING; error = devm_request_threaded_irq(&client->dev, client->irq, NULL, elants_i2c_irq, diff --git a/drivers/input/touchscreen/hideep.c b/drivers/input/touchscreen/hideep.c index fc080a7c2e1f..f1cd4dd9a4a3 100644 --- a/drivers/input/touchscreen/hideep.c +++ b/drivers/input/touchscreen/hideep.c @@ -10,8 +10,7 @@ #include <linux/of.h> #include <linux/firmware.h> #include <linux/delay.h> -#include <linux/gpio.h> -#include <linux/gpio/machine.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/interrupt.h> diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 7d5eb004091d..97baf88d9505 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4184,7 +4184,7 @@ static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, struct irq_cfg *cfg); static int irq_remapping_activate(struct irq_domain *domain, - struct irq_data *irq_data, bool early) + struct irq_data *irq_data, bool reserve) { struct amd_ir_data *data = irq_data->chip_data; struct irq_2_irte *irte_info = &data->irq_2_irte; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index f122071688fd..744592d330ca 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; domain->geometry.aperture_end = (1UL << ias) - 1; domain->geometry.force_aperture = true; - smmu_domain->pgtbl_ops = pgtbl_ops; ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg); - if (ret < 0) + if (ret < 0) { free_io_pgtable_ops(pgtbl_ops); + return ret; + } - return ret; + smmu_domain->pgtbl_ops = pgtbl_ops; + return 0; } static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) @@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) { - int i; + int i, j; struct arm_smmu_master_data *master = fwspec->iommu_priv; struct arm_smmu_device *smmu = master->smmu; @@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec) u32 sid = fwspec->ids[i]; __le64 *step = arm_smmu_get_step_for_sid(smmu, sid); + /* Bridged PCI devices may end up with duplicated IDs */ + for (j = 0; j < i; j++) + if (fwspec->ids[j] == sid) + break; + if (j < i) + continue; + arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste); } } diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 76a193c7fcfc..66f69af2c219 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -1397,7 +1397,7 @@ static void intel_irq_remapping_free(struct irq_domain *domain, } static int intel_irq_remapping_activate(struct irq_domain *domain, - struct irq_data *irq_data, bool early) + struct irq_data *irq_data, bool reserve) { intel_ir_reconfigure_irte(irq_data, true); return 0; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 4039e64cd342..06f025fd5726 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -2303,7 +2303,7 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, } static int its_irq_domain_activate(struct irq_domain *domain, - struct irq_data *d, bool early) + struct irq_data *d, bool 
reserve) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); @@ -2818,7 +2818,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq } static int its_vpe_irq_domain_activate(struct irq_domain *domain, - struct irq_data *d, bool early) + struct irq_data *d, bool reserve) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); struct its_node *its; diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 06f29cf5018a..cee59fe1321c 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -342,6 +342,9 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id) */ static struct lock_class_key intc_irqpin_irq_lock_class; +/* And this is for the request mutex */ +static struct lock_class_key intc_irqpin_irq_request_class; + static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { @@ -352,7 +355,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq, intc_irqpin_dbg(&p->irq[hw], "map"); irq_set_chip_data(virq, h->host_data); - irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class); + irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class, + &intc_irqpin_irq_request_class); irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); return 0; } diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index fd83c7f77a95..ede4fa0ac2cc 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -188,6 +188,7 @@ void led_blink_set(struct led_classdev *led_cdev, { del_timer_sync(&led_cdev->blink_timer); + clear_bit(LED_BLINK_SW, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags); clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index 09cf3699e354..a307832d7e45 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -184,6 +184,7 @@ static struct irq_chip arizona_irq_chip = { }; static struct lock_class_key arizona_irq_lock_class; +static struct lock_class_key arizona_irq_request_class; static int arizona_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) @@ -191,7 +192,8 @@ static int arizona_irq_map(struct irq_domain *h, unsigned int virq, struct arizona *data = h->host_data; irq_set_chip_data(virq, data); - irq_set_lockdep_class(virq, &arizona_irq_lock_class); + irq_set_lockdep_class(virq, &arizona_irq_lock_class, + &arizona_irq_request_class); irq_set_chip_and_handler(virq, &arizona_irq_chip, handle_simple_irq); irq_set_nested_thread(virq, 1); irq_set_noprobe(virq); diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index 590fb9aad77d..c3ed885c155c 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -1543,6 +1543,9 @@ static void rtsx_pci_shutdown(struct pci_dev *pcidev) rtsx_pci_power_off(pcr, HOST_ENTER_S1); pci_disable_device(pcidev); + free_irq(pcr->irq, (void *)pcr); + if (pcr->msi_en) + pci_disable_msi(pcr->pci); } #else /* CONFIG_PM */ diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 90b9a9ccbe60..9285f60e5783 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -963,6 +963,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command) switch (command) { case NAND_CMD_READ0: + case NAND_CMD_READOOB: case NAND_CMD_PAGEPROG: info->use_ecc = 1; break; diff --git 
a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 3cd371c94e83..634c51e6b8ae 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -543,7 +543,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) data = be32_to_cpup((__be32 *)&cf->data[0]); priv->write(data, &priv->tx_mb->data[0]); } - if (cf->can_dlc > 3) { + if (cf->can_dlc > 4) { data = be32_to_cpup((__be32 *)&cf->data[4]); priv->write(data, &priv->tx_mb->data[1]); } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index b00358297424..12ff0020ecd6 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) if (dev->can.state == CAN_STATE_ERROR_WARNING || dev->can.state == CAN_STATE_ERROR_PASSIVE) { + cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 68ac3e88a8ce..8bf80ad9dc44 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev) dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)", rc); - return rc; + return (rc > 0) ? 0 : rc; } static void gs_usb_xmit_callback(struct urb *urb) diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 5d1753cfacea..ed6828821fbd 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, tbp = peer_tb; } - if (tbp[IFLA_IFNAME]) { + if (ifmp && tbp[IFLA_IFNAME]) { nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 561b05089cb6..db830a1141d9 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1497,10 +1497,13 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port) { struct b53_device *dev = ds->priv; - /* Older models support a different tag format that we do not - * support in net/dsa/tag_brcm.c yet. + /* Older models (5325, 5365) support a different tag format that we do + * not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed + * mode to be turned on which means we need to specifically manage ARL + * misses on multicast addresses (TBD). */ - if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port)) + if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) || + !b53_can_enable_brcm_tags(ds, port)) return DSA_TAG_PROTO_NONE; /* Broadcom BCM58xx chips have a flow accelerator on Port 8 diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index f412aad58253..2dead7fa1f93 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -249,7 +249,6 @@ static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg) return -EIO; } -/* Wait a while until mask & reg == value. Otherwise return timeout. */ static int lan9303_read_wait(struct lan9303 *chip, int offset, u32 mask) { int i; @@ -480,7 +479,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) { int reg; - /* depending on the 'phy_addr_sel_strap' setting, the three phys are + /* Calculate chip->phy_addr_base: + * Depending on the 'phy_addr_sel_strap' setting, the three phys are * using IDs 0-1-2 or IDs 1-2-3. 
We cannot read back the * 'phy_addr_sel_strap' setting directly, so we need a test, which * configuration is active: @@ -495,13 +495,10 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) return reg; } - if ((reg != 0) && (reg != 0xffff)) - chip->phy_addr_sel_strap = 1; - else - chip->phy_addr_sel_strap = 0; + chip->phy_addr_base = reg != 0 && reg != 0xffff; dev_dbg(chip->dev, "Phy setup '%s' detected\n", - chip->phy_addr_sel_strap ? "1-2-3" : "0-1-2"); + chip->phy_addr_base ? "1-2-3" : "0-1-2"); return 0; } @@ -541,20 +538,19 @@ lan9303_alr_cache_find_mac(struct lan9303 *chip, const u8 *mac_addr) return NULL; } -/* Wait a while until mask & reg == value. Otherwise return timeout. */ -static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, - int mask, char value) +static int lan9303_csr_reg_wait(struct lan9303 *chip, int regno, u32 mask) { int i; - for (i = 0; i < 0x1000; i++) { + for (i = 0; i < 25; i++) { u32 reg; lan9303_read_switch_reg(chip, regno, ®); - if ((reg & mask) == value) + if (!(reg & mask)) return 0; usleep_range(1000, 2000); } + return -ETIMEDOUT; } @@ -564,8 +560,7 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1) lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_WR_DAT_1, dat1); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, LAN9303_ALR_CMD_MAKE_ENTRY); - lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND, - 0); + lan9303_csr_reg_wait(chip, LAN9303_SWE_ALR_CMD_STS, ALR_STS_MAKE_PEND); lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0); return 0; @@ -870,7 +865,7 @@ static int lan9303_check_device(struct lan9303 *chip) if ((reg >> 16) != LAN9303_CHIP_ID) { dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n", reg >> 16); - return ret; + return -ENODEV; } /* The default state of the LAN9303 device is to forward packets between @@ -1022,7 +1017,7 @@ static int lan9303_get_sset_count(struct dsa_switch *ds) static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) { struct lan9303 *chip = ds->priv; - int phy_base = chip->phy_addr_sel_strap; + int phy_base = chip->phy_addr_base; if (phy == phy_base) return lan9303_virt_phy_reg_read(chip, regnum); @@ -1036,7 +1031,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val) { struct lan9303 *chip = ds->priv; - int phy_base = chip->phy_addr_sel_strap; + int phy_base = chip->phy_addr_base; if (phy == phy_base) return lan9303_virt_phy_reg_write(chip, regnum, val); @@ -1073,7 +1068,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port, res = lan9303_phy_write(ds, port, MII_BMCR, ctl); - if (port == chip->phy_addr_sel_strap) { + if (port == chip->phy_addr_base) { /* Virtual Phy: Remove Turbo 200Mbit mode */ lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl); @@ -1097,8 +1092,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, struct lan9303 *chip = ds->priv; lan9303_disable_processing_port(chip, port); - lan9303_phy_write(ds, chip->phy_addr_sel_strap + port, - MII_BMCR, BMCR_PDOWN); + lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); } static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, @@ -1286,13 +1280,16 @@ static const struct dsa_switch_ops lan9303_switch_ops = { static int lan9303_register_switch(struct lan9303 *chip) { + int base; + chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS); if (!chip->ds) return -ENOMEM; chip->ds->priv = chip; chip->ds->ops = &lan9303_switch_ops; - chip->ds->phys_mii_mask = 
chip->phy_addr_sel_strap ? 0xe : 0x7; + base = chip->phy_addr_base; + chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base); return dsa_register_switch(chip->ds); } diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index f4e13a7014bd..36c8950dbd2d 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -602,7 +602,7 @@ struct vortex_private { struct sk_buff* rx_skbuff[RX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; unsigned int cur_rx, cur_tx; /* The next free ring entry */ - unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ + unsigned int dirty_tx; /* The ring entries to be free()ed. */ struct vortex_extra_stats xstats; /* NIC-specific extra stats */ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */ @@ -618,7 +618,6 @@ struct vortex_private { /* The remainder are related to chip state, mostly media selection. */ struct timer_list timer; /* Media selection timer. */ - struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */ int options; /* User-settable misc. driver options. */ unsigned int media_override:4, /* Passed-in media type. */ default_media:4, /* Read from the EEPROM/Wn3_Config. */ @@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *vp, int phy_id, int location, int value); static void vortex_timer(struct timer_list *t); -static void rx_oom_timer(struct timer_list *t); static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, @@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev) timer_setup(&vp->timer, vortex_timer, 0); mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); - timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0); if (vortex_debug > 1) pr_debug("%s: Initial media type %s.\n", @@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev) window_write16(vp, 0x0040, 4, Wn4_NetDiag); if (vp->full_bus_master_rx) { /* Boomerang bus master. */ - vp->cur_rx = vp->dirty_rx = 0; + vp->cur_rx = 0; /* Initialize the RxEarly register as recommended. */ iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD); iowrite32(0x0020, ioaddr + PktStatus); @@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev) struct vortex_private *vp = netdev_priv(dev); int i; int retval; + dma_addr_t dma; /* Use the now-standard shared IRQ implementation. */ if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? @@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev) break; /* Bad news! 
*/ skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ - vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); + dma = pci_map_single(VORTEX_PCI(vp), skb->data, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma)) + break; + vp->rx_ring[i].addr = cpu_to_le32(dma); } if (i != RX_RING_SIZE) { pr_emerg("%s: no memory for rx ring\n", dev->name); @@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) int len = (skb->len + 3) & ~3; vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + spin_lock_irq(&vp->window_lock); window_set(vp, 7); iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr); @@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev) int entry = vp->cur_rx % RX_RING_SIZE; void __iomem *ioaddr = vp->ioaddr; int rx_status; - int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx; + int rx_work_limit = RX_RING_SIZE; if (vortex_debug > 5) pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS)); @@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev) } else { /* The packet length: up to 4.5K!. */ int pkt_len = rx_status & 0x1fff; - struct sk_buff *skb; + struct sk_buff *skb, *newskb; + dma_addr_t newdma; dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr); if (vortex_debug > 4) @@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev) pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); vp->rx_copy++; } else { + /* Pre-allocate the replacement skb. If it or its + * mapping fails then recycle the buffer that's already + * in place + */ + newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); + if (!newskb) { + dev->stats.rx_dropped++; + goto clear_complete; + } + newdma = pci_map_single(VORTEX_PCI(vp), newskb->data, + PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) { + dev->stats.rx_dropped++; + consume_skb(newskb); + goto clear_complete; + } + /* Pass up the skbuff already on the Rx ring. */ skb = vp->rx_skbuff[entry]; - vp->rx_skbuff[entry] = NULL; + vp->rx_skbuff[entry] = newskb; + vp->rx_ring[entry].addr = cpu_to_le32(newdma); skb_put(skb, pkt_len); pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); vp->rx_nocopy++; @@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev) netif_rx(skb); dev->stats.rx_packets++; } - entry = (++vp->cur_rx) % RX_RING_SIZE; - } - /* Refill the Rx ring buffers. */ - for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) { - struct sk_buff *skb; - entry = vp->dirty_rx % RX_RING_SIZE; - if (vp->rx_skbuff[entry] == NULL) { - skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ); - if (skb == NULL) { - static unsigned long last_jif; - if (time_after(jiffies, last_jif + 10 * HZ)) { - pr_warn("%s: memory shortage\n", - dev->name); - last_jif = jiffies; - } - if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) - mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); - break; /* Bad news! */ - } - vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); - vp->rx_skbuff[entry] = skb; - } +clear_complete: vp->rx_ring[entry].status = 0; /* Clear complete bit.
*/ iowrite16(UpUnstall, ioaddr + EL3_CMD); + entry = (++vp->cur_rx) % RX_RING_SIZE; } return 0; } -/* - * If we've hit a total OOM refilling the Rx ring we poll once a second - * for some memory. Otherwise there is no way to restart the rx process. - */ -static void -rx_oom_timer(struct timer_list *t) -{ - struct vortex_private *vp = from_timer(vp, t, rx_oom_timer); - struct net_device *dev = vp->mii.dev; - - spin_lock_irq(&vp->lock); - if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ - boomerang_rx(dev); - if (vortex_debug > 1) { - pr_debug("%s: rx_oom_timer %s\n", dev->name, - ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); - } - spin_unlock_irq(&vp->lock); -} - static void vortex_down(struct net_device *dev, int final_down) { @@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down) netdev_reset_queue(dev); netif_stop_queue(dev); - del_timer_sync(&vp->rx_oom_timer); del_timer_sync(&vp->timer); /* Turn off statistics ASAP. We update dev->stats below. */ diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index c60421339a98..d50519ed7549 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -170,6 +170,7 @@ source "drivers/net/ethernet/sis/Kconfig" source "drivers/net/ethernet/sfc/Kconfig" source "drivers/net/ethernet/sgi/Kconfig" source "drivers/net/ethernet/smsc/Kconfig" +source "drivers/net/ethernet/socionext/Kconfig" source "drivers/net/ethernet/stmicro/Kconfig" source "drivers/net/ethernet/sun/Kconfig" source "drivers/net/ethernet/tehuti/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 39f6273358ed..6cf5aded9423 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -82,6 +82,7 @@ obj-$(CONFIG_SFC) += sfc/ obj-$(CONFIG_SFC_FALCON) += sfc/falcon/ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/ +obj-$(CONFIG_NET_VENDOR_SOCIONEXT) += socionext/ obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/ obj-$(CONFIG_NET_VENDOR_SUN) += sun/ obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index b11e573ad57a..ea149c134e15 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -504,3 +504,14 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) return 0; } + +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq) +{ + struct ena_eth_io_rx_cdesc_base *cdesc; + + cdesc = ena_com_get_next_rx_cdesc(io_cq); + if (cdesc) + return false; + else + return true; +} diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index bb53c3a4f8e9..2f7657227cfe 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -88,6 +88,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id); +bool ena_com_cq_empty(struct ena_com_io_cq *io_cq); + static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq, struct ena_eth_io_intr_reg *intr_reg) { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 97c5a89a9cf7..6975150d144e 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -75,6 +75,9 @@ static struct 
workqueue_struct *ena_wq; MODULE_DEVICE_TABLE(pci, ena_pci_tbl); static int ena_rss_init_default(struct ena_adapter *adapter); +static void check_for_admin_com_state(struct ena_adapter *adapter); +static void ena_destroy_device(struct ena_adapter *adapter); +static int ena_restore_device(struct ena_adapter *adapter); static void ena_tx_timeout(struct net_device *dev) { @@ -158,6 +161,8 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter, ring->per_napi_packets = 0; ring->per_napi_bytes = 0; ring->cpu = 0; + ring->first_interrupt = false; + ring->no_interrupt_event_cnt = 0; u64_stats_init(&ring->syncp); } @@ -1274,6 +1279,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data) { struct ena_napi *ena_napi = data; + ena_napi->tx_ring->first_interrupt = true; + ena_napi->rx_ring->first_interrupt = true; + napi_schedule_irqoff(&ena_napi->napi); return IRQ_HANDLED; @@ -1565,7 +1573,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) static int ena_up_complete(struct ena_adapter *adapter) { - int rc, i; + int rc; rc = ena_rss_configure(adapter); if (rc) @@ -1584,17 +1592,6 @@ static int ena_up_complete(struct ena_adapter *adapter) ena_napi_enable_all(adapter); - /* Enable completion queues interrupt */ - for (i = 0; i < adapter->num_queues; i++) - ena_unmask_interrupt(&adapter->tx_ring[i], - &adapter->rx_ring[i]); - - /* schedule napi in case we had pending packets - * from the last time we disable napi - */ - for (i = 0; i < adapter->num_queues; i++) - napi_schedule(&adapter->ena_napi[i].napi); - return 0; } @@ -1731,7 +1728,7 @@ create_err: static int ena_up(struct ena_adapter *adapter) { - int rc; + int rc, i; netdev_dbg(adapter->netdev, "%s\n", __func__); @@ -1774,6 +1771,17 @@ static int ena_up(struct ena_adapter *adapter) set_bit(ENA_FLAG_DEV_UP, &adapter->flags); + /* Enable completion queues interrupt */ + for (i = 0; i < adapter->num_queues; i++) + ena_unmask_interrupt(&adapter->tx_ring[i], + &adapter->rx_ring[i]); + + /* schedule napi in case we had pending packets + * from the last time we disabled napi + */ + for (i = 0; i < adapter->num_queues; i++) + napi_schedule(&adapter->ena_napi[i].napi); + return rc; err_up: @@ -1884,6 +1892,17 @@ static int ena_close(struct net_device *netdev) if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) ena_down(adapter); + /* Check for device status and issue reset if needed */ + check_for_admin_com_state(adapter); + if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + netif_err(adapter, ifdown, adapter->netdev, + "Destroy failure, restarting device\n"); + ena_dump_stats_to_dmesg(adapter); + /* rtnl lock already obtained in dev_ioctl() layer */ + ena_destroy_device(adapter); + ena_restore_device(adapter); + } + return 0; } @@ -2544,11 +2563,12 @@ static void ena_destroy_device(struct ena_adapter *adapter) ena_com_set_admin_running_state(ena_dev, false); - ena_close(netdev); + if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + ena_down(adapter); /* Before releasing the ENA resources, a device reset is required. * (to prevent the device from accessing them). - * In case the reset flag is set and the device is up, ena_close + * In case the reset flag is set and the device is up, ena_down() * already perform the reset, so it can be skipped.
*/ if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) @@ -2648,8 +2668,32 @@ static void ena_fw_reset_device(struct work_struct *work) rtnl_unlock(); } -static int check_missing_comp_in_queue(struct ena_adapter *adapter, - struct ena_ring *tx_ring) +static int check_for_rx_interrupt_queue(struct ena_adapter *adapter, + struct ena_ring *rx_ring) +{ + if (likely(rx_ring->first_interrupt)) + return 0; + + if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) + return 0; + + rx_ring->no_interrupt_event_cnt++; + + if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { + netif_err(adapter, rx_err, adapter->netdev, + "Potential MSIX issue on Rx side Queue = %d. Reset the device\n", + rx_ring->qid); + adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; + smp_mb__before_atomic(); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + return -EIO; + } + + return 0; +} + +static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter, + struct ena_ring *tx_ring) { struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; @@ -2659,8 +2703,27 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; last_jiffies = tx_buf->last_jiffies; - if (unlikely(last_jiffies && - time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) { + + if (last_jiffies == 0) + /* no pending Tx at this location */ + continue; + + if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies + + 2 * adapter->missing_tx_completion_to))) { + /* If after graceful period interrupt is still not + * received, we schedule a reset + */ + netif_err(adapter, tx_err, adapter->netdev, + "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", + tx_ring->qid); + adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT; + smp_mb__before_atomic(); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + return -EIO; + } + + if (unlikely(time_is_before_jiffies(last_jiffies + + adapter->missing_tx_completion_to))) { if (!tx_buf->print_once) netif_notice(adapter, tx_err, adapter->netdev, "Found a Tx that wasn't completed on time, qid %d, index %d.\n", @@ -2689,9 +2752,10 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, return rc; } -static void check_for_missing_tx_completions(struct ena_adapter *adapter) +static void check_for_missing_completions(struct ena_adapter *adapter) { struct ena_ring *tx_ring; + struct ena_ring *rx_ring; int i, budget, rc; /* Make sure the driver doesn't turn the device in other process */ @@ -2710,8 +2774,13 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { tx_ring = &adapter->tx_ring[i]; + rx_ring = &adapter->rx_ring[i]; + + rc = check_missing_comp_in_tx_queue(adapter, tx_ring); + if (unlikely(rc)) + return; - rc = check_missing_comp_in_queue(adapter, tx_ring); + rc = check_for_rx_interrupt_queue(adapter, rx_ring); if (unlikely(rc)) return; @@ -2870,7 +2939,7 @@ static void ena_timer_service(struct timer_list *t) check_for_admin_com_state(adapter); - check_for_missing_tx_completions(adapter); + check_for_missing_completions(adapter); check_for_empty_rx_ring(adapter); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 3bbc003871de..f1972b5ab650 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -44,7 +44,7 @@ 
#include "ena_eth_com.h" #define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 3 +#define DRV_MODULE_VER_MINOR 5 #define DRV_MODULE_VER_SUBMINOR 0 #define DRV_MODULE_NAME "ena" @@ -122,6 +122,7 @@ * We wait for 6 sec just to be on the safe side. */ #define ENA_DEVICE_KALIVE_TIMEOUT (6 * HZ) +#define ENA_MAX_NO_INTERRUPT_ITERATIONS 3 #define ENA_MMIO_DISABLE_REG_READ BIT(0) @@ -236,6 +237,9 @@ struct ena_ring { /* The maximum header length the device can handle */ u8 tx_max_header_size; + bool first_interrupt; + u16 no_interrupt_event_cnt; + /* cpu for TPH */ int cpu; /* number of tx/rx_buffer_info's entries */ diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h index 9aec43c5bba8..48ca97fbe7bc 100644 --- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h @@ -60,6 +60,8 @@ enum ena_regs_reset_reason_types { ENA_REGS_RESET_USER_TRIGGER = 12, ENA_REGS_RESET_GENERIC = 13, + + ENA_REGS_RESET_MISS_INTERRUPT = 14, }; /* ena_registers offsets */ diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 1fbbbabe7588..14a59e51db67 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -2128,27 +2128,25 @@ static int bcm_enetsw_open(struct net_device *dev) /* allocate rx dma ring */ size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate rx ring %u\n", size); ret = -ENOMEM; goto out_freeirq_tx; } - memset(p, 0, size); priv->rx_desc_alloc_size = size; priv->rx_desc_cpu = p; /* allocate tx dma ring */ size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); - p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); if (!p) { dev_err(kdev, "cannot allocate tx ring\n"); ret = -ENOMEM; goto out_free_rx_ring; } - memset(p, 0, size); priv->tx_desc_alloc_size = size; priv->tx_desc_cpu = p; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 087f01b4dc3a..f15a8fc6dfc9 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1216,18 +1216,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, goto out; } - /* The Ethernet switch we are interfaced with needs packets to be at - * least 64 bytes (including FCS) otherwise they will be discarded when - * they enter the switch port logic. When Broadcom tags are enabled, we - * need to make sure that packets are at least 68 bytes - * (including FCS and tag) because the length verification is done after - * the Broadcom tag is stripped off the ingress packet. 
- */ - if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) { - ret = NETDEV_TX_OK; - goto out; - } - /* Insert TSB and checksum infos */ if (priv->tsb_en) { skb = bcm_sysport_insert_tsb(skb, dev); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 1d96cd594ade..8eef9fb6b1fe 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -128,8 +128,6 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, dma_desc->ctl1 = cpu_to_le32(ctl1); } -#define ENET_BRCM_TAG_LEN 4 - static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) @@ -142,18 +140,6 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, u32 flags; int i; - /* The Ethernet switch we are interfaced with needs packets to be at - * least 64 bytes (including FCS) otherwise they will be discarded when - * they enter the switch port logic. When Broadcom tags are enabled, we - * need to make sure that packets are at least 68 bytes - * (including FCS and tag) because the length verification is done after - * the Broadcom tag is stripped off the ingress packet. - */ - if (netdev_uses_dsa(net_dev)) { - if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) - goto err_stats; - } - if (skb->len > BGMAC_DESC_CTL1_LEN) { netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); goto err_drop; @@ -240,7 +226,6 @@ err_dma_head: err_drop: dev_kfree_skb(skb); -err_stats: net_dev->stats.tx_dropped++; net_dev->stats.tx_errors++; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 01b7f2fc249c..d7c98e807ca8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -738,8 +738,9 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum); break; default: - WARN_ONCE(1, "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", - be16_to_cpu(skb->protocol)); + netdev_WARN_ONCE(bp->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + be16_to_cpu(skb->protocol)); } } #endif @@ -3029,7 +3030,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) del_timer_sync(&bp->timer); - if (IS_PF(bp)) { + if (IS_PF(bp) && !BP_NOMCP(bp)) { /* Set ALWAYS_ALIVE bit in shmem */ bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE; bnx2x_drv_pulse(bp); @@ -3115,7 +3116,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bp->cnic_loaded = false; /* Clear driver version indication in shmem */ - if (IS_PF(bp)) + if (IS_PF(bp) && !BP_NOMCP(bp)) bnx2x_update_mng_version(bp); /* Check if there are pending parity attentions. If there are - set diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 4d0654813de1..7b08323e3f3d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9578,6 +9578,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp) do { bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR); + + /* If we read all 0xFFs, it means we are in PCI error state and + * should bail out to avoid crashes on the adapter's FW reads.
+ */ + if (bp->common.shmem_base == 0xFFFFFFFF) { + bp->flags |= NO_MCP_FLAG; + return -ENODEV; + } + if (bp->common.shmem_base) { val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); if (val & SHR_MEM_VALIDITY_MB) @@ -14322,7 +14331,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) BNX2X_ERR("IO slot reset --> driver unload\n"); /* MCP should have been reset; Need to wait for validity */ - bnx2x_init_shmem(bp); + if (bnx2x_init_shmem(bp)) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) { u32 v; diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 59c8ec9c1cad..7c560d545c03 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9efbdc6f1fcb..cf6ebf1e324b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1645,6 +1645,8 @@ next_rx: rxr->rx_next_cons = NEXT_RX(cons); next_rx_no_prod: + cpr->rx_packets += 1; + cpr->rx_bytes += len; *raw_cons = tmp_raw_cons; return rc; @@ -1802,6 +1804,7 @@ static irqreturn_t bnxt_msix(int irq, void *dev_instance) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; u32 cons = RING_CMP(cpr->cp_raw_cons); + cpr->event_ctr++; prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]); napi_schedule(&bnapi->napi); return IRQ_HANDLED; @@ -2025,6 +2028,15 @@ static int bnxt_poll(struct napi_struct *napi, int budget) break; } } + if (bp->flags & BNXT_FLAG_DIM) { + struct net_dim_sample dim_sample; + + net_dim_sample(cpr->event_ctr, + cpr->rx_packets, + cpr->rx_bytes, + &dim_sample); + net_dim(&cpr->dim, dim_sample); + } mmiowb(); return work_done; } @@ -2247,6 +2259,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp) if (rxr->xdp_prog) bpf_prog_put(rxr->xdp_prog); + if (xdp_rxq_info_is_reg(&rxr->xdp_rxq)) + xdp_rxq_info_unreg(&rxr->xdp_rxq); + kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -2280,6 +2295,10 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) ring = &rxr->rx_ring_struct; + rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i); + if (rc < 0) + return rc; + rc = bnxt_alloc_ring(bp, ring); if (rc) return rc; @@ -2610,6 +2629,8 @@ static void bnxt_init_cp_rings(struct bnxt *bp) struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; ring->fw_ring_id = INVALID_HW_RING_ID; + cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks; + cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs; } } @@ -2834,6 +2855,9 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->cp_ring_mask = bp->cp_bit - 1; } +/* Changing allocation mode of RX rings. + * TODO: Update when extending xdp_rxq_info to support allocation modes. 
+ */ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { if (page_mode) { @@ -4583,6 +4607,36 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, req->flags = cpu_to_le16(flags); } +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_coal coal; + unsigned int grp_idx; + + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. + */ + memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal)); + + coal.coal_ticks = cpr->rx_ring_coal.coal_ticks; + coal.coal_bufs = cpr->rx_ring_coal.coal_bufs; + + if (!bnapi->rx_ring) + return -ENODEV; + + bnxt_hwrm_cmd_hdr_init(bp, &req_rx, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + + bnxt_hwrm_set_coal_params(&coal, &req_rx); + + grp_idx = bnapi->index; + req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + + return hwrm_send_message(bp, &req_rx, sizeof(req_rx), + HWRM_CMD_TIMEOUT); +} + int bnxt_hwrm_set_coal(struct bnxt *bp) { int i, rc = 0; @@ -5705,7 +5759,13 @@ static void bnxt_enable_napi(struct bnxt *bp) int i; for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; bp->bnapi[i]->in_reset = false; + + if (bp->bnapi[i]->rx_ring) { + INIT_WORK(&cpr->dim.work, bnxt_dim_work); + cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } napi_enable(&bp->bnapi[i]->napi); } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5359a1f0045f..89887a88b1bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -23,6 +23,8 @@ #include <net/devlink.h> #include <net/dst_metadata.h> #include <net/switchdev.h> +#include <net/xdp.h> +#include <linux/net_dim.h> struct tx_bd { __le32 tx_bd_len_flags_type; @@ -607,6 +609,17 @@ struct bnxt_tx_ring_info { struct bnxt_ring_struct tx_ring_struct; }; +struct bnxt_coal { + u16 coal_ticks; + u16 coal_ticks_irq; + u16 coal_bufs; + u16 coal_bufs_irq; + /* RING_IDLE enabled when coal ticks < idle_thresh */ + u16 idle_thresh; + u8 bufs_per_record; + u8 budget; +}; + struct bnxt_tpa_info { void *data; u8 *data_ptr; @@ -664,12 +677,20 @@ struct bnxt_rx_ring_info { struct bnxt_ring_struct rx_ring_struct; struct bnxt_ring_struct rx_agg_ring_struct; + struct xdp_rxq_info xdp_rxq; }; struct bnxt_cp_ring_info { u32 cp_raw_cons; void __iomem *cp_doorbell; + struct bnxt_coal rx_ring_coal; + u64 rx_packets; + u64 rx_bytes; + u64 event_ctr; + + struct net_dim dim; + struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; @@ -944,17 +965,6 @@ struct bnxt_test_info { #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 -struct bnxt_coal { - u16 coal_ticks; - u16 coal_ticks_irq; - u16 coal_bufs; - u16 coal_bufs_irq; - /* RING_IDLE enabled when coal ticks < idle_thresh */ - u16 idle_thresh; - u8 bufs_per_record; - u8 budget; -}; - struct bnxt_tc_flow_stats { u64 packets; u64 bytes; @@ -1126,6 +1136,7 @@ struct bnxt { #define BNXT_FLAG_DOUBLE_DB 0x400000 #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 + #define BNXT_FLAG_DIM 0x2000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -1423,4 +1434,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt 
*bp); int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); +void bnxt_dim_work(struct work_struct *work); +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi); + #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index fed37cd9ae1d..3c746f2d9ed8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -278,12 +278,11 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, n = IEEE_8021QAZ_MAX_TCS; data_len = sizeof(*data) + sizeof(*fw_app) * n; - data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, - GFP_KERNEL); + data = dma_zalloc_coherent(&bp->pdev->dev, data_len, &mapping, + GFP_KERNEL); if (!data) return -ENOMEM; - memset(data, 0, data_len); bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1); get.dest_data_addr = cpu_to_le64(mapping); get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c new file mode 100644 index 000000000000..408dd190331e --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dim.c @@ -0,0 +1,32 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017-2018 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <linux/net_dim.h> +#include "bnxt_hsi.h" +#include "bnxt.h" + +void bnxt_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, + work); + struct bnxt_cp_ring_info *cpr = container_of(dim, + struct bnxt_cp_ring_info, + dim); + struct bnxt_napi *bnapi = container_of(cpr, + struct bnxt_napi, + cp_ring); + struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, + dim->profile_ix); + + cpr->rx_ring_coal.coal_ticks = cur_profile.usec; + cpr->rx_ring_coal.coal_bufs = cur_profile.pkts; + + bnxt_hwrm_set_ring_coal(bnapi->bp, bnapi); + dim->state = NET_DIM_START_MEASURE; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index fe7599f404bf..1801582076be 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -49,6 +49,8 @@ static int bnxt_get_coalesce(struct net_device *dev, memset(coal, 0, sizeof(*coal)); + coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM; + hw_coal = &bp->rx_coal; mult = hw_coal->bufs_per_record; coal->rx_coalesce_usecs = hw_coal->coal_ticks; @@ -77,6 +79,15 @@ static int bnxt_set_coalesce(struct net_device *dev, int rc = 0; u16 mult; + if (coal->use_adaptive_rx_coalesce) { + bp->flags |= BNXT_FLAG_DIM; + } else { + if (bp->flags & BNXT_FLAG_DIM) { + bp->flags &= ~(BNXT_FLAG_DIM); + goto reset_coalesce; + } + } + hw_coal = &bp->rx_coal; mult = hw_coal->bufs_per_record; hw_coal->coal_ticks = coal->rx_coalesce_usecs; @@ -104,6 +115,7 @@ static int bnxt_set_coalesce(struct net_device *dev, update_stats = true; } +reset_coalesce: if (netif_running(dev)) { if (update_stats) { rc = bnxt_close_nic(bp, true, false); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 5ee18660bc33..c9617675f934 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c 
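The bnxt changes above wire each completion ring into the kernel's net_dim library: the NAPI poll path feeds an (events, packets, bytes) sample to net_dim(), and when the algorithm picks a new moderation profile it schedules dim.work, whose handler (bnxt_dim_work) writes the chosen tick/buffer values into the ring via HWRM. A compressed model of that control loop follows; the profile table, dim_work() and hwrm_set_ring_coal() here are hypothetical stand-ins for the net_dim and firmware interfaces, not the driver's actual API.

#include <stdio.h>

/* Hypothetical moderation profile: usecs to wait / packets to batch
 * before raising an interrupt (net_dim keeps a similar table). */
struct profile { unsigned int usec, pkts; };

static const struct profile profiles[] = {
        { 2, 1 }, { 8, 16 }, { 32, 64 }, { 128, 256 },
};

/* Per-ring coalescing state, cf. rx_ring_coal in bnxt_cp_ring_info above. */
struct ring_coal { unsigned int coal_ticks, coal_bufs; };

/* Stand-in for bnxt_hwrm_set_ring_coal(): push per-ring values to FW. */
static void hwrm_set_ring_coal(int qid, const struct ring_coal *rc)
{
        printf("ring %d: %u usec / %u bufs\n", qid, rc->coal_ticks, rc->coal_bufs);
}

/* Stand-in for bnxt_dim_work(): apply the profile net_dim selected. */
static void dim_work(int qid, unsigned int profile_ix, struct ring_coal *rc)
{
        rc->coal_ticks = profiles[profile_ix].usec;
        rc->coal_bufs = profiles[profile_ix].pkts;
        hwrm_set_ring_coal(qid, rc);
        /* the real handler then sets dim->state = NET_DIM_START_MEASURE */
}

int main(void)
{
        struct ring_coal rc = { 0, 0 };

        /* Pretend the algorithm decided a busier profile fits the traffic. */
        dim_work(0, 2, &rc);
        return 0;
}

Ethtool's use_adaptive_rx_coalesce toggles this whole loop on and off, which is exactly what the BNXT_FLAG_DIM handling in bnxt_set_coalesce() above does.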
@@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) netdev_err(bp->dev, "vf ndo called though sriov is disabled\n"); return -EINVAL; } - if (vf_id >= bp->pf.max_vfs) { + if (vf_id >= bp->pf.active_vfs) { netdev_err(bp->dev, "Invalid VF id %d\n", vf_id); return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 2ae5ed151369..1d9b08c20f95 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -416,7 +416,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, } /* If all IP and L4 fields are wildcarded then this is an L2 flow */ - if (is_wildcard(&l3_mask, sizeof(l3_mask)) && + if (is_wildcard(l3_mask, sizeof(*l3_mask)) && is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) { flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2; } else { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 261e5847557a..1389ab5e05df 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -96,6 +96,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, xdp.data = *data_ptr; xdp_set_data_meta_invalid(&xdp); xdp.data_end = *data_ptr + *len; + xdp.rxq = &rxr->xdp_rxq; orig_data = xdp.data; mapping = rx_buf->mapping - bp->rx_dma_offset; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index d09c5a9c53b5..a77ee2f8fb8d 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -4,11 +4,13 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2005-2014 Broadcom Corporation. + * Copyright (C) 2005-2016 Broadcom Corporation. + * Copyright (C) 2016-2017 Broadcom Limited. * * Firmware is: * Derived from proprietary unpublished source code, - * Copyright (C) 2000-2003 Broadcom Corporation. + * Copyright (C) 2000-2016 Broadcom Corporation. + * Copyright (C) 2016-2017 Broadcom Ltd. * * Permission is hereby granted for the distribution of this firmware * data in hexadecimal or equivalent format, provided this copyright @@ -3225,7 +3227,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp, return 0; } -#define NVRAM_CMD_TIMEOUT 5000 +#define NVRAM_CMD_TIMEOUT 10000 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) { @@ -10052,6 +10054,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy) tw32(GRC_MODE, tp->grc_mode | val); + /* On one of the AMD platforms, MRRS is restricted to 4000 because of + * a south bridge limitation. As a workaround, the driver sets MRRS + * to 2048 instead of the default 4096. + */ + if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) { + val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK; + tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048); + } + /* Setup the timer prescaler register. Clock is always 66MHz.
*/ val = tr32(GRC_MISC_CFG); val &= ~0xff; @@ -14227,7 +14239,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu) */ if (tg3_asic_rev(tp) == ASIC_REV_57766 || tg3_asic_rev(tp) == ASIC_REV_5717 || - tg3_asic_rev(tp) == ASIC_REV_5719) + tg3_asic_rev(tp) == ASIC_REV_5719 || + tg3_asic_rev(tp) == ASIC_REV_5720) reset_phy = true; err = tg3_restart_hw(tp, reset_phy); @@ -14776,7 +14789,7 @@ static void tg3_get_5717_nvram_info(struct tg3 *tp) static void tg3_get_5720_nvram_info(struct tg3 *tp) { - u32 nvcfg1, nvmpinstrp; + u32 nvcfg1, nvmpinstrp, nv_status; nvcfg1 = tr32(NVRAM_CFG1); nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; @@ -14788,6 +14801,23 @@ static void tg3_get_5720_nvram_info(struct tg3 *tp) } switch (nvmpinstrp) { + case FLASH_5762_MX25L_100: + case FLASH_5762_MX25L_200: + case FLASH_5762_MX25L_400: + case FLASH_5762_MX25L_800: + case FLASH_5762_MX25L_160_320: + tp->nvram_pagesize = 4096; + tp->nvram_jedecnum = JEDEC_MACRONIX; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tg3_flag_set(tp, FLASH); + nv_status = tr32(NVRAM_AUTOSENSE_STATUS); + tp->nvram_size = + (1 << (nv_status >> AUTOSENSE_DEVID & + AUTOSENSE_DEVID_MASK) + << AUTOSENSE_SIZE_IN_MB); + return; + case FLASH_5762_EEPROM_HD: nvmpinstrp = FLASH_5720_EEPROM_HD; break; diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index c2d02d02d1e6..47f51cc0566d 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -5,7 +5,8 @@ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com) * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com) * Copyright (C) 2004 Sun Microsystems Inc. - * Copyright (C) 2007-2014 Broadcom Corporation. + * Copyright (C) 2007-2016 Broadcom Corporation. + * Copyright (C) 2016-2017 Broadcom Limited. 
*/ #ifndef _T3_H @@ -96,6 +97,7 @@ #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106 #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109 #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a +#define TG3PCI_SUBDEVICE_ID_DELL_5762 0x07f0 #define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a @@ -281,6 +283,9 @@ #define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ #define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ /* 0xa8 --> 0xb8 unused */ +#define TG3PCI_DEV_STATUS_CTRL 0x000000b4 +#define MAX_READ_REQ_SIZE_2048 0x00004000 +#define MAX_READ_REQ_MASK 0x00007000 #define TG3PCI_DUAL_MAC_CTRL 0x000000b8 #define DUAL_MAC_CTRL_CH_MASK 0x00000003 #define DUAL_MAC_CTRL_ID 0x00000004 @@ -1858,7 +1863,7 @@ #define NVRAM_STAT 0x00007004 #define NVRAM_WRDATA 0x00007008 #define NVRAM_ADDR 0x0000700c -#define NVRAM_ADDR_MSK 0x00ffffff +#define NVRAM_ADDR_MSK 0x07ffffff #define NVRAM_RDDATA 0x00007010 #define NVRAM_CFG1 0x00007014 #define NVRAM_CFG1_FLASHIF_ENAB 0x00000001 @@ -1940,6 +1945,11 @@ #define FLASH_5720_EEPROM_LD 0x00000003 #define FLASH_5762_EEPROM_HD 0x02000001 #define FLASH_5762_EEPROM_LD 0x02000003 +#define FLASH_5762_MX25L_100 0x00800000 +#define FLASH_5762_MX25L_200 0x00800002 +#define FLASH_5762_MX25L_400 0x00800001 +#define FLASH_5762_MX25L_800 0x00800003 +#define FLASH_5762_MX25L_160_320 0x03800002 #define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000 #define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002 #define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001 @@ -2004,7 +2014,11 @@ /* 0x702c unused */ #define NVRAM_ADDR_LOCKOUT 0x00007030 -/* 0x7034 --> 0x7500 unused */ +#define NVRAM_AUTOSENSE_STATUS 0x00007038 +#define AUTOSENSE_DEVID 0x00000010 +#define AUTOSENSE_DEVID_MASK 0x00000007 +#define AUTOSENSE_SIZE_IN_MB 17 +/* 0x703c --> 0x7500 unused */ #define OTP_MODE 0x00007500 #define OTP_MODE_OTP_THRU_GRC 0x00000001 @@ -3373,6 +3387,7 @@ struct tg3 { #define JEDEC_ST 0x20 #define JEDEC_SAIFUN 0x4f #define JEDEC_SST 0xbf +#define JEDEC_MACRONIX 0xc2 #define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB #define ATMEL_AT24C02_PAGE_SIZE (8) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 2c615ab09e64..f38abf626412 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -702,12 +702,10 @@ static struct octeon_device *octeon_allocate_device_mem(u32 pci_id, size = octdevsize + priv_size + configsize + (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE); - buf = vmalloc(size); + buf = vzalloc(size); if (!buf) return NULL; - memset(buf, 0, size); - oct = (struct octeon_device *)buf; oct->priv = (void *)(buf + octdevsize); oct->chip = (void *)(buf + octdevsize + priv_size); @@ -840,10 +838,9 @@ octeon_allocate_ioq_vector(struct octeon_device *oct) size = sizeof(struct octeon_ioq_vector) * num_ioqs; - oct->ioq_vector = vmalloc(size); + oct->ioq_vector = vzalloc(size); if (!oct->ioq_vector) return 1; - memset(oct->ioq_vector, 0, size); for (i = 0; i < num_ioqs; i++) { ioq_vector = &oct->ioq_vector[i]; ioq_vector->oct_dev = oct; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 52b3a6044f85..21618d0d694f 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -521,7 +521,7 @@ static void nicvf_unmap_page(struct nicvf *nic, 
struct page *page, u64 dma_addr) static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, struct cqe_rx_t *cqe_rx, struct snd_queue *sq, - struct sk_buff **skb) + struct rcv_queue *rq, struct sk_buff **skb) { struct xdp_buff xdp; struct page *page; @@ -545,6 +545,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, xdp.data = (void *)cpu_addr; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; rcu_read_lock(); @@ -698,7 +699,8 @@ static inline void nicvf_set_rxhash(struct net_device *netdev, static void nicvf_rcv_pkt_handler(struct net_device *netdev, struct napi_struct *napi, - struct cqe_rx_t *cqe_rx, struct snd_queue *sq) + struct cqe_rx_t *cqe_rx, + struct snd_queue *sq, struct rcv_queue *rq) { struct sk_buff *skb = NULL; struct nicvf *nic = netdev_priv(netdev); @@ -724,7 +726,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, /* For XDP, ignore pkts spanning multiple pages */ if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) { /* Packet consumed by XDP */ - if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb)) + if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb)) return; } else { skb = nicvf_get_rcv_skb(snic, cqe_rx, @@ -781,6 +783,7 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, struct cqe_rx_t *cq_desc; struct netdev_queue *txq; struct snd_queue *sq = &qs->sq[cq_idx]; + struct rcv_queue *rq = &qs->rq[cq_idx]; unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx; spin_lock_bh(&cq->lock); @@ -811,7 +814,7 @@ loop: switch (cq_desc->cqe_type) { case CQE_TYPE_RX: - nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq); + nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq); work_done++; break; case CQE_TYPE_SEND: diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index f38ea349aa00..14e62c6ac342 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -760,6 +760,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, if (!rq->enable) { nicvf_reclaim_rcv_queue(nic, qs, qidx); + xdp_rxq_info_unreg(&rq->xdp_rxq); return; } @@ -772,6 +773,9 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, /* all writes of RBDR data to be loaded into L2 Cache as well */ rq->caching = 1; + /* The driver has no proper error path for a failed XDP RX-queue info registration */ + WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx) < 0); + /* Send a mailbox msg to PF to config RQ */ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 178ab6e8e3c5..7d1e4e2aaad0 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -12,6 +12,7 @@ #include <linux/netdevice.h> #include <linux/iommu.h> #include <linux/bpf.h> +#include <net/xdp.h> #include "q_struct.h" #define MAX_QUEUE_SET 128 @@ -255,6 +256,7 @@ struct rcv_queue { u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */ u8 caching; struct rx_tx_queue_stats stats; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; struct cmp_queue { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 2e71e334d819..b57acb8dc35b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++
b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -405,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ }; -static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = { - {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */ - {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ - -}; - -static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = { - {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */ - {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ +static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ + {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ + {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ + {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ + {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ + {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ + {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */ + {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */ + {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ + {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ +}; + +static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 
0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ + {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ + {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ + {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ + {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ + {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ + {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */ + {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */ + {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */ + {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ + {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ }; static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index e8173ae32158..88e740082a02 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -21,6 +21,7 @@ /* Error codes */ #define CUDBG_STATUS_NO_MEM -19 #define CUDBG_STATUS_ENTITY_NOT_FOUND -24 +#define CUDBG_STATUS_NOT_IMPLEMENTED -28 #define CUDBG_SYSTEM_ERROR -29 #define CUDBG_STATUS_CCLK_NOT_DEFINED -32 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 336670d00a52..0a3871f10787 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -2422,11 +2422,21 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; + u32 local_offset, local_range; struct ireg_buf *up_cim; + u32 size, j, iter; + u32 instance = 0; int i, rc, n; - u32 size; - n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); + if (is_t5(padap->params.chip)) + n = sizeof(t5_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else if (is_t6(padap->params.chip)) + n = sizeof(t6_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else + return CUDBG_STATUS_NOT_IMPLEMENTED; + size = sizeof(struct ireg_buf) * n; rc = cudbg_get_buff(dbg_buff, size, &temp_buff); if (rc) @@ -2444,6 +2454,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, t5_up_cim_reg_array[i][2]; up_cim_reg->ireg_offset_range = t5_up_cim_reg_array[i][3]; + instance = t5_up_cim_reg_array[i][4]; } else if (is_t6(padap->params.chip)) { up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0]; up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1]; @@ -2451,13 +2462,35 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, t6_up_cim_reg_array[i][2]; up_cim_reg->ireg_offset_range = t6_up_cim_reg_array[i][3]; + instance = t6_up_cim_reg_array[i][4]; } - rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset, - up_cim_reg->ireg_offset_range, buff); - if (rc) { - cudbg_put_buff(&temp_buff, dbg_buff); - return rc; + switch (instance) { + case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES: + iter = up_cim_reg->ireg_offset_range; + local_offset = 0x120; + local_range = 1; + 
break; + case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES: + iter = up_cim_reg->ireg_offset_range; + local_offset = 0x10; + local_range = 1; + break; + default: + iter = 1; + local_offset = 0; + local_range = up_cim_reg->ireg_offset_range; + break; + } + + for (j = 0; j < iter; j++, buff++) { + rc = t4_cim_read(padap, + up_cim_reg->ireg_local_offset + + (j * local_offset), local_range, buff); + if (rc) { + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } } up_cim++; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 69d0b64e6986..baa67d362051 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -345,7 +345,6 @@ struct adapter_params { unsigned int sf_size; /* serial flash size in bytes */ unsigned int sf_nsec; /* # of flash sectors */ - unsigned int sf_fw_start; /* start of FW image in flash */ unsigned int fw_vers; /* firmware version */ unsigned int bs_vers; /* bootstrap version */ @@ -826,6 +825,10 @@ struct mbox_list { struct list_head list; }; +struct mps_encap_entry { + atomic_t refcnt; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -840,6 +843,8 @@ struct adapter { enum chip_type chip; int msg_enable; + __be16 vxlan_port; + u8 vxlan_port_cnt; struct adapter_params params; struct cxgb4_virt_res vres; @@ -869,7 +874,10 @@ struct adapter { unsigned int clipt_start; unsigned int clipt_end; struct clip_tbl *clipt; + unsigned int rawf_start; + unsigned int rawf_cnt; struct smt_data *smt; + struct mps_encap_entry *mps_encap; struct cxgb4_uld_info *uld; void *uld_handle[CXGB4_ULD_MAX]; unsigned int num_uld; @@ -1306,6 +1314,7 @@ void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); void cxgb4_set_ethtool_ops(struct net_device *netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); +enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb); extern int dbfifo_int_thresh; #define for_each_port(adapter, iter) \ @@ -1638,6 +1647,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int mtu, int promisc, int all_multi, int bcast, int vlanex, bool sleep_ok); +int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok); +int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok); int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 581d628f01cc..a2d6c8a69c52 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -274,7 +274,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct cudbg_ulptx_la); break; case CUDBG_UP_CIM_INDIRECT: - n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); + n = 0; + if (is_t5(adap->params.chip)) + n = sizeof(t5_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else if (is_t6(adap->params.chip)) + n = sizeof(t6_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); len = sizeof(struct ireg_buf) * n; break; case 
CUDBG_PBT_TABLE: diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index d3ced0438474..4ea76c1411dc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1743,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v) */ if (lookup_type && (lookup_type != DATALKPTYPE_M)) { /* Inner header VNI */ - vniy = ((data2 & DATAVIDH2_F) << 23) | + vniy = (data2 & DATAVIDH2_F) | (DATAVIDH1_G(data2) << 16) | VIDL_G(val); dip_hit = data2 & DATADIPHIT_F; } else { @@ -1753,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v) port_num = DATAPORTNUM_G(data2); /* Read tcamx. Change the control param */ + vnix = 0; ctl |= CTLXYBITSEL_V(1); t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A); @@ -1761,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v) data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A); if (lookup_type && (lookup_type != DATALKPTYPE_M)) { /* Inner header VNI mask */ - vnix = ((data2 & DATAVIDH2_F) << 23) | + vnix = (data2 & DATAVIDH2_F) | (DATAVIDH1_G(data2) << 16) | VIDL_G(val); } } else { @@ -1834,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v) addr[1], addr[2], addr[3], addr[4], addr[5], (unsigned long long)mask, - vniy, vnix, dip_hit ? 'Y' : 'N', + vniy, (vnix | vniy), + dip_hit ? 'Y' : 'N', port_num, (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi), diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 541419b084ac..7852d98bad75 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, else return PORT_OTHER; } else if (port_type == FW_PORT_TYPE_KR4_100G || - port_type == FW_PORT_TYPE_KR_SFP28) { + port_type == FW_PORT_TYPE_KR_SFP28 || + port_type == FW_PORT_TYPE_KR_XLAUI) { return PORT_NONE; } @@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full); break; + case FW_PORT_TYPE_KR_XLAUI: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full); + break; + case FW_PORT_TYPE_CR2_QSFP: SET_LMM(FIBRE); SET_LMM(50000baseSR2_Full); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 5980f308a253..677a3ba83c1f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -694,7 +694,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) if (f->smt) cxgb4_smt_release(f->smt); - if (f->fs.hash && f->fs.type) + if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type) cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); /* The zeroing of the filter rule below clears the filter valid, @@ -1189,6 +1189,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, struct filter_ctx *ctx) { struct adapter *adapter = netdev2adap(dev); + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); unsigned int max_fidx, fidx; struct filter_entry *f; u32 iconf; @@ -1225,12 +1226,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, * insertion. 
*/ if (fs->type == 0) { /* IPv4 */ - /* If our IPv4 filter isn't being written to a - * multiple of four filter index and there's an IPv6 - * filter at the multiple of 4 base slot, then we - * prevent insertion. + /* For T6, if our IPv4 filter isn't being written to a + * multiple-of-two filter index and there's an IPv6 + * filter at the multiple-of-2 base slot, then we need + * to delete that IPv6 filter ... + * For adapters below T6, an IPv6 filter occupies 4 entries. + * Hence we need to delete the filter at the multiple-of-4 slot. */ - fidx = filter_id & ~0x3; + if (chip_ver < CHELSIO_T6) + fidx = filter_id & ~0x3; + else + fidx = filter_id & ~0x1; + if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) { f = &adapter->tids.ftid_tab[fidx]; @@ -1291,6 +1298,16 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, if (f->valid) clear_filter(adapter, f); + if (is_t6(adapter->params.chip) && fs->type && + ipv6_addr_type((const struct in6_addr *)fs->val.lip) != + IPV6_ADDR_ANY) { + ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1); + if (ret) { + cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6); + return ret; + } + } + /* Convert the filter specification into our internal format. * We copy the PF/VF specification into the Outer VLAN field * here so the rest of the code -- including the interface to diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 87ac1e4dafc1..3293980ce6e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -65,6 +65,7 @@ #include <net/addrconf.h> #include <linux/uaccess.h> #include <linux/crash_dump.h> +#include <net/udp_tunnel.h> #include "cxgb4.h" #include "cxgb4_filter.h" @@ -2987,6 +2988,151 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, } } +static void cxgb_del_udp_tunnel(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + int ret = 0, i; + + if (chip_ver < CHELSIO_T6) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!adapter->vxlan_port_cnt || + adapter->vxlan_port != ti->port) + return; /* Invalid VxLAN destination port */ + + adapter->vxlan_port_cnt--; + if (adapter->vxlan_port_cnt) + return; + + adapter->vxlan_port = 0; + t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0); + break; + default: + return; + } + + /* Matchall mac entries can be deleted only after all tunnel ports + * are brought down or removed.
+ */ + if (!adapter->rawf_cnt) + return; + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + ret = t4_free_raw_mac_filt(adapter, pi->viid, + match_all_mac, match_all_mac, + adapter->rawf_start + + pi->port_id, + 1, pi->port_id, true); + if (ret < 0) { + netdev_info(netdev, "Failed to free mac filter entry, for port %d\n", + i); + return; + } + atomic_dec(&adapter->mps_encap[adapter->rawf_start + + pi->port_id].refcnt); + } +} + +static void cxgb_add_udp_tunnel(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); + u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + int i, ret; + + if (chip_ver < CHELSIO_T6) + return; + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + /* For T6 fw reserves last 2 entries for + * storing match all mac filter (config file entry). + */ + if (!adapter->rawf_cnt) + return; + + /* Callback for adding vxlan port can be called with the same + * port for both IPv4 and IPv6. We should not disable the + * offloading when the same port for both protocols is added + * and later one of them is removed. + */ + if (adapter->vxlan_port_cnt && + adapter->vxlan_port == ti->port) { + adapter->vxlan_port_cnt++; + return; + } + + /* We will support only one VxLAN port */ + if (adapter->vxlan_port_cnt) { + netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", + be16_to_cpu(adapter->vxlan_port), + be16_to_cpu(ti->port)); + return; + } + + adapter->vxlan_port = ti->port; + adapter->vxlan_port_cnt = 1; + + t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, + VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); + break; + default: + return; + } + + /* Create a 'match all' mac filter entry for inner mac, + * if raw mac interface is supported. Once the linux kernel provides + * driver entry points for adding/deleting the inner mac addresses, + * we will remove this 'match all' entry and fallback to adding + * exact match filters. 
+ */ + if (adapter->rawf_cnt) { + for_each_port(adapter, i) { + pi = adap2pinfo(adapter, i); + + ret = t4_alloc_raw_mac_filt(adapter, pi->viid, + match_all_mac, + match_all_mac, + adapter->rawf_start + + pi->port_id, + 1, pi->port_id, true); + if (ret < 0) { + netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", + be16_to_cpu(ti->port)); + cxgb_del_udp_tunnel(netdev, ti); + return; + } + atomic_inc(&adapter->mps_encap[ret].refcnt); + } + } +} + +static netdev_features_t cxgb_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6) + return features; + + /* Check if hw supports offload for this packet */ + if (!skb->encapsulation || cxgb_encap_offload_supported(skb)) + return features; + + /* Offload is not supported for this encapsulated packet */ + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + static netdev_features_t cxgb_fix_features(struct net_device *dev, netdev_features_t features) { @@ -3018,6 +3164,9 @@ static const struct net_device_ops cxgb4_netdev_ops = { #endif /* CONFIG_CHELSIO_T4_FCOE */ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, + .ndo_udp_tunnel_add = cxgb_add_udp_tunnel, + .ndo_udp_tunnel_del = cxgb_del_udp_tunnel, + .ndo_features_check = cxgb_features_check, .ndo_fix_features = cxgb_fix_features, }; @@ -5080,6 +5229,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_TC; + + if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5) + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + if (highdma) netdev->hw_features |= NETIF_F_HIGHDMA; netdev->features |= netdev->hw_features; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 922f2f937789..eab781fab2a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -770,12 +770,19 @@ static inline unsigned int flits_to_desc(unsigned int n) * Returns whether an Ethernet packet is small enough to fit as * immediate data. Return value corresponds to headroom required. */ -static inline int is_eth_imm(const struct sk_buff *skb) +static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver) { - int hdrlen = skb_shinfo(skb)->gso_size ? - sizeof(struct cpl_tx_pkt_lso_core) : 0; + int hdrlen = 0; - hdrlen += sizeof(struct cpl_tx_pkt); + if (skb->encapsulation && skb_shinfo(skb)->gso_size && + chip_ver > CHELSIO_T5) { + hdrlen = sizeof(struct cpl_tx_tnl_lso); + hdrlen += sizeof(struct cpl_tx_pkt_core); + } else { + hdrlen = skb_shinfo(skb)->gso_size ? + sizeof(struct cpl_tx_pkt_lso_core) : 0; + hdrlen += sizeof(struct cpl_tx_pkt); + } if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) return hdrlen; return 0; @@ -788,10 +795,11 @@ static inline int is_eth_imm(const struct sk_buff *skb) * Returns the number of flits needed for a Tx WR for the given Ethernet * packet, including the needed WR and CPL headers. */ -static inline unsigned int calc_tx_flits(const struct sk_buff *skb) +static inline unsigned int calc_tx_flits(const struct sk_buff *skb, + unsigned int chip_ver) { unsigned int flits; - int hdrlen = is_eth_imm(skb); + int hdrlen = is_eth_imm(skb, chip_ver); /* If the skb is small enough, we can pump it out as a work request * with only immediate data. 
In that case we just have to have the @@ -810,13 +818,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb) * with an embedded TX Packet Write CPL message. */ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); - if (skb_shinfo(skb)->gso_size) - flits += (sizeof(struct fw_eth_tx_pkt_wr) + - sizeof(struct cpl_tx_pkt_lso_core) + - sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); - else + if (skb_shinfo(skb)->gso_size) { + if (skb->encapsulation && chip_ver > CHELSIO_T5) + hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_tnl_lso); + else + hdrlen = sizeof(struct fw_eth_tx_pkt_wr) + + sizeof(struct cpl_tx_pkt_lso_core); + + hdrlen += sizeof(struct cpl_tx_pkt_core); + flits += (hdrlen / sizeof(__be64)); + } else { flits += (sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + } return flits; } @@ -827,9 +842,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb) * Returns the number of Tx descriptors needed for the given Ethernet * packet, including the needed WR and CPL headers. */ -static inline unsigned int calc_tx_descs(const struct sk_buff *skb) +static inline unsigned int calc_tx_descs(const struct sk_buff *skb, + unsigned int chip_ver) { - return flits_to_desc(calc_tx_flits(skb)); + return flits_to_desc(calc_tx_flits(skb, chip_ver)); } /** @@ -1154,6 +1170,102 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, } #endif /* CONFIG_CHELSIO_T4_FCOE */ +/* Returns tunnel type if hardware supports offloading of the same. + * It is called only for T5 and onwards. + */ +enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb) +{ + u8 l4_hdr = 0; + enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; + struct port_info *pi = netdev_priv(skb->dev); + struct adapter *adapter = pi->adapter; + + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB)) + return tnl_type; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return tnl_type; + } + + switch (l4_hdr) { + case IPPROTO_UDP: + if (adapter->vxlan_port == udp_hdr(skb)->dest) + tnl_type = TX_TNL_TYPE_VXLAN; + break; + default: + return tnl_type; + } + + return tnl_type; +} + +static inline void t6_fill_tnl_lso(struct sk_buff *skb, + struct cpl_tx_tnl_lso *tnl_lso, + enum cpl_tx_tnl_lso_type tnl_type) +{ + u32 val; + int in_eth_xtra_len; + int l3hdr_len = skb_network_header_len(skb); + int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + const struct skb_shared_info *ssi = skb_shinfo(skb); + bool v6 = (ip_hdr(skb)->version == 6); + + val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) | + CPL_TX_TNL_LSO_FIRST_F | + CPL_TX_TNL_LSO_LAST_F | + (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) | + CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) | + CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) | + (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) | + CPL_TX_TNL_LSO_IPLENSETOUT_F | + (v6 ? 
0 : CPL_TX_TNL_LSO_IPIDINCOUT_F); + tnl_lso->op_to_IpIdSplitOut = htonl(val); + + tnl_lso->IpIdOffsetOut = 0; + + /* Get the tunnel header length */ + val = skb_inner_mac_header(skb) - skb_mac_header(skb); + in_eth_xtra_len = skb_inner_network_header(skb) - + skb_inner_mac_header(skb) - ETH_HLEN; + + switch (tnl_type) { + case TX_TNL_TYPE_VXLAN: + tnl_lso->UdpLenSetOut_to_TnlHdrLen = + htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F | + CPL_TX_TNL_LSO_UDPLENSETOUT_F); + break; + default: + tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; + break; + } + + tnl_lso->UdpLenSetOut_to_TnlHdrLen |= + htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) | + CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type)); + + tnl_lso->r1 = 0; + + val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) | + CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | + CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) | + CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4); + tnl_lso->Flow_to_TcpHdrLen = htonl(val); + + tnl_lso->IpIdOffset = htons(0); + + tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); + tnl_lso->TCPSeqOffset = htonl(0); + tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); +} + /** * t4_eth_xmit - add a packet to an Ethernet Tx queue * @skb: the packet @@ -1177,6 +1289,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev) bool immediate = false; int len, max_pkt_len; bool ptp_enabled = is_ptp_enabled(skb, dev); + unsigned int chip_ver; + enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE; + #ifdef CONFIG_CHELSIO_T4_FCOE int err; #endif /* CONFIG_CHELSIO_T4_FCOE */ @@ -1227,7 +1342,8 @@ out_free: dev_kfree_skb_any(skb); } #endif /* CONFIG_CHELSIO_T4_FCOE */ - flits = calc_tx_flits(skb); + chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); + flits = calc_tx_flits(skb, chip_ver); ndesc = flits_to_desc(flits); credits = txq_avail(&q->q) - ndesc; @@ -1241,9 +1357,12 @@ out_free: dev_kfree_skb_any(skb); return NETDEV_TX_BUSY; } - if (is_eth_imm(skb)) + if (is_eth_imm(skb, chip_ver)) immediate = true; + if (skb->encapsulation && chip_ver > CHELSIO_T5) + tnl_type = cxgb_encap_offload_supported(skb); + if (!immediate && unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { q->mapping_err++; @@ -1270,33 +1389,58 @@ out_free: dev_kfree_skb_any(skb); bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; int l3hdr_len = skb_network_header_len(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; + struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1); + + if (tnl_type) + len += sizeof(*tnl_lso); + else + len += sizeof(*lso); - len += sizeof(*lso); wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | FW_WR_IMMDLEN_V(len)); - lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | - LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | - LSO_IPV6_V(v6) | - LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | - LSO_IPHDR_LEN_V(l3hdr_len / 4) | - LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); - lso->c.ipid_ofst = htons(0); - lso->c.mss = htons(ssi->gso_size); - lso->c.seqno_offset = htonl(0); - if (is_t4(adap->params.chip)) - lso->c.len = htonl(skb->len); - else - lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); - cpl = (void *)(lso + 1); + if (tnl_type) { + struct iphdr *iph = ip_hdr(skb); - if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) - cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); - else - cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + t6_fill_tnl_lso(skb, tnl_lso, tnl_type); + cpl = (void *)(tnl_lso + 1); + /* Driver is expected to compute partial checksum that + * does not include the IP Total Length. 
+ */ + if (iph->version == 4) { + iph->check = 0; + iph->tot_len = 0; + iph->check = (u16)(~ip_fast_csum((u8 *)iph, + iph->ihl)); + } + if (skb->ip_summed == CHECKSUM_PARTIAL) + cntrl = hwcsum(adap->params.chip, skb); + } else { + lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | + LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F | + LSO_IPV6_V(v6) | + LSO_ETHHDR_LEN_V(eth_xtra_len / 4) | + LSO_IPHDR_LEN_V(l3hdr_len / 4) | + LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); + lso->c.ipid_ofst = htons(0); + lso->c.mss = htons(ssi->gso_size); + lso->c.seqno_offset = htonl(0); + if (is_t4(adap->params.chip)) + lso->c.len = htonl(skb->len); + else + lso->c.len = + htonl(LSO_T5_XFER_SIZE_V(skb->len)); + cpl = (void *)(lso + 1); - cntrl |= TXPKT_CSUM_TYPE_V(v6 ? - TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | - TXPKT_IPHDR_LEN_V(l3hdr_len); + if (CHELSIO_CHIP_VERSION(adap->params.chip) + <= CHELSIO_T5) + cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len); + else + cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len); + + cntrl |= TXPKT_CSUM_TYPE_V(v6 ? + TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) | + TXPKT_IPHDR_LEN_V(l3hdr_len); + } q->tso++; q->tx_cso += ssi->gso_segs; } else { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 44930ca06c4b..6d76851a4da9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2847,8 +2847,6 @@ enum { SF_RD_DATA_FAST = 0xb, /* read flash */ SF_RD_ID = 0x9f, /* read ID */ SF_ERASE_SECTOR = 0xd8, /* erase sector */ - - FW_MAX_SIZE = 16 * SF_SEC_SIZE, }; /** @@ -3561,8 +3559,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) const __be32 *p = (const __be32 *)fw_data; const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data; unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; - unsigned int fw_img_start = adap->params.sf_fw_start; - unsigned int fw_start_sec = fw_img_start / sf_sec_size; + unsigned int fw_start_sec = FLASH_FW_START_SEC; + unsigned int fw_size = FLASH_FW_MAX_SIZE; + unsigned int fw_start = FLASH_FW_START; if (!size) { dev_err(adap->pdev_dev, "FW image has no data\n"); @@ -3578,9 +3577,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) "FW image size differs from size in FW header\n"); return -EINVAL; } - if (size > FW_MAX_SIZE) { + if (size > fw_size) { dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n", - FW_MAX_SIZE); + fw_size); return -EFBIG; } if (!t4_fw_matches_chip(adap, hdr)) @@ -3607,11 +3606,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) */ memcpy(first_page, fw_data, SF_PAGE_SIZE); ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff); - ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page); + ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page); if (ret) goto out; - addr = fw_img_start; + addr = fw_start; for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { addr += SF_PAGE_SIZE; fw_data += SF_PAGE_SIZE; @@ -3621,7 +3620,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size) } ret = t4_write_flash(adap, - fw_img_start + offsetof(struct fw_hdr, fw_ver), + fw_start + offsetof(struct fw_hdr, fw_ver), sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver); out: if (ret) @@ -6084,6 +6083,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type) "CR2_QSFP", "SFP28", "KR_SFP28", + "KR_XLAUI" }; if (port_type < ARRAY_SIZE(port_type_description)) @@ -7467,6 +7467,112 @@ int t4_set_rxmode(struct adapter 
*adap, unsigned int mbox, unsigned int viid, } /** + * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam + * @adap: the adapter + * @viid: the VI id + * @addr: the MAC address + * @mask: the mask + * @idx: index of the entry in mps tcam + * @lookup_type: MAC address for inner (1) or outer (0) header + * @port_id: the port index + * @sleep_ok: call is allowed to sleep + * + * Removes the mac entry at the specified index using raw mac interface. + * + * Returns a negative error number on failure. + */ +int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok) +{ + struct fw_vi_mac_cmd c; + struct fw_vi_mac_raw *p = &c.u.raw; + u32 val; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + val = FW_CMD_LEN16_V(1) | + FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW); + c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + FW_CMD_LEN16_V(val)); + + p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) | + FW_VI_MAC_ID_BASED_FREE); + + /* Lookup Type. Outer header: 0, Inner header: 1 */ + p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) | + DATAPORTNUM_V(port_id)); + /* Lookup mask and port mask */ + p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) | + DATAPORTNUM_V(DATAPORTNUM_M)); + + /* Copy the address and the mask */ + memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); + memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); + + return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); +} + +/** + * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam + * @adap: the adapter + * @viid: the VI id + * @mac: the MAC address + * @mask: the mask + * @idx: index at which to add this entry + * @port_id: the port index + * @lookup_type: MAC address for inner (1) or outer (0) header + * @sleep_ok: call is allowed to sleep + * + * Adds the mac entry at the specified index using raw mac interface. + * + * Returns a negative error number or the allocated index for this mac. + */ +int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid, + const u8 *addr, const u8 *mask, unsigned int idx, + u8 lookup_type, u8 port_id, bool sleep_ok) +{ + int ret = 0; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_raw *p = &c.u.raw; + u32 val; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_VI_MAC_CMD_VIID_V(viid)); + val = FW_CMD_LEN16_V(1) | + FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW); + c.freemacs_to_len16 = cpu_to_be32(val); + + /* Specify that this is an inner mac address */ + p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx)); + + /* Lookup Type. 
Outer header: 0, Inner header: 1 */ + p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) | + DATAPORTNUM_V(port_id)); + /* Lookup mask and port mask */ + p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) | + DATAPORTNUM_V(DATAPORTNUM_M)); + + /* Copy the address and the mask */ + memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN); + memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN); + + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok); + if (ret == 0) { + ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd)); + if (ret != idx) + ret = -ENOMEM; + } + + return ret; +} + +/** * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses * @adap: the adapter * @mbox: mailbox to use for the FW command diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 7e12f241145b..d0db4427b77e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -107,6 +107,7 @@ enum { CPL_FW6_MSG = 0xE0, CPL_FW6_PLD = 0xE1, + CPL_TX_TNL_LSO = 0xEC, CPL_TX_PKT_LSO = 0xED, CPL_TX_PKT_XT = 0xEE, @@ -1479,6 +1480,169 @@ struct ulp_txpkt { #define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S) #define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U) +enum cpl_tx_tnl_lso_type { + TX_TNL_TYPE_OPAQUE, + TX_TNL_TYPE_NVGRE, + TX_TNL_TYPE_VXLAN, + TX_TNL_TYPE_GENEVE, +}; + +struct cpl_tx_tnl_lso { + __be32 op_to_IpIdSplitOut; + __be16 IpIdOffsetOut; + __be16 UdpLenSetOut_to_TnlHdrLen; + __be64 r1; + __be32 Flow_to_TcpHdrLen; + __be16 IpIdOffset; + __be16 IpIdSplit_to_Mss; + __be32 TCPSeqOffset; + __be32 EthLenOffset_Size; + /* encapsulated CPL (TX_PKT_XT) follows here */ +}; + +#define CPL_TX_TNL_LSO_OPCODE_S 24 +#define CPL_TX_TNL_LSO_OPCODE_M 0xff +#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S) +#define CPL_TX_TNL_LSO_OPCODE_G(x) \ + (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M) + +#define CPL_TX_TNL_LSO_FIRST_S 23 +#define CPL_TX_TNL_LSO_FIRST_M 0x1 +#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S) +#define CPL_TX_TNL_LSO_FIRST_G(x) \ + (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M) +#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U) + +#define CPL_TX_TNL_LSO_LAST_S 22 +#define CPL_TX_TNL_LSO_LAST_M 0x1 +#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S) +#define CPL_TX_TNL_LSO_LAST_G(x) \ + (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M) +#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U) + +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21 +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1 +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \ + CPL_TX_TNL_LSO_ETHHDRLENXOUT_M) +#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U) + +#define CPL_TX_TNL_LSO_IPV6OUT_S 20 +#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1 +#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S) +#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M) +#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U) + +#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16 +#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf +#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S) +#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \ + (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M) + +#define CPL_TX_TNL_LSO_IPHDRLEN_S 4 +#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff 
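
A note on the CPL_TX_TNL_LSO_* definitions above and below: they all follow the field-accessor idiom used throughout these Chelsio headers. Each hardware field gets an _S shift, an _M mask, a _V(x) macro that shifts a value into position, a _G(x) macro that pulls it back out, and, for one-bit fields, an _F convenience flag. A minimal standalone sketch of the pattern, with demo names rather than kernel definitions (the layout is borrowed from the TNLTYPE and TNLHDRLEN fields, and 2 is TX_TNL_TYPE_VXLAN in the enum above):

    /* Demo of the _S/_M/_V/_G accessor pattern; names are local to this
     * sketch, not kernel symbols. Field layout mirrors TNLTYPE (2 bits
     * at shift 12) and TNLHDRLEN (12 bits at shift 0). */
    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_TNLTYPE_S    12
    #define DEMO_TNLTYPE_M    0x3
    #define DEMO_TNLTYPE_V(x) ((x) << DEMO_TNLTYPE_S)
    #define DEMO_TNLTYPE_G(x) (((x) >> DEMO_TNLTYPE_S) & DEMO_TNLTYPE_M)

    #define DEMO_TNLHDRLEN_S    0
    #define DEMO_TNLHDRLEN_M    0xfff
    #define DEMO_TNLHDRLEN_V(x) ((x) << DEMO_TNLHDRLEN_S)
    #define DEMO_TNLHDRLEN_G(x) (((x) >> DEMO_TNLHDRLEN_S) & DEMO_TNLHDRLEN_M)

    int main(void)
    {
        /* Compose the field the way t6_fill_tnl_lso() composes
         * UdpLenSetOut_to_TnlHdrLen: OR the _V() values together. */
        uint16_t field = DEMO_TNLTYPE_V(2) | DEMO_TNLHDRLEN_V(50);

        /* _G() reverses it: shift down, then mask off the neighbours. */
        printf("tnl_type=%u hdr_len=%u\n",
               (unsigned)DEMO_TNLTYPE_G(field),
               (unsigned)DEMO_TNLHDRLEN_G(field));
        return 0;
    }

The composed value is byte-swapped with htons()/htonl() only once, after all the _V() contributions have been OR-ed together, which is how t6_fill_tnl_lso() above builds each descriptor word.
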
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S) +#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M) + +#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0 +#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf +#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S) +#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \ + (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M) + +#define CPL_TX_TNL_LSO_MSS_S 0 +#define CPL_TX_TNL_LSO_MSS_M 0x3fff +#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S) +#define CPL_TX_TNL_LSO_MSS_G(x) \ + (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M) + +#define CPL_TX_TNL_LSO_SIZE_S 0 +#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff +#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S) +#define CPL_TX_TNL_LSO_SIZE_G(x) \ + (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M) + +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16 +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S) +#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M) + +#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4 +#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff +#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S) +#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M) + +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3 +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1 +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S) +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M) +#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U) + +#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2 +#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1 +#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S) +#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M) +#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U) + +#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1 +#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1 +#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S) +#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M) +#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U) + +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14 +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1 +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S) +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \ + CPL_TX_TNL_LSO_UDPCHKCLROUT_M) +#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U) + +#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15 +#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1 +#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \ + ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S) +#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \ + (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \ + CPL_TX_TNL_LSO_UDPLENSETOUT_M) +#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U) + +#define CPL_TX_TNL_LSO_TNLTYPE_S 12 +#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3 +#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S) +#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \ + (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M) + +#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16 
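
One step in the tunnel-LSO transmit path that these fields accompany deserves a note: before handing a VXLAN segment to hardware, t4_eth_xmit() above zeroes the outer IPv4 tot_len and check fields and stores ~ip_fast_csum() over the header, i.e. the un-complemented running ones'-complement sum, presumably so hardware can fold the per-segment Total Length in before the final inversion. A standalone sketch of that arithmetic (local names; the hand-rolled fold stands in for ip_fast_csum()):

    /* Demo of the partial IPv4 header checksum seeded for hardware in
     * t4_eth_xmit() above: tot_len and check are zeroed, then the
     * ones'-complement sum of the header is kept un-complemented. */
    #include <stdio.h>
    #include <stdint.h>

    /* Fold a ones'-complement sum over 'words' 16-bit words. */
    static uint16_t demo_csum_fold(const void *hdr, int words)
    {
        const uint16_t *p = hdr;
        uint32_t sum = 0;

        while (words--)
            sum += *p++;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;     /* not inverted: a partial sum */
    }

    int main(void)
    {
        uint8_t iph[20] = { 0x45 };   /* version 4, ihl 5; rest zeroed */

        /* tot_len (bytes 2-3) and check (bytes 10-11) stay zero, as in
         * the driver; hardware adds the per-segment length in later. */
        printf("partial sum = 0x%04x\n", demo_csum_fold(iph, 10));
        return 0;
    }
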
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf +#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN) +#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \ + (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN) + +#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0 +#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff +#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S) +#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \ + (((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M) + +#define CPL_TX_TNL_LSO_IPV6_S 20 +#define CPL_TX_TNL_LSO_IPV6_M 0x1 +#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S) +#define CPL_TX_TNL_LSO_IPV6_G(x) \ + (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M) +#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U) + #define ULP_TX_SC_MORE_S 23 #define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S) #define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index f6701e0a6701..d9c06d6dc7b2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -45,6 +45,9 @@ #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) #define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) +#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4 +#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16 + #define MYPORT_BASE 0x1c000 #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) @@ -2508,6 +2511,17 @@ #define MPS_RX_MAC_BG_PG_CNT0_A 0x11208 #define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218 +#define MPS_RX_VXLAN_TYPE_A 0x11234 + +#define VXLAN_EN_S 16 +#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S) +#define VXLAN_EN_F VXLAN_EN_V(1U) + +#define VXLAN_S 0 +#define VXLAN_M 0xffffU +#define VXLAN_V(x) ((x) << VXLAN_S) +#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M) + #define MPS_CLS_TCAM_Y_L_A 0xf000 #define MPS_CLS_TCAM_DATA0_A 0xf000 #define MPS_CLS_TCAM_DATA1_A 0xf004 @@ -2534,8 +2548,14 @@ #define DATAPORTNUM_S 12 #define DATAPORTNUM_M 0xfU +#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S) #define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M) +#define DATALKPTYPE_S 10 +#define DATALKPTYPE_M 0x3U +#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S) +#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M) + #define DATADIPHIT_S 8 #define DATADIPHIT_V(x) ((x) << DATADIPHIT_S) #define DATADIPHIT_F DATADIPHIT_V(1U) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 01f5a5ec16fa..f3310d5b3c4c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2060,6 +2060,7 @@ struct fw_vi_cmd { #define FW_VI_MAC_ADD_MAC 0x3FF #define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE #define FW_VI_MAC_MAC_BASED_FREE 0x3FD +#define FW_VI_MAC_ID_BASED_FREE 0x3FC #define FW_CLS_TCAM_NUM_ENTRIES 336 enum fw_vi_mac_smac { @@ -2076,6 +2077,13 @@ enum fw_vi_mac_result { FW_VI_MAC_R_F_ACL_CHECK }; +enum fw_vi_mac_entry_types { + FW_VI_MAC_TYPE_EXACTMAC, + FW_VI_MAC_TYPE_HASHVEC, + FW_VI_MAC_TYPE_RAW, + FW_VI_MAC_TYPE_EXACTMAC_VNI, +}; + struct fw_vi_mac_cmd { __be32 op_to_viid; __be32 freemacs_to_len16; @@ -2087,6 +2095,13 @@ struct fw_vi_mac_cmd { struct fw_vi_mac_hash { __be64 hashvec; } hash; + struct fw_vi_mac_raw { + __be32 raw_idx_pkd; + __be32 data0_pkd; + __be32 data1[2]; + __be64 data0m_pkd; + __be32 data1m[2]; + } raw; } u; }; @@ -2096,6 +2111,12 @@ struct fw_vi_mac_cmd { #define FW_VI_MAC_CMD_FREEMACS_S 31 #define 
FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S) +#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23 +#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7 +#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S) +#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \ + (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M) + #define FW_VI_MAC_CMD_HASHVECEN_S 23 #define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S) #define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U) @@ -2122,6 +2143,12 @@ struct fw_vi_mac_cmd { #define FW_VI_MAC_CMD_IDX_G(x) \ (((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M) +#define FW_VI_MAC_CMD_RAW_IDX_S 16 +#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff +#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S) +#define FW_VI_MAC_CMD_RAW_IDX_G(x) \ + (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M) + #define FW_RXMODE_MTU_NO_CHG 65535 struct fw_vi_rxmode_cmd { @@ -2829,6 +2856,7 @@ enum fw_port_type { FW_PORT_TYPE_CR2_QSFP, FW_PORT_TYPE_SFP28, FW_PORT_TYPE_KR_SFP28, + FW_PORT_TYPE_KR_XLAUI, FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index b48361cfdc78..96f69f80dc99 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1229,7 +1229,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, else return PORT_OTHER; } else if (port_type == FW_PORT_TYPE_KR4_100G || - port_type == FW_PORT_TYPE_KR_SFP28) { + port_type == FW_PORT_TYPE_KR_SFP28 || + port_type == FW_PORT_TYPE_KR_XLAUI) { return PORT_NONE; } @@ -1323,6 +1324,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, SET_LMM(25000baseKR_Full); break; + case FW_PORT_TYPE_KR_XLAUI: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full); + break; + case FW_PORT_TYPE_CR2_QSFP: SET_LMM(FIBRE); SET_LMM(50000baseSR2_Full); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 14d7e673c656..129b914a434c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2619,8 +2619,8 @@ void t4vf_sge_stop(struct adapter *adapter) int t4vf_sge_init(struct adapter *adapter) { struct sge_params *sge_params = &adapter->params.sge; - u32 fl0 = sge_params->sge_fl_buffer_size[0]; - u32 fl1 = sge_params->sge_fl_buffer_size[1]; + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; /* @@ -2628,9 +2628,20 @@ int t4vf_sge_init(struct adapter *adapter) * the Physical Function Driver. Ideally we should be able to deal * with _any_ configuration. Practice is different ... */ - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. 
+ */ + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", - fl0, fl1); + fl_small_pg, fl_large_pg); return -EINVAL; } if ((sge_params->sge_control & RXPKTCPLMODE_F) != @@ -2642,8 +2653,8 @@ int t4vf_sge_init(struct adapter *adapter) /* * Now translate the adapter parameters into our internal forms. */ - if (fl1) - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 02dd5246dfae..1a49297224ed 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -103,7 +103,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = { static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) { int i; - int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map); + int num_entries = ARRAY_SIZE(cmd_priv_map); u32 cmd_privileges = adapter->cmd_privileges; for (i = 0; i < num_entries; i++) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2d1b06579c1a..90aa69a08922 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -818,6 +818,12 @@ static void fec_enet_bd_init(struct net_device *dev) for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = cpu_to_fec16(0); + if (bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); if (txq->tx_skbuff[i]) { dev_kfree_skb_any(txq->tx_skbuff[i]); txq->tx_skbuff[i] = NULL; @@ -3483,6 +3489,10 @@ fec_probe(struct platform_device *pdev) goto failed_regulator; } } else { + if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto failed_regulator; + } fep->reg_phy = NULL; } @@ -3566,8 +3576,9 @@ failed_clk_ipg: failed_clk: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); -failed_phy: of_node_put(phy_node); +failed_phy: + dev_id--; failed_ioremap: free_netdev(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 8b5cdf490850..cac86e9ae0dd 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -1168,7 +1168,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb) int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status) { - if (!mac_cb || !mac_cb->cpld_ctrl) + if (!mac_cb) return 0; return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 408b63faf9a8..ca247c2cc238 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -18,6 +18,7 @@ enum _dsm_op_index { HNS_OP_LED_SET_FUNC = 0x3, HNS_OP_GET_PORT_TYPE_FUNC = 0x4, HNS_OP_GET_SFP_STAT_FUNC = 0x5, + HNS_OP_LOCATE_LED_SET_FUNC = 0x6, }; enum _dsm_rst_type { @@ -81,6 +82,33 @@ static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type, ACPI_FREE(obj); } +static void 
hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb, + u8 op_type, u32 locate, + u32 port) +{ + union acpi_object obj_args[2], argv4; + union acpi_object *obj; + + obj_args[0].integer.type = ACPI_TYPE_INTEGER; + obj_args[0].integer.value = locate; + obj_args[1].integer.type = ACPI_TYPE_INTEGER; + obj_args[1].integer.value = port; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 2; + argv4.package.elements = obj_args; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), + &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4); + if (!obj) { + dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n", + locate, port); + return; + } + + ACPI_FREE(obj); +} + static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, u16 speed, int data) { @@ -160,6 +188,9 @@ static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb) static int cpld_set_led_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status) { + if (!mac_cb->cpld_ctrl) + return 0; + switch (status) { case HNAE_LED_ACTIVE: mac_cb->cpld_led_value = @@ -184,6 +215,30 @@ static int cpld_set_led_id(struct hns_mac_cb *mac_cb, return 0; } +static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb, + enum hnae_led_state status) +{ + switch (status) { + case HNAE_LED_ACTIVE: + hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb, + HNS_OP_LOCATE_LED_SET_FUNC, + CPLD_LED_ON_VALUE, + mac_cb->mac_id); + break; + case HNAE_LED_INACTIVE: + hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb, + HNS_OP_LOCATE_LED_SET_FUNC, + CPLD_LED_DEFAULT_VALUE, + mac_cb->mac_id); + break; + default: + dev_err(mac_cb->dev, "invalid led state: %d!", status); + return -EINVAL; + } + + return 0; +} + #define RESET_REQ_OR_DREQ 1 static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type, @@ -660,7 +715,7 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { misc_op->cpld_set_led = hns_cpld_set_led_acpi; misc_op->cpld_reset_led = cpld_led_reset_acpi; - misc_op->cpld_set_led_id = cpld_set_led_id; + misc_op->cpld_set_led_id = cpld_set_led_id_acpi; misc_op->dsaf_reset = hns_dsaf_rst_acpi; misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index a9e2b32834c5..adec88d941df 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -274,10 +274,14 @@ struct hnae3_ae_dev { * Get firmware version * get_mdix_mode() * Get media typr of phy + * enable_vlan_filter() + * Enable vlan filter * set_vlan_filter() * Set vlan filter config of Ports * set_vf_vlan_filter() * Set vlan filter config of vf + * enable_hw_strip_rxvtag() + * Enable/disable hardware strip vlan tag of packets received */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); @@ -380,12 +384,21 @@ struct hnae3_ae_ops { void (*get_mdix_mode)(struct hnae3_handle *handle, u8 *tp_mdix_ctrl, u8 *tp_mdix); + void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable); int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); + int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); void (*reset_event)(struct hnae3_handle *handle, enum hnae3_reset_type reset); + void (*get_channels)(struct hnae3_handle *handle, + struct ethtool_channels *ch); + void (*get_tqps_and_rss_info)(struct hnae3_handle *h, + u16 
*free_tqps, u16 *max_rss_size); + int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num); + void (*get_flowctrl_adv)(struct hnae3_handle *handle, + u32 *flowctrl_adv); }; struct hnae3_dcb_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index c2c13238db52..14c7625c6f19 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -247,6 +247,8 @@ static int hns3_nic_net_up(struct net_device *netdev) if (ret) goto out_start_err; + clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); + return 0; out_start_err: @@ -286,6 +288,9 @@ static void hns3_nic_net_down(struct net_device *netdev) const struct hnae3_ae_ops *ops; int i; + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) @@ -723,6 +728,58 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } +static int hns3_fill_desc_vtags(struct sk_buff *skb, + struct hns3_enet_ring *tx_ring, + u32 *inner_vlan_flag, + u32 *out_vlan_flag, + u16 *inner_vtag, + u16 *out_vtag) +{ +#define HNS3_TX_VLAN_PRIO_SHIFT 13 + + if (skb->protocol == htons(ETH_P_8021Q) && + !(tx_ring->tqp->handle->kinfo.netdev->features & + NETIF_F_HW_VLAN_CTAG_TX)) { + /* When HW VLAN acceleration is turned off, and the stack + * sets the protocol to 802.1q, the driver just need to + * set the protocol to the encapsulated ethertype. + */ + skb->protocol = vlan_get_protocol(skb); + return 0; + } + + if (skb_vlan_tag_present(skb)) { + u16 vlan_tag; + + vlan_tag = skb_vlan_tag_get(skb); + vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT; + + /* Based on hw strategy, use out_vtag in two layer tag case, + * and use inner_vtag in one tag case. 
+ */ + if (skb->protocol == htons(ETH_P_8021Q)) { + hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); + *out_vtag = vlan_tag; + } else { + hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); + *inner_vtag = vlan_tag; + } + } else if (skb->protocol == htons(ETH_P_8021Q)) { + struct vlan_ethhdr *vhdr; + int rc; + + rc = skb_cow_head(skb, 0); + if (rc < 0) + return rc; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7) + << HNS3_TX_VLAN_PRIO_SHIFT); + } + + skb->protocol = vlan_get_protocol(skb); + return 0; +} + static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, enum hns_desc_type type) @@ -733,6 +790,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, u16 bdtp_fe_sc_vld_ra_ri = 0; u32 type_cs_vlan_tso = 0; struct sk_buff *skb; + u16 inner_vtag = 0; + u16 out_vtag = 0; u32 paylen = 0; u16 mss = 0; __be16 protocol; @@ -756,15 +815,16 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, skb = (struct sk_buff *)priv; paylen = skb->len; + ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso, + &ol_type_vlan_len_msec, + &inner_vtag, &out_vtag); + if (unlikely(ret)) + return ret; + if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_mac_len(skb); protocol = skb->protocol; - /* vlan packet*/ - if (protocol == htons(ETH_P_8021Q)) { - protocol = vlan_get_protocol(skb); - skb->protocol = protocol; - } ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (ret) return ret; @@ -790,6 +850,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, cpu_to_le32(type_cs_vlan_tso); desc->tx.paylen = cpu_to_le32(paylen); desc->tx.mss = cpu_to_le16(mss); + desc->tx.vlan_tag = cpu_to_le16(inner_vtag); + desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag); } /* move ring pointer to next.*/ @@ -1032,6 +1094,9 @@ static int hns3_nic_set_features(struct net_device *netdev, netdev_features_t features) { struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + netdev_features_t changed; + int ret; if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { priv->ops.fill_desc = hns3_fill_desc_tso; @@ -1041,15 +1106,32 @@ static int hns3_nic_set_features(struct net_device *netdev, priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; } + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + h->ae_algo->ops->enable_vlan_filter(h, true); + else + h->ae_algo->ops->enable_vlan_filter(h, false); + + changed = netdev->features ^ features; + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true); + else + ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false); + + if (ret) + return ret; + } + netdev->features = features; return 0; } -static void -hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +static void hns3_nic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) { struct hns3_nic_priv *priv = netdev_priv(netdev); int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hnae3_handle *handle = priv->ae_handle; struct hns3_enet_ring *ring; unsigned int start; unsigned int idx; @@ -1057,6 +1139,13 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) u64 rx_bytes = 0; u64 tx_pkts = 0; u64 rx_pkts = 0; + u64 tx_drop = 0; + u64 rx_drop = 0; + + if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return; + + handle->ae_algo->ops->update_stats(handle, &netdev->stats); for (idx = 0; idx < 
queue_num; idx++) { /* fetch the tx stats */ @@ -1065,6 +1154,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; + tx_drop += ring->stats.tx_busy; + tx_drop += ring->stats.sw_err_cnt; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); /* fetch the rx stats */ @@ -1073,6 +1164,9 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) start = u64_stats_fetch_begin_irq(&ring->syncp); rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; + rx_drop += ring->stats.non_vld_descs; + rx_drop += ring->stats.err_pkt_len; + rx_drop += ring->stats.l2_err; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); } @@ -1088,8 +1182,8 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) stats->rx_missed_errors = netdev->stats.rx_missed_errors; stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = netdev->stats.rx_dropped; - stats->tx_dropped = netdev->stats.tx_dropped; + stats->rx_dropped = rx_drop + netdev->stats.rx_dropped; + stats->tx_dropped = tx_drop + netdev->stats.tx_dropped; stats->collisions = netdev->stats.collisions; stats->rx_over_errors = netdev->stats.rx_over_errors; stats->rx_frame_errors = netdev->stats.rx_frame_errors; @@ -1319,6 +1413,8 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) return ret; } + netdev->mtu = new_mtu; + /* if the netdev was running earlier, bring it up again */ if (if_running && hns3_nic_net_open(netdev)) ret = -EINVAL; @@ -1478,6 +1574,8 @@ static struct pci_driver hns3_driver = { /* set default feature to hns3 */ static void hns3_set_default_feature(struct net_device *netdev) { + struct hnae3_handle *h = hns3_get_handle(netdev); + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -1492,6 +1590,7 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | @@ -1505,11 +1604,15 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (!(h->flags & HNAE3_SUPPORT_VF)) + netdev->hw_features |= + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -2085,6 +2188,22 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, prefetchw(skb->data); + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. 
+ */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + if (!(vlan_tag & VLAN_VID_MASK)) + vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + if (vlan_tag & VLAN_VID_MASK) + __vlan_hwaccel_put_tag(skb, + htons(ETH_P_8021Q), + vlan_tag); + } + bnum = 1; if (length <= HNS3_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -2651,6 +2770,19 @@ err: return ret; } +static void hns3_put_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + devm_kfree(priv->dev, priv->ring_data[i].ring); + devm_kfree(priv->dev, + priv->ring_data[i + h->kinfo.num_tqps].ring); + } + devm_kfree(priv->dev, priv->ring_data); +} + static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) { int ret; @@ -2787,8 +2919,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv) h->ae_algo->ops->reset_queue(h, i); hns3_fini_ring(priv->ring_data[i].ring); + devm_kfree(priv->dev, priv->ring_data[i].ring); hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + devm_kfree(priv->dev, + priv->ring_data[i + h->kinfo.num_tqps].ring); } + devm_kfree(priv->dev, priv->ring_data); return 0; } @@ -3162,6 +3298,115 @@ static int hns3_reset_notify(struct hnae3_handle *handle, return ret; } +static u16 hns3_get_max_available_channels(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u16 free_tqps, max_rss_size, max_tqps; + + h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size); + max_tqps = h->kinfo.num_tc * max_rss_size; + + return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps)); +} + +static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret; + + ret = h->ae_algo->ops->set_channels(h, new_tqp_num); + if (ret) + return ret; + + ret = hns3_get_ring_config(priv); + if (ret) + return ret; + + ret = hns3_nic_init_vector_data(priv); + if (ret) + goto err_uninit_vector; + + ret = hns3_init_all_ring(priv); + if (ret) + goto err_put_ring; + + return 0; + +err_put_ring: + hns3_put_ring_config(priv); +err_uninit_vector: + hns3_nic_uninit_vector_data(priv); + return ret; +} + +static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num) +{ + return (new_tqp_num / num_tc) * num_tc; +} + +int hns3_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + bool if_running = netif_running(netdev); + u32 new_tqp_num = ch->combined_count; + u16 org_tqp_num; + int ret; + + if (ch->rx_count || ch->tx_count) + return -EINVAL; + + if (new_tqp_num > hns3_get_max_available_channels(netdev) || + new_tqp_num < kinfo->num_tc) { + dev_err(&netdev->dev, + "Change tqps fail, the tqp range is from %d to %d", + kinfo->num_tc, + hns3_get_max_available_channels(netdev)); + return -EINVAL; + } + + new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num); + if (kinfo->num_tqps == new_tqp_num) + return 0; + + if (if_running) + dev_close(netdev); + + hns3_clear_all_ring(h); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) { + dev_err(&netdev->dev, + "Unbind vector with tqp fail, nothing is changed"); + goto open_netdev; + } + + hns3_uninit_all_ring(priv); + + org_tqp_num = h->kinfo.num_tqps; + ret = 
hns3_modify_tqp_num(netdev, new_tqp_num); + if (ret) { + ret = hns3_modify_tqp_num(netdev, org_tqp_num); + if (ret) { + /* If revert to old tqp failed, fatal error occurred */ + dev_err(&netdev->dev, + "Revert to old tqp num fail, ret=%d", ret); + return ret; + } + dev_info(&netdev->dev, + "Change tqp num fail, Revert to old tqp num"); + } + +open_netdev: + if (if_running) + dev_open(netdev); + + return ret; +} + static const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 8a9de759957b..a2a7ea3e9a3a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -595,6 +595,8 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) void hns3_ethtool_set_ops(struct net_device *netdev); +int hns3_set_channels(struct net_device *netdev, + struct ethtool_channels *ch); bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 65a69b439457..d3cb3ec2c62b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -15,26 +15,25 @@ struct hns3_stats { char stats_string[ETH_GSTRING_LEN]; - int stats_size; int stats_offset; }; /* tqp related stats */ #define HNS3_TQP_STAT(_string, _member) { \ .stats_string = _string, \ - .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ - .stats_offset = offsetof(struct hns3_enet_ring, stats), \ -} \ + .stats_offset = offsetof(struct hns3_enet_ring, stats) +\ + offsetof(struct ring_stats, _member), \ +} static const struct hns3_stats hns3_txq_stats[] = { /* Tx per-queue statistics */ - HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), - HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("tx_pkts", tx_pkts), - HNS3_TQP_STAT("tx_bytes", tx_bytes), - HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), - HNS3_TQP_STAT("tx_restart_queue", restart_queue), + HNS3_TQP_STAT("io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("tx_dropped", sw_err_cnt), + HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("packets", tx_pkts), + HNS3_TQP_STAT("bytes", tx_bytes), + HNS3_TQP_STAT("errors", tx_err_cnt), + HNS3_TQP_STAT("tx_wake", restart_queue), HNS3_TQP_STAT("tx_busy", tx_busy), }; @@ -42,18 +41,18 @@ static const struct hns3_stats hns3_txq_stats[] = { static const struct hns3_stats hns3_rxq_stats[] = { /* Rx per-queue statistics */ - HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), - HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("rx_pkts", rx_pkts), - HNS3_TQP_STAT("rx_bytes", rx_bytes), - HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), - HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), - HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), - HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), - HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), - HNS3_TQP_STAT("rx_l2_err", l2_err), - HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), + HNS3_TQP_STAT("io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("rx_dropped", sw_err_cnt), + HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("packets", rx_pkts), + HNS3_TQP_STAT("bytes", rx_bytes), + 
HNS3_TQP_STAT("errors", rx_err_cnt), + HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt), + HNS3_TQP_STAT("err_pkt_len", err_pkt_len), + HNS3_TQP_STAT("non_vld_descs", non_vld_descs), + HNS3_TQP_STAT("err_bd_num", err_bd_num), + HNS3_TQP_STAT("l2_err", l2_err), + HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), }; #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) @@ -389,9 +388,9 @@ static int hns3_get_sset_count(struct net_device *netdev, int stringset) } static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, - u32 stat_count, u32 num_tqps) + u32 stat_count, u32 num_tqps, const char *prefix) { -#define MAX_PREFIX_SIZE (8 + 4) +#define MAX_PREFIX_SIZE (6 + 4) u32 size_left; u32 i, j; u32 n1; @@ -401,7 +400,8 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, data[ETH_GSTRING_LEN - 1] = '\0'; /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); + n1 = snprintf(data, MAX_PREFIX_SIZE, "%s#%d_", + prefix, i); n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); size_left = (ETH_GSTRING_LEN - 1) - n1; @@ -417,14 +417,16 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; + const char tx_prefix[] = "txq"; + const char rx_prefix[] = "rxq"; /* get strings for Tx */ data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, - kinfo->num_tqps); + kinfo->num_tqps, tx_prefix); /* get strings for Rx */ data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, - kinfo->num_tqps); + kinfo->num_tqps, rx_prefix); return data; } @@ -455,13 +457,13 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hns3_enet_ring *ring; u8 *stat; - u32 i; + int i, j; /* get stats for Tx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i].ring; - for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; + for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_txq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -469,8 +471,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) /* get stats for Rx */ for (i = 0; i < kinfo->num_tqps; i++) { ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; - for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; + for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) { + stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset; *data++ = *(u64 *)stat; } } @@ -559,10 +561,23 @@ static void hns3_get_pauseparam(struct net_device *netdev, ¶m->rx_pause, ¶m->tx_pause); } +static int hns3_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->set_pauseparam) + return h->ae_algo->ops->set_pauseparam(h, param->autoneg, + param->rx_pause, + param->tx_pause); + return -EOPNOTSUPP; +} + static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); + u32 flowctrl_adv = 0; u32 supported_caps; u32 advertised_caps; u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; @@ -638,6 +653,8 @@ static int hns3_get_link_ksettings(struct net_device *netdev, if (!cmd->base.autoneg) advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + advertised_caps &= ~HNS3_LM_PAUSE_BIT; + /* now, 
map driver link modes to ethtool link modes */ hns3_driv_to_eth_caps(supported_caps, cmd, false); hns3_driv_to_eth_caps(advertised_caps, cmd, true); @@ -650,6 +667,18 @@ static int hns3_get_link_ksettings(struct net_device *netdev, /* 4.mdio_support */ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + /* 5.get flow control setttings */ + if (h->ae_algo->ops->get_flowctrl_adv) + h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv); + + if (flowctrl_adv & ADVERTISED_Pause) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Pause); + + if (flowctrl_adv & ADVERTISED_Asym_Pause) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); + return 0; } @@ -730,7 +759,7 @@ static int hns3_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.num_tc * h->kinfo.rss_size; + cmd->data = h->kinfo.rss_size; break; case ETHTOOL_GRXFH: return h->ae_algo->ops->get_rss_tuple(h, cmd); @@ -849,6 +878,15 @@ static int hns3_nway_reset(struct net_device *netdev) return genphy_restart_aneg(phy); } +static void hns3_get_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->get_channels) + h->ae_algo->ops->get_channels(h, ch); +} + static const struct ethtool_ops hns3vf_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, .get_ringparam = hns3_get_ringparam, @@ -871,6 +909,7 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_ringparam = hns3_get_ringparam, .set_ringparam = hns3_set_ringparam, .get_pauseparam = hns3_get_pauseparam, + .set_pauseparam = hns3_set_pauseparam, .get_strings = hns3_get_strings, .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, @@ -883,6 +922,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_link_ksettings = hns3_get_link_ksettings, .set_link_ksettings = hns3_set_link_ksettings, .nway_reset = hns3_nway_reset, + .get_channels = hns3_get_channels, + .set_channels = hns3_set_channels, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index ce5ed8845042..3c3159b2d3bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -180,6 +180,10 @@ enum hclge_opcode_type { /* Promisuous mode command */ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, + /* Vlan offload command */ + HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01, + HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02, + /* Interrupts cmd */ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, @@ -191,6 +195,7 @@ enum hclge_opcode_type { HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, + HCLGE_OPC_MAC_VLAN_MASK_SET = 0x1012, /* Multicast linear table cmd */ HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, @@ -399,6 +404,8 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0) #define HCLGE_CFG_DEFAULT_SPEED_S 16 #define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) +#define HCLGE_CFG_RSS_SIZE_S 24 +#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24) struct hclge_cfg_param_cmd { __le32 offset; @@ -549,8 +556,6 @@ struct hclge_config_auto_neg_cmd { u8 rsv[20]; }; -#define HCLGE_MAC_MIN_MTU 64 -#define HCLGE_MAC_MAX_MTU 9728 #define HCLGE_MAC_UPLINK_PORT 0x100 struct hclge_config_max_frm_size_cmd { @@ -587,6 +592,15 @@ struct hclge_mac_vlan_tbl_entry_cmd { u8 rsv2[6]; }; +#define 
HCLGE_VLAN_MASK_EN_B 0x0 +struct hclge_mac_vlan_mask_entry_cmd { + u8 rsv0[2]; + u8 vlan_mask; + u8 rsv1; + u8 mac_mask[6]; + u8 rsv2[14]; +}; + #define HCLGE_CFG_MTA_MAC_SEL_S 0x0 #define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) #define HCLGE_CFG_MTA_MAC_EN_B 0x7 @@ -658,6 +672,47 @@ struct hclge_vlan_filter_vf_cfg_cmd { u8 vf_bitmap[16]; }; +#define HCLGE_ACCEPT_TAG_B 0 +#define HCLGE_ACCEPT_UNTAG_B 1 +#define HCLGE_PORT_INS_TAG1_EN_B 2 +#define HCLGE_PORT_INS_TAG2_EN_B 3 +#define HCLGE_CFG_NIC_ROCE_SEL_B 4 +struct hclge_vport_vtag_tx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[2]; + __le16 def_vlan_tag1; + __le16 def_vlan_tag2; + u8 vf_bitmap[8]; + u8 rsv2[8]; +}; + +#define HCLGE_REM_TAG1_EN_B 0 +#define HCLGE_REM_TAG2_EN_B 1 +#define HCLGE_SHOW_TAG1_EN_B 2 +#define HCLGE_SHOW_TAG2_EN_B 3 +struct hclge_vport_vtag_rx_cfg_cmd { + u8 vport_vlan_cfg; + u8 vf_offset; + u8 rsv1[6]; + u8 vf_bitmap[8]; + u8 rsv2[8]; +}; + +struct hclge_tx_vlan_type_cfg_cmd { + __le16 ot_vlan_type; + __le16 in_vlan_type; + u8 rsv[20]; +}; + +struct hclge_rx_vlan_type_cfg_cmd { + __le16 ot_fst_vlan_type; + __le16 ot_sec_vlan_type; + __le16 in_fst_vlan_type; + __le16 in_sec_vlan_type; + u8 rsv[16]; +}; + struct hclge_cfg_com_tqp_queue_cmd { __le16 tqp_id; __le16 stream_id; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e97fd6654e5e..d7352f5f75c3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -17,6 +17,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/platform_device.h> +#include <linux/if_vlan.h> #include <net/rtnetlink.h> #include "hclge_cmd.h" #include "hclge_dcb.h" @@ -35,6 +36,7 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, enum hclge_mta_dmac_sel_type mta_mac_sel, bool enable); +static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); @@ -279,8 +281,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, - {"mac_tx_overrsize_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, + {"mac_tx_oversize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)}, {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, {"mac_tx_65_127_oct_pkt_num", @@ -293,8 +295,24 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, - {"mac_tx_1519_max_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, + {"mac_tx_1519_2047_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)}, + {"mac_tx_2048_4095_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)}, + {"mac_tx_4096_8191_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)}, + {"mac_tx_8192_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_12287_oct_pkt_num)}, + {"mac_tx_8192_9216_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)}, + {"mac_tx_9217_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)}, + {"mac_tx_12288_16383_oct_pkt_num", + 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)}, + {"mac_tx_1519_max_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)}, + {"mac_tx_1519_max_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)}, {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, {"mac_rx_total_oct_num", @@ -315,8 +333,8 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, - {"mac_rx_overrsize_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, + {"mac_rx_oversize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)}, {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, {"mac_rx_65_127_oct_pkt_num", @@ -329,33 +347,49 @@ static const struct hclge_comm_stats_str g_mac_stats_string[] = { HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, - {"mac_rx_1519_max_oct_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, - - {"mac_trans_fragment_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, - {"mac_trans_undermin_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, - {"mac_trans_jabber_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, - {"mac_trans_err_all_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, - {"mac_trans_from_app_good_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, - {"mac_trans_from_app_bad_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, - {"mac_rcv_fragment_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, - {"mac_rcv_undermin_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, - {"mac_rcv_jabber_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, - {"mac_rcv_fcs_err_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, - {"mac_rcv_send_app_good_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, - {"mac_rcv_send_app_bad_pkt_num", - HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} + {"mac_rx_1519_2047_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)}, + {"mac_rx_2048_4095_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)}, + {"mac_rx_4096_8191_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)}, + {"mac_rx_8192_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_12287_oct_pkt_num)}, + {"mac_rx_8192_9216_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)}, + {"mac_rx_9217_12287_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)}, + {"mac_rx_12288_16383_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)}, + {"mac_rx_1519_max_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)}, + {"mac_rx_1519_max_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)}, + + {"mac_tx_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)}, + {"mac_tx_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)}, + {"mac_tx_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)}, + {"mac_tx_err_all_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)}, + {"mac_tx_from_app_good_pkt_num", + 
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)}, + {"mac_tx_from_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)}, + {"mac_rx_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)}, + {"mac_rx_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)}, + {"mac_rx_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)}, + {"mac_rx_fcs_err_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)}, + {"mac_rx_send_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)}, + {"mac_rx_send_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)} }; static int hclge_64_bit_update_stats(struct hclge_dev *hdev) @@ -463,7 +497,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev) static int hclge_mac_update_stats(struct hclge_dev *hdev) { -#define HCLGE_MAC_CMD_NUM 17 +#define HCLGE_MAC_CMD_NUM 21 #define HCLGE_RTN_DATA_NUM 4 u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); @@ -525,7 +559,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) return ret; } tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += - le32_to_cpu(desc[0].data[4]); + le32_to_cpu(desc[0].data[1]); } for (i = 0; i < kinfo->num_tqps; i++) { @@ -545,7 +579,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) return ret; } tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += - le32_to_cpu(desc[0].data[4]); + le32_to_cpu(desc[0].data[1]); } return 0; @@ -587,7 +621,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -595,7 +629,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < kinfo->num_tqps; i++) { struct hclge_tqp *tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", tqp->index); buff = buff + ETH_GSTRING_LEN; } @@ -643,23 +677,22 @@ static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; - net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_errors = hw_stats->mac_stats.mac_rx_oversize_pkt_num; net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; - net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt; net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; - net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + net_stats->rx_errors += hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; - net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rx_fcs_err_pkt_num; net_stats->rx_length_errors = hw_stats->mac_stats.mac_rx_undersize_pkt_num; net_stats->rx_length_errors += - hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + hw_stats->mac_stats.mac_rx_oversize_pkt_num; net_stats->rx_over_errors = - 
hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + hw_stats->mac_stats.mac_rx_oversize_pkt_num; } static void hclge_update_stats_for_all(struct hclge_dev *hdev) @@ -699,6 +732,9 @@ static void hclge_update_stats(struct hnae3_handle *handle, struct hclge_hw_stats *hw_stats = &hdev->hw_stats; int status; + if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) + return; + status = hclge_mac_update_stats(hdev); if (status) dev_err(&hdev->pdev->dev, @@ -724,6 +760,8 @@ static void hclge_update_stats(struct hnae3_handle *handle, status); hclge_update_netstat(hw_stats, net_stats); + + clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); } static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) @@ -982,6 +1020,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), HCLGE_CFG_DEFAULT_SPEED_M, HCLGE_CFG_DEFAULT_SPEED_S); + cfg->rss_size_max = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_RSS_SIZE_M, + HCLGE_CFG_RSS_SIZE_S); + for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; @@ -1059,7 +1101,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->num_vmdq_vport = cfg.vmdq_vport_num; hdev->base_tqp_pid = 0; - hdev->rss_size_max = 1; + hdev->rss_size_max = cfg.rss_size_max; hdev->rx_buf_len = cfg.rx_buf_len; ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); hdev->hw.mac.media_type = cfg.media_type; @@ -1096,10 +1138,7 @@ static int hclge_configure(struct hclge_dev *hdev) for (i = 0; i < hdev->tm_info.num_tc; i++) hnae_set_bit(hdev->hw_tc_map, i, 1); - if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) - hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; - else - hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE; + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; return ret; } @@ -2133,28 +2172,6 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, return 0; } -static int hclge_query_autoneg_result(struct hclge_dev *hdev) -{ - struct hclge_mac *mac = &hdev->hw.mac; - struct hclge_query_an_speed_dup_cmd *req; - struct hclge_desc desc; - int ret; - - req = (struct hclge_query_an_speed_dup_cmd *)desc.data; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "autoneg result query cmd failed %d.\n", ret); - return ret; - } - - mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B); - - return 0; -} - static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) { struct hclge_config_auto_neg_cmd *req; @@ -2190,15 +2207,45 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; - hclge_query_autoneg_result(hdev); + if (phydev) + return phydev->autoneg; return hdev->hw.mac.autoneg; } +static int hclge_set_default_mac_vlan_mask(struct hclge_dev *hdev, + bool mask_vlan, + u8 *mac_mask) +{ + struct hclge_mac_vlan_mask_entry_cmd *req; + struct hclge_desc desc; + int status; + + req = (struct hclge_mac_vlan_mask_entry_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_MASK_SET, false); + + hnae_set_bit(req->vlan_mask, HCLGE_VLAN_MASK_EN_B, + mask_vlan ? 
1 : 0); + ether_addr_copy(req->mac_mask, mac_mask); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Config mac_vlan_mask failed for cmd_send, ret =%d\n", + status); + + return status; +} + static int hclge_mac_init(struct hclge_dev *hdev) { + struct hnae3_handle *handle = &hdev->vport[0].nic; + struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; + u8 mac_mask[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + int mtu; int ret; ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); @@ -2224,7 +2271,33 @@ static int hclge_mac_init(struct hclge_dev *hdev) return ret; } - return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); + ret = hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); + if (ret) { + dev_err(&hdev->pdev->dev, + "set mta filter mode fail ret=%d\n", ret); + return ret; + } + + ret = hclge_set_default_mac_vlan_mask(hdev, true, mac_mask); + if (ret) { + dev_err(&hdev->pdev->dev, + "set default mac_vlan_mask fail ret=%d\n", ret); + return ret; + } + + if (netdev) + mtu = netdev->mtu; + else + mtu = ETH_DATA_LEN; + + ret = hclge_set_mtu(handle, mtu); + if (ret) { + dev_err(&hdev->pdev->dev, + "set mtu failed ret=%d\n", ret); + return ret; + } + + return 0; } static void hclge_mbx_task_schedule(struct hclge_dev *hdev) @@ -2363,6 +2436,7 @@ static void hclge_service_timer(struct timer_list *t) struct hclge_dev *hdev = from_timer(hdev, t, service_timer); mod_timer(&hdev->service_timer, jiffies + HZ); + hdev->hw_stats.stats_timer++; hclge_task_schedule(hdev); } @@ -2762,9 +2836,13 @@ static void hclge_service_task(struct work_struct *work) struct hclge_dev *hdev = container_of(work, struct hclge_dev, service_task); + if (hdev->hw_stats.stats_timer >= HCLGE_STATS_TIMER_INTERVAL) { + hclge_update_stats_for_all(hdev); + hdev->hw_stats.stats_timer = 0; + } + hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); - hclge_update_stats_for_all(hdev); hclge_service_complete(hdev); } @@ -4179,6 +4257,7 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) const unsigned char *new_addr = (const unsigned char *)p; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + int ret; /* mac addr check */ if (is_zero_ether_addr(new_addr) || @@ -4190,14 +4269,39 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) return -EINVAL; } - hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); + ret = hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); + if (ret) + dev_warn(&hdev->pdev->dev, + "remove old uc mac address fail, ret =%d.\n", + ret); - if (!hclge_add_uc_addr(handle, new_addr)) { - ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); - return 0; + ret = hclge_add_uc_addr(handle, new_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "add uc mac address fail, ret =%d.\n", + ret); + + ret = hclge_add_uc_addr(handle, hdev->hw.mac.mac_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "restore uc mac address fail, ret =%d.\n", + ret); + } + + return -EIO; } - return -EIO; + ret = hclge_mac_pause_addr_cfg(hdev, new_addr); + if (ret) { + dev_err(&hdev->pdev->dev, + "configure mac pause address fail, ret =%d.\n", + ret); + return -EIO; + } + + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + + return 0; } static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, @@ -4223,6 +4327,17 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, return 0; } +#define HCLGE_FILTER_TYPE_VF 0 
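+/* Editor's annotation (illustrative, not part of the original patch):
+ * HCLGE_FILTER_TYPE_VF and HCLGE_FILTER_TYPE_PORT are the vlan_type
+ * argument of hclge_set_vlan_filter_ctrl(); they select the
+ * per-function and the port-level VLAN filter tables respectively,
+ * as used by hclge_init_vlan_config() further below.
+ */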
+#define HCLGE_FILTER_TYPE_PORT 1 + +static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, enable); +} + int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, bool is_kill, u16 vlan, u8 qos, __be16 proto) { @@ -4339,43 +4454,204 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); } +static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg; + struct hclge_vport_vtag_tx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false); + + req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data; + req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1); + req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG_B, + vcfg->accept_tag ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG_B, + vcfg->accept_untag ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B, + vcfg->insert_tag1_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B, + vcfg->insert_tag2_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + req->vf_bitmap[req->vf_offset] = + 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port txvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport) +{ + struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg; + struct hclge_vport_vtag_rx_cfg_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false); + + req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data; + hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B, + vcfg->strip_tag1_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B, + vcfg->strip_tag2_en ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B, + vcfg->vlan1_vlan_prionly ? 1 : 0); + hnae_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B, + vcfg->vlan2_vlan_prionly ? 
1 : 0); + + req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD; + req->vf_bitmap[req->vf_offset] = + 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send port rxvlan cfg command fail, ret =%d\n", + status); + + return status; +} + +static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) +{ + struct hclge_rx_vlan_type_cfg_cmd *rx_req; + struct hclge_tx_vlan_type_cfg_cmd *tx_req; + struct hclge_desc desc; + int status; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false); + rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data; + rx_req->ot_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); + rx_req->ot_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); + rx_req->in_fst_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); + rx_req->in_sec_vlan_type = + cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) { + dev_err(&hdev->pdev->dev, + "Send rxvlan protocol type command fail, ret =%d\n", + status); + return status; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false); + + tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)&desc.data; + tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); + tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); + + status = hclge_cmd_send(&hdev->hw, &desc, 1); + if (status) + dev_err(&hdev->pdev->dev, + "Send txvlan protocol type command fail, ret =%d\n", + status); + + return status; +} + static int hclge_init_vlan_config(struct hclge_dev *hdev) { -#define HCLGE_VLAN_TYPE_VF_TABLE 0 -#define HCLGE_VLAN_TYPE_PORT_TABLE 1 +#define HCLGE_DEF_VLAN_TYPE 0x8100 + struct hnae3_handle *handle; + struct hclge_vport *vport; int ret; + int i; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, - true); + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, true); if (ret) return ret; - ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, - true); + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, true); if (ret) return ret; + hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE; + hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE; + + ret = hclge_set_vlan_protocol_type(hdev); + if (ret) + return ret; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + vport = &hdev->vport[i]; + vport->txvlan_cfg.accept_tag = true; + vport->txvlan_cfg.accept_untag = true; + vport->txvlan_cfg.insert_tag1_en = false; + vport->txvlan_cfg.insert_tag2_en = false; + vport->txvlan_cfg.default_tag1 = 0; + vport->txvlan_cfg.default_tag2 = 0; + + ret = hclge_set_vlan_tx_offload_cfg(vport); + if (ret) + return ret; + + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = true; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + ret = hclge_set_vlan_rx_offload_cfg(vport); + if (ret) + return ret; + } + handle = &hdev->vport[0].nic; return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false); } +static int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport 
*vport = hclge_get_vport(handle); + + vport->rxvlan_cfg.strip_tag1_en = false; + vport->rxvlan_cfg.strip_tag2_en = enable; + vport->rxvlan_cfg.vlan1_vlan_prionly = false; + vport->rxvlan_cfg.vlan2_vlan_prionly = false; + + return hclge_set_vlan_rx_offload_cfg(vport); +} + static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_config_max_frm_size_cmd *req; struct hclge_dev *hdev = vport->back; struct hclge_desc desc; + int max_frm_size; int ret; - if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) + max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if (max_frm_size < HCLGE_MAC_MIN_FRAME || + max_frm_size > HCLGE_MAC_MAX_FRAME) return -EINVAL; - hdev->mps = new_mtu; + max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); req = (struct hclge_config_max_frm_size_cmd *)desc.data; - req->max_frm_size = cpu_to_le16(new_mtu); + req->max_frm_size = cpu_to_le16(max_frm_size); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -4383,6 +4659,8 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) return ret; } + hdev->mps = max_frm_size; + return 0; } @@ -4481,6 +4759,100 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } +static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, + u32 *flowctrl_adv) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | + (phydev->advertising & ADVERTISED_Asym_Pause); +} + +static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + + if (!phydev) + return; + + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + + if (rx_en) + phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + + if (tx_en) + phydev->advertising ^= ADVERTISED_Asym_Pause; +} + +static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) +{ + int ret; + + if (rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_FULL; + else if (rx_en && !tx_en) + hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; + else if (!rx_en && tx_en) + hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; + else + hdev->fc_mode_last_time = HCLGE_FC_NONE; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) + return 0; + + ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); + if (ret) { + dev_err(&hdev->pdev->dev, "configure pauseparam error, ret = %d.\n", + ret); + return ret; + } + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + + return 0; +} + +int hclge_cfg_flowctrl(struct hclge_dev *hdev) +{ + struct phy_device *phydev = hdev->hw.mac.phydev; + u16 remote_advertising = 0; + u16 local_advertising = 0; + u32 rx_pause, tx_pause; + u8 flowctl; + + if (!phydev->link || !phydev->autoneg) + return 0; + + if (phydev->advertising & ADVERTISED_Pause) + local_advertising = ADVERTISE_PAUSE_CAP; + + if (phydev->advertising & ADVERTISED_Asym_Pause) + local_advertising |= ADVERTISE_PAUSE_ASYM; + + if (phydev->pause) + remote_advertising = LPA_PAUSE_CAP; + + if (phydev->asym_pause) + remote_advertising |= LPA_PAUSE_ASYM; + + flowctl = mii_resolve_flowctrl_fdx(local_advertising, + remote_advertising); + tx_pause = flowctl & FLOW_CTRL_TX; + rx_pause = flowctl & FLOW_CTRL_RX; + + if (phydev->duplex 
== HCLGE_MAC_HALF) { + tx_pause = 0; + rx_pause = 0; + } + + return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); +} + static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, u32 *rx_en, u32 *tx_en) { @@ -4510,6 +4882,41 @@ static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, } } +static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg, + u32 rx_en, u32 tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + u32 fc_autoneg; + + /* Only support flow control negotiation for netdev with + * phy attached for now. + */ + if (!phydev) + return -EOPNOTSUPP; + + fc_autoneg = hclge_get_autoneg(handle); + if (auto_neg != fc_autoneg) { + dev_info(&hdev->pdev->dev, + "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); + return -EOPNOTSUPP; + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + dev_info(&hdev->pdev->dev, + "Priority flow control enabled. Cannot set link flow control.\n"); + return -EOPNOTSUPP; + } + + hclge_set_flowctrl_adv(hdev, rx_en, tx_en); + + if (!fc_autoneg) + return hclge_cfg_pauseparam(hdev, rx_en, tx_en); + + return phy_start_aneg(phydev); +} + static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, u8 *duplex) { @@ -5002,6 +5409,136 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) ae_dev->priv = NULL; } +static u32 hclge_get_max_channels(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps); +} + +static void hclge_get_channels(struct hnae3_handle *handle, + struct ethtool_channels *ch) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + ch->max_combined = hclge_get_max_channels(handle); + ch->other_count = 1; + ch->max_other = 1; + ch->combined_count = vport->alloc_tqps; +} + +static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle, + u16 *free_tqps, u16 *max_rss_size) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u16 temp_tqps = 0; + int i; + + for (i = 0; i < hdev->num_tqps; i++) { + if (!hdev->htqp[i].alloced) + temp_tqps++; + } + *free_tqps = temp_tqps; + *max_rss_size = hdev->rss_size_max; +} + +static void hclge_release_tqp(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int i; + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = + container_of(kinfo->tqp[i], struct hclge_tqp, q); + + tqp->q.handle = NULL; + tqp->q.tqp_index = 0; + tqp->alloced = false; + } + + devm_kfree(&hdev->pdev->dev, kinfo->tqp); + kinfo->tqp = NULL; +} + +static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int cur_rss_size = kinfo->rss_size; + int cur_tqps = kinfo->num_tqps; + u16 tc_offset[HCLGE_MAX_TC_NUM]; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 tc_size[HCLGE_MAX_TC_NUM]; + u16 roundup_size; + u32 *rss_indir; + int ret, i; + + hclge_release_tqp(vport); + + ret = hclge_knic_setup(vport, new_tqps_num); + if (ret) { + dev_err(&hdev->pdev->dev, "setup nic fail, ret =%d\n", ret); + return ret; + } + + 
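+	/* Editor's annotation (illustrative, not part of the original
+	 * patch): with the queue set re-created above, the remaining
+	 * steps re-map the TQPs to the vport, re-run TM scheduler init,
+	 * recompute the per-TC RSS size/offset from the new rss_size,
+	 * and rebuild the indirection table as rss_indir[i] = i % rss_size.
+	 */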
ret = hclge_map_tqp_to_vport(hdev, vport); + if (ret) { + dev_err(&hdev->pdev->dev, "map vport tqp fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "tm schd init fail, ret =%d\n", ret); + return ret; + } + + roundup_size = roundup_pow_of_two(kinfo->rss_size); + roundup_size = ilog2(roundup_size); + /* Set the RSS TC mode according to the new RSS size */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + tc_valid[i] = 0; + + if (!(hdev->hw_tc_map & BIT(i))) + continue; + + tc_valid[i] = 1; + tc_size[i] = roundup_size; + tc_offset[i] = kinfo->rss_size * i; + } + ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + if (ret) + return ret; + + /* Reinitializes the rss indirect table according to the new RSS size */ + rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + rss_indir[i] = i % kinfo->rss_size; + + ret = hclge_set_rss(handle, rss_indir, NULL, 0); + if (ret) + dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", + ret); + + kfree(rss_indir); + + if (!ret) + dev_info(&hdev->pdev->dev, + "Channels changed, rss_size from %d to %d, tqps from %d to %d", + cur_rss_size, kinfo->rss_size, + cur_tqps, kinfo->rss_size * kinfo->num_tc); + + return ret; +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -5035,6 +5572,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_autoneg = hclge_set_autoneg, .get_autoneg = hclge_get_autoneg, .get_pauseparam = hclge_get_pauseparam, + .set_pauseparam = hclge_set_pauseparam, .set_mtu = hclge_set_mtu, .reset_queue = hclge_reset_tqp, .get_stats = hclge_get_stats, @@ -5043,9 +5581,15 @@ static const struct hnae3_ae_ops hclge_ops = { .get_sset_count = hclge_get_sset_count, .get_fw_version = hclge_get_fw_version, .get_mdix_mode = hclge_get_mdix_mode, + .enable_vlan_filter = hclge_enable_vlan_filter, .set_vlan_filter = hclge_set_port_vlan_filter, .set_vf_vlan_filter = hclge_set_vf_vlan_filter, + .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, .reset_event = hclge_reset_event, + .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, + .set_channels = hclge_set_channels, + .get_channels = hclge_get_channels, + .get_flowctrl_adv = hclge_get_flowctrl_adv, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index fb043b54583d..eeb6c8d66e4e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -79,6 +79,10 @@ #define HCLGE_PHY_MDIX_STATUS_B (6) #define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) +/* Factor used to calculate offset and bitmap of VF num */ +#define HCLGE_VF_NUM_PER_CMD 64 +#define HCLGE_VF_NUM_PER_BYTE 8 + /* Reset related Registers */ #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_GLOBAL_RESET_REG 0x20A00 @@ -97,6 +101,11 @@ /* CMDQ register bits for RX event(=MBX event) */ #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 +#define HCLGE_MAC_DEFAULT_FRAME \ + (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) +#define HCLGE_MAC_MIN_FRAME 64 +#define HCLGE_MAC_MAX_FRAME 9728 + enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, HCLGE_STATE_DOWN, @@ -108,6 +117,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_RST_HANDLING, HCLGE_STATE_MBX_SERVICE_SCHED, HCLGE_STATE_MBX_HANDLING, + HCLGE_STATE_STATISTICS_UPDATING, 
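+	/* Editor's annotation (illustrative, not part of the original
+	 * patch): this state bit is taken with test_and_set_bit() in
+	 * hclge_update_stats() and cleared once the refresh completes,
+	 * so a stats request that races with the service task returns
+	 * early instead of issuing duplicate firmware queries.
+	 */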
HCLGE_STATE_MAX }; @@ -220,6 +230,7 @@ struct hclge_cfg { u8 tc_num; u16 tqp_desc_num; u16 rx_buf_len; + u16 rss_size_max; u8 phy_addr; u8 media_type; u8 mac_addr[ETH_ALEN]; @@ -376,14 +387,23 @@ struct hclge_mac_stats { u64 mac_tx_multi_pkt_num; u64 mac_tx_broad_pkt_num; u64 mac_tx_undersize_pkt_num; - u64 mac_tx_overrsize_pkt_num; + u64 mac_tx_oversize_pkt_num; u64 mac_tx_64_oct_pkt_num; u64 mac_tx_65_127_oct_pkt_num; u64 mac_tx_128_255_oct_pkt_num; u64 mac_tx_256_511_oct_pkt_num; u64 mac_tx_512_1023_oct_pkt_num; u64 mac_tx_1024_1518_oct_pkt_num; - u64 mac_tx_1519_max_oct_pkt_num; + u64 mac_tx_1519_2047_oct_pkt_num; + u64 mac_tx_2048_4095_oct_pkt_num; + u64 mac_tx_4096_8191_oct_pkt_num; + u64 mac_tx_8192_12287_oct_pkt_num; /* valid for GE MAC only */ + u64 mac_tx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 mac_tx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC */ + u64 mac_tx_12288_16383_oct_pkt_num; + u64 mac_tx_1519_max_good_oct_pkt_num; + u64 mac_tx_1519_max_bad_oct_pkt_num; + u64 mac_rx_total_pkt_num; u64 mac_rx_total_oct_num; u64 mac_rx_good_pkt_num; @@ -394,33 +414,52 @@ struct hclge_mac_stats { u64 mac_rx_multi_pkt_num; u64 mac_rx_broad_pkt_num; u64 mac_rx_undersize_pkt_num; - u64 mac_rx_overrsize_pkt_num; + u64 mac_rx_oversize_pkt_num; u64 mac_rx_64_oct_pkt_num; u64 mac_rx_65_127_oct_pkt_num; u64 mac_rx_128_255_oct_pkt_num; u64 mac_rx_256_511_oct_pkt_num; u64 mac_rx_512_1023_oct_pkt_num; u64 mac_rx_1024_1518_oct_pkt_num; - u64 mac_rx_1519_max_oct_pkt_num; - - u64 mac_trans_fragment_pkt_num; - u64 mac_trans_undermin_pkt_num; - u64 mac_trans_jabber_pkt_num; - u64 mac_trans_err_all_pkt_num; - u64 mac_trans_from_app_good_pkt_num; - u64 mac_trans_from_app_bad_pkt_num; - u64 mac_rcv_fragment_pkt_num; - u64 mac_rcv_undermin_pkt_num; - u64 mac_rcv_jabber_pkt_num; - u64 mac_rcv_fcs_err_pkt_num; - u64 mac_rcv_send_app_good_pkt_num; - u64 mac_rcv_send_app_bad_pkt_num; + u64 mac_rx_1519_2047_oct_pkt_num; + u64 mac_rx_2048_4095_oct_pkt_num; + u64 mac_rx_4096_8191_oct_pkt_num; + u64 mac_rx_8192_12287_oct_pkt_num;/* valid for GE MAC only */ + u64 mac_rx_8192_9216_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 mac_rx_9217_12287_oct_pkt_num; /* valid for LGE & CGE MAC only */ + u64 mac_rx_12288_16383_oct_pkt_num; + u64 mac_rx_1519_max_good_oct_pkt_num; + u64 mac_rx_1519_max_bad_oct_pkt_num; + + u64 mac_tx_fragment_pkt_num; + u64 mac_tx_undermin_pkt_num; + u64 mac_tx_jabber_pkt_num; + u64 mac_tx_err_all_pkt_num; + u64 mac_tx_from_app_good_pkt_num; + u64 mac_tx_from_app_bad_pkt_num; + u64 mac_rx_fragment_pkt_num; + u64 mac_rx_undermin_pkt_num; + u64 mac_rx_jabber_pkt_num; + u64 mac_rx_fcs_err_pkt_num; + u64 mac_rx_send_app_good_pkt_num; + u64 mac_rx_send_app_bad_pkt_num; }; +#define HCLGE_STATS_TIMER_INTERVAL (60 * 5) struct hclge_hw_stats { struct hclge_mac_stats mac_stats; struct hclge_64_bit_stats all_64_bit_stats; struct hclge_32_bit_stats all_32_bit_stats; + u32 stats_timer; +}; + +struct hclge_vlan_type_cfg { + u16 rx_ot_fst_vlan_type; + u16 rx_ot_sec_vlan_type; + u16 rx_in_fst_vlan_type; + u16 rx_in_sec_vlan_type; + u16 tx_ot_vlan_type; + u16 tx_in_vlan_type; }; struct hclge_dev { @@ -509,6 +548,26 @@ struct hclge_dev { enum hclge_mta_dmac_sel_type mta_mac_sel_type; bool enable_mta; /* Mutilcast filter enable */ bool accept_mta_mc; /* Whether accept mta filter multicast */ + + struct hclge_vlan_type_cfg vlan_type_cfg; +}; + +/* VPort level vlan tag configuration for TX direction */ +struct hclge_tx_vtag_cfg { + bool accept_tag; /* Whether accept tagged packet 
from host */ + bool accept_untag; /* Whether accept untagged packet from host */ + bool insert_tag1_en; /* Whether insert inner vlan tag */ + bool insert_tag2_en; /* Whether insert outer vlan tag */ + u16 default_tag1; /* The default inner vlan tag to insert */ + u16 default_tag2; /* The default outer vlan tag to insert */ +}; + +/* VPort level vlan tag configuration for RX direction */ +struct hclge_rx_vtag_cfg { + bool strip_tag1_en; /* Whether strip inner vlan tag */ + bool strip_tag2_en; /* Whether strip outer vlan tag */ + bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */ + bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */ }; struct hclge_vport { @@ -523,6 +582,9 @@ struct hclge_vport { u16 bw_limit; /* VSI BW Limit (0 = disabled) */ u8 dwrr; + struct hclge_tx_vtag_cfg txvlan_cfg; + struct hclge_rx_vtag_cfg rxvlan_cfg; + int vport_id; struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; @@ -565,4 +627,5 @@ int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_mbx_handler(struct hclge_dev *hdev); void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); +int hclge_cfg_flowctrl(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 7069e9408d7d..c1dea3a47bdd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -17,6 +17,7 @@ #define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ SUPPORTED_TP | \ SUPPORTED_Pause | \ + SUPPORTED_Asym_Pause | \ PHY_10BT_FEATURES | \ PHY_100BT_FEATURES | \ PHY_1000BT_FEATURES) @@ -183,6 +184,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev) ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); if (ret) netdev_err(netdev, "failed to adjust link.\n"); + + ret = hclge_cfg_flowctrl(hdev); + if (ret) + netdev_err(netdev, "failed to configure flow control.\n"); } int hclge_mac_start_phy(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 7bfa2e5497cb..36bd79a77940 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -23,8 +23,8 @@ enum hclge_shaper_level { HCLGE_SHAPER_LVL_PF = 1, }; -#define HCLGE_SHAPER_BS_U_DEF 1 -#define HCLGE_SHAPER_BS_S_DEF 4 +#define HCLGE_SHAPER_BS_U_DEF 5 +#define HCLGE_SHAPER_BS_S_DEF 20 #define HCLGE_ETHER_MAX_RATE 100000 @@ -112,7 +112,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, return 0; } -static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) { struct hclge_desc desc; @@ -138,6 +138,46 @@ static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static int hclge_mac_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr, + u8 pause_trans_gap, u16 pause_trans_time) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + + pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false); + + ether_addr_copy(pause_param->mac_addr, addr); + pause_param->pause_trans_gap = pause_trans_gap; + pause_param->pause_trans_time = cpu_to_le16(pause_trans_time); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int 
hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + u16 trans_time; + u8 trans_gap; + int ret; + + pause_param = (struct hclge_cfg_pause_param_cmd *)&desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + return ret; + + trans_gap = pause_param->pause_trans_gap; + trans_time = le16_to_cpu(pause_param->pause_trans_time); + + return hclge_mac_pause_param_cfg(hdev, mac_addr, trans_gap, + trans_time); +} + static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) { u8 tc; @@ -1056,6 +1096,15 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) return hclge_tm_schd_mode_hw(hdev); } +static int hclge_mac_pause_param_setup_hw(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + return hclge_mac_pause_param_cfg(hdev, mac->mac_addr, + HCLGE_DEFAULT_PAUSE_TRANS_GAP, + HCLGE_DEFAULT_PAUSE_TRANS_TIME); +} + static int hclge_pfc_setup_hw(struct hclge_dev *hdev) { u8 enable_bitmap = 0; @@ -1102,8 +1151,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) int ret; u8 i; - if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) - return hclge_mac_pause_setup_hw(hdev); + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + ret = hclge_mac_pause_setup_hw(hdev); + if (ret) + return ret; + + return hclge_mac_pause_param_setup_hw(hdev); + } /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index bf59961918ab..5401e7559437 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -18,6 +18,9 @@ #define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) +#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0xFF +#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF + /* SP or DWRR */ #define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) #define HCLGE_TM_TX_SCHD_SP_MSK (0xFE) @@ -99,6 +102,13 @@ struct hclge_pfc_en_cmd { u8 pri_en_bitmap; }; +struct hclge_cfg_pause_param_cmd { + u8 mac_addr[ETH_ALEN]; + u8 pause_trans_gap; + u8 rsvd; + __le16 pause_trans_time; +}; + struct hclge_port_shapping_cmd { __le32 port_shapping_para; }; @@ -118,4 +128,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); +int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx); +int hclge_mac_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 31866057bc7d..655f522e44aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -49,7 +49,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) return status; } tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += - le32_to_cpu(desc.data[4]); + le32_to_cpu(desc.data[1]); hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS, true); @@ -63,7 +63,7 @@ static int hclgevf_tqps_update_stats(struct hnae3_handle *handle) return status; } tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += - le32_to_cpu(desc.data[4]); + le32_to_cpu(desc.data[1]); } return 0; @@ -105,7 +105,7 @@ static u8 
*hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < hdev->num_tqps; i++) { struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "txq#%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } @@ -113,7 +113,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data) for (i = 0; i < hdev->num_tqps; i++) { struct hclgevf_tqp *tqp = container_of(handle->kinfo.tqp[i], struct hclgevf_tqp, q); - snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", + snprintf(buff, ETH_GSTRING_LEN, "rxq#%d_pktnum_rcd", tqp->index); buff += ETH_GSTRING_LEN; } @@ -1288,7 +1288,7 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) pci_set_master(pdev); hw = &hdev->hw; hw->hdev = hdev; - hw->io_base = pci_iomap(pdev, 2, 0);; + hw->io_base = pci_iomap(pdev, 2, 0); if (!hw->io_base) { dev_err(&pdev->dev, "can't map configuration register space\n"); ret = -ENOMEM; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6911b7cc06c5..461014b7ccdd 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2453,6 +2453,12 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) struct ibmvnic_sub_crq_queue *scrq = instance; struct ibmvnic_adapter *adapter = scrq->adapter; + /* When booting a kdump kernel we can hit pending interrupts + * prior to completing driver initialization. + */ + if (unlikely(adapter->state != VNIC_OPEN)) + return IRQ_NONE; + adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index d7bdea79e9fa..8fd2458060a0 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -331,7 +331,8 @@ struct e1000_adapter { enum e1000_state_t { __E1000_TESTING, __E1000_RESETTING, - __E1000_DOWN + __E1000_DOWN, + __E1000_DISABLED }; #undef pr_fmt diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3b3983a1ffbb..dc71e87c3260 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1838,8 +1838,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, p = (char *)adapter + stat->stat_offset; break; default: - WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", - stat->type, i); + netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); continue; } diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 1982f7917a8d..3dd4aeb2706d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter, static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *netdev; - struct e1000_adapter *adapter; + struct e1000_adapter *adapter = NULL; struct e1000_hw *hw; static int cards_found; @@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 tmp = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; int bars, need_ioport; + bool disable_dev = false; /* do not allocate ioport bars when not needed */ need_ioport = e1000_is_need_ioport(pdev); @@ 
-1259,11 +1260,13 @@ err_mdio_ioremap: iounmap(hw->ce4100_gbe_mdio_base_virt); iounmap(hw->hw_addr); err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); free_netdev(netdev); err_alloc_etherdev: pci_release_selected_regions(pdev, bars); err_pci_reg: - pci_disable_device(pdev); + if (!adapter || disable_dev) + pci_disable_device(pdev); return err; } @@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + bool disable_dev; e1000_down_and_stop(adapter); e1000_release_manageability(adapter); @@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev) iounmap(hw->flash_address); pci_release_selected_regions(pdev, adapter->bars); + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); free_netdev(netdev); - pci_disable_device(pdev); + if (disable_dev) + pci_disable_device(pdev); } /** @@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) if (netif_running(netdev)) e1000_free_irq(adapter); - pci_disable_device(pdev); + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); return 0; } @@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev) pr_err("Cannot enable PCI device from suspend\n"); return err; } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); @@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) e1000_down(adapter); - pci_disable_device(pdev); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); /* Request a slot slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) pr_err("Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); pci_set_master(pdev); pci_enable_wake(pdev, PCI_D3hot, 0); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index d6d4ed7acf03..31277d3bb7dc 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1367,6 +1367,9 @@ out: * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) { @@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) * different link partner. 
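 * (Editor's illustrative note, not part of the original patch: with
 * the change below, a successful pass through this path now returns 1
 * rather than 0, matching the new kernel-doc above - negative means
 * error, 0 means link down, 1 means link up.)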
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b0188b8f91ba..c5776340517c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -198,7 +198,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, - /* Pipeline Personalization Profile */ + /* Dynamic Device Personalization */ i40e_aqc_opc_write_personalization_profile = 0x0270, i40e_aqc_opc_get_personalization_profile_list = 0x0271, @@ -1594,7 +1594,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); -/* Pipeline Personalization Profile */ +/* Dynamic Device Personalization */ struct i40e_aqc_write_personalization_profile { u8 flags; u8 reserved[3]; @@ -1605,7 +1605,7 @@ struct i40e_aqc_write_personalization_profile { I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); -struct i40e_aqc_write_ppp_resp { +struct i40e_aqc_write_ddp_resp { __le32 error_offset; __le32 error_info; __le32 addr_high; @@ -1614,8 +1614,8 @@ struct i40e_aqc_write_ppp_resp { struct i40e_aqc_get_applied_profiles { u8 flags; -#define I40E_AQC_GET_PPP_GET_CONF 0x1 -#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2 +#define I40E_AQC_GET_DDP_GET_CONF 0x1 +#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 u8 rsv[3]; __le32 reserved; __le32 addr_high; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 095965f268bd..40c5f7628aa1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -5236,7 +5236,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, } /** - * i40e_aq_write_ppp - Write pipeline personalization profile (ppp) + * i40e_aq_write_ddp - Write dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes @@ -5246,7 +5246,7 @@ i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, struct i40e_asq_cmd_details *cmd_details) @@ -5255,7 +5255,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, struct i40e_aqc_write_personalization_profile *cmd = (struct i40e_aqc_write_personalization_profile *) &desc.params.raw; - struct i40e_aqc_write_ppp_resp *resp; + struct i40e_aqc_write_ddp_resp *resp; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, @@ -5271,7 +5271,7 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { - resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw; + resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; if (error_offset) *error_offset = le32_to_cpu(resp->error_offset); if (error_info) @@ -5282,14 +5282,14 @@ i40e_status_code i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, } /** - * i40e_aq_get_ppp_list - Read pipeline 
personalization profile (ppp) + * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff, +i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, struct i40e_asq_cmd_details *cmd_details) { @@ -5364,11 +5364,6 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 offset = 0, info = 0; u32 i; - if (!track_id) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0."); - return I40E_NOT_SUPPORTED; - } - dev_cnt = profile->device_table_count; for (i = 0; i < dev_cnt; i++) { @@ -5378,7 +5373,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, break; } if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP"); + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); return I40E_ERR_DEVICE_NOT_SUPPORTED; } @@ -5397,7 +5392,7 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, sizeof(struct i40e_profile_section_header); /* Write profile */ - status = i40e_aq_write_ppp(hw, (void *)sec, (u16)section_size, + status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, track_id, &offset, &info, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, @@ -5439,10 +5434,10 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, sec->section.offset); pinfo->track_id = track_id; pinfo->version = profile->version; - pinfo->op = I40E_PPP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE); + pinfo->op = I40E_DDP_ADD_TRACKID; + memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end, + status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); return status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5f6cf7212d4f..34173f821fd9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -126,6 +126,10 @@ static const struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), I40E_PF_STAT("link_xon_tx", stats.link_xon_tx), I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx), + I40E_PF_STAT("priority_xon_rx", stats.priority_xon_rx), + I40E_PF_STAT("priority_xoff_rx", stats.priority_xoff_rx), + I40E_PF_STAT("priority_xon_tx", stats.priority_xon_tx), + I40E_PF_STAT("priority_xoff_tx", stats.priority_xoff_tx), I40E_PF_STAT("rx_size_64", stats.rx_size_64), I40E_PF_STAT("rx_size_127", stats.rx_size_127), I40E_PF_STAT("rx_size_255", stats.rx_size_255), @@ -1585,6 +1589,8 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].desc = NULL; rx_rings[i].rx_bi = NULL; + /* Clear cloned XDP RX-queue info before setup call */ + memset(&rx_rings[i].xdp_rxq, 0, sizeof(rx_rings[i].xdp_rxq)); /* this is to allow wr32 to have something to write to * during early allocation of Rx buffers */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 321d8be80871..2ab22eba0c7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -47,8 +47,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define 
DRV_VERSION_MAJOR 2 -#define DRV_VERSION_MINOR 1 -#define DRV_VERSION_BUILD 14 +#define DRV_VERSION_MINOR 3 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p) else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + /* Copy the address first, so that we avoid a possible race with + * .set_rx_mode(). If we copy after changing the address in the filter + * list, we might open ourselves to a narrow race window where + * .set_rx_mode could delete our dev_addr filter and prevent traffic + * from passing. + */ + ether_addr_copy(netdev->dev_addr, addr->sa_data); + spin_lock_bh(&vsi->mac_filter_hash_lock); i40e_del_mac_filter(vsi, netdev->dev_addr); i40e_add_mac_filter(vsi, addr->sa_data); spin_unlock_bh(&vsi->mac_filter_hash_lock); - ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; @@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; + /* Under some circumstances, we might receive a request to delete + * our own device address from our uc list. Because we store the + * device address in the VSI's MAC/VLAN filter list, we need to ignore + * such requests and not delete our device address from this list. + */ + if (ether_addr_equal(addr, netdev->dev_addr)) + return 0; + i40e_del_mac_filter(vsi, addr); return 0; @@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) /* Set Bit 7 to be valid */ mode = I40E_AQ_SET_SWITCH_BIT7_VALID; - /* Set L4type to both TCP and UDP support */ - mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; + /* Set L4type for TCP support */ + mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP; /* Set cloud filter mode */ mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; @@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, is_valid_ether_addr(filter->src_mac)) || (is_multicast_ether_addr(filter->dst_mac) && is_multicast_ether_addr(filter->src_mac))) - return -EINVAL; + return -EOPNOTSUPP; - /* Make sure port is specified, otherwise bail out, for channel - * specific cloud filter needs 'L4 port' to be non-zero + /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP + * ports are not supported via big buffer now. 
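+	 * (Editor's illustrative note, not part of the original patch:
+	 * a filter with dst_port == 0 or ip_proto == IPPROTO_UDP is now
+	 * rejected with -EOPNOTSUPP rather than -EINVAL, so the TC
+	 * framework can fall back to software classification.)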
*/ - if (!filter->dst_port) - return -EINVAL; + if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) + return -EOPNOTSUPP; /* adding filter using src_port/src_ip is not supported at this stage */ if (filter->src_port || filter->src_ipv4 || !ipv6_addr_any(&filter->ip.v6.src_ip6)) - return -EINVAL; + return -EOPNOTSUPP; /* copy element needed to add cloud filter from filter */ i40e_set_cld_element(filter, &cld_filter.element); @@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, is_multicast_ether_addr(filter->src_mac)) { /* MAC + IP : unsupported mode */ if (filter->dst_ipv4) - return -EINVAL; + return -EOPNOTSUPP; /* since we validated that L4 port must be valid before * we get here, start with respective "flags" value @@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, if (tc < 0) { dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); - return -EINVAL; + return -EOPNOTSUPP; } if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 7689c2ee0d46..425713fb72e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -389,7 +389,7 @@ static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { i40e_status ret_code; - u16 read_size = *words; + u16 read_size; bool last_cmd = false; u16 words_read = 0; u16 i = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 3bb6659db822..b3cc89cc3a86 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -343,6 +343,37 @@ static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) return i40e_ptype_lookup[ptype]; } +/** + * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition + * @link_speed: the speed to convert + * + * Returns the link_speed in terms of the virtchnl interface, for use in + * converting link_speed as reported by the AdminQ into the format used for + * talking to virtchnl devices. If we can't represent the link speed properly, + * report LINK_SPEED_UNKNOWN. 
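+ *
+ * Editor's illustrative example (not part of the original patch):
+ *
+ *	i40e_virtchnl_link_speed(I40E_LINK_SPEED_10GB)
+ *		returns VIRTCHNL_LINK_SPEED_10GB;
+ *	any value outside the switch below falls through to
+ *	VIRTCHNL_LINK_SPEED_UNKNOWN.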
+ **/ +static inline enum virtchnl_link_speed +i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) +{ + switch (link_speed) { + case I40E_LINK_SPEED_100MB: + return VIRTCHNL_LINK_SPEED_100MB; + case I40E_LINK_SPEED_1GB: + return VIRTCHNL_LINK_SPEED_1GB; + case I40E_LINK_SPEED_10GB: + return VIRTCHNL_LINK_SPEED_10GB; + case I40E_LINK_SPEED_40GB: + return VIRTCHNL_LINK_SPEED_40GB; + case I40E_LINK_SPEED_20GB: + return VIRTCHNL_LINK_SPEED_20GB; + case I40E_LINK_SPEED_25GB: + return VIRTCHNL_LINK_SPEED_25GB; + case I40E_LINK_SPEED_UNKNOWN: + default: + return VIRTCHNL_LINK_SPEED_UNKNOWN; + } +} + /* prototype for functions used for SW locks */ /* i40e_common for VF drivers*/ @@ -400,13 +431,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); -i40e_status i40e_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + struct i40e_asq_cmd_details * + cmd_details); +i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details); + struct i40e_asq_cmd_details * + cmd_details); struct i40e_generic_seg_header * i40e_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 4566d66ffc7c..40edb6e5e6f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -27,6 +27,7 @@ #include <linux/prefetch.h> #include <net/busy_poll.h> #include <linux/bpf_trace.h> +#include <net/xdp.h> #include "i40e.h" #include "i40e_trace.h" #include "i40e_prototype.h" @@ -1236,6 +1237,8 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) void i40e_free_rx_resources(struct i40e_ring *rx_ring) { i40e_clean_rx_ring(rx_ring); + if (rx_ring->vsi->type == I40E_VSI_MAIN) + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); rx_ring->xdp_prog = NULL; kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; @@ -1256,6 +1259,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; + int err = -ENOMEM; int bi_size; /* warn if we are about to overwrite the pointer */ @@ -1283,13 +1287,21 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; + /* XDP RX-queue info only needed for RX rings exposed to XDP */ + if (rx_ring->vsi->type == I40E_VSI_MAIN) { + err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, + rx_ring->queue_index); + if (err < 0) + goto err; + } + rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; return 0; err: kfree(rx_ring->rx_bi); rx_ring->rx_bi = NULL; - return -ENOMEM; + return err; } /** @@ -2068,11 +2080,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false, xdp_xmit = false; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; - struct xdp_buff xdp; unsigned int size; u16 vlan_tag; u8 
rx_ptype; @@ -3047,10 +3061,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. + */ + if (stale_size > I40E_MAX_DATA_PER_TXD) { + int align_pad = -(stale->page_offset) & + (I40E_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > I40E_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -3058,7 +3092,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index fbae1182e2ea..2d08760fc4ce 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -27,6 +27,8 @@ #ifndef _I40E_TXRX_H_ #define _I40E_TXRX_H_ +#include <net/xdp.h> + /* Interrupt Throttling and Rate Limiting Goodies */ #define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */ @@ -428,6 +430,7 @@ struct i40e_ring { */ struct i40e_channel *ch; + struct xdp_rxq_info xdp_rxq; } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 0e8568719b4e..5a708c363d99 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1502,19 +1502,19 @@ struct i40e_lldp_variables { #define I40E_FLEX_57_SHIFT 6 #define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) -/* Version format for PPP */ -struct i40e_ppp_version { +/* Version format for Dynamic Device Personalization(DDP) */ +struct i40e_ddp_version { u8 major; u8 minor; u8 update; u8 draft; }; -#define I40E_PPP_NAME_SIZE 32 +#define I40E_DDP_NAME_SIZE 32 /* Package header */ struct i40e_package_header { - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 segment_count; u32 segment_offset[1]; }; @@ -1526,16 +1526,16 @@ struct i40e_generic_seg_header { #define SEGMENT_TYPE_I40E 0x00000011 #define SEGMENT_TYPE_X722 0x00000012 u32 type; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 size; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_metadata_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 track_id; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_device_id_entry { @@ -1545,8 +1545,8 @@ struct i40e_device_id_entry { struct i40e_profile_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; - char name[I40E_PPP_NAME_SIZE]; + struct i40e_ddp_version version; + char name[I40E_DDP_NAME_SIZE]; u32 device_table_count; struct i40e_device_id_entry device_table[1]; }; @@ -1573,11 
+1573,11 @@ struct i40e_profile_section_header { struct i40e_profile_info { u32 track_id; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u8 op; -#define I40E_PPP_ADD_TRACKID 0x01 -#define I40E_PPP_REMOVE_TRACKID 0x02 +#define I40E_DDP_ADD_TRACKID 0x01 +#define I40E_DDP_REMOVE_TRACKID 0x02 u8 reserved[7]; - u8 name[I40E_PPP_NAME_SIZE]; + u8 name[I40E_DDP_NAME_SIZE]; }; #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 36cb8e068e85..e9309fb9084b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -81,12 +81,12 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = - (vf->link_up ? I40E_LINK_SPEED_40GB : 0); + (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0); } else { pfe.event_data.link_event.link_status = ls->link_info & I40E_AQ_LINK_UP; pfe.event_data.link_event.link_speed = - (enum virtchnl_link_speed)ls->link_speed; + i40e_virtchnl_link_speed(ls->link_speed); } i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); @@ -2749,6 +2749,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, break; case VIRTCHNL_OP_GET_VF_RESOURCES: ret = i40e_vc_get_vf_resources_msg(vf, msg); + i40e_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: i40e_vc_reset_vf_msg(vf); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 06b04572c518..435a112d09f5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -198,7 +198,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, - /* Pipeline Personalization Profile */ + /* Dynamic Device Personalization */ i40e_aqc_opc_write_personalization_profile = 0x0270, i40e_aqc_opc_get_personalization_profile_list = 0x0271, @@ -1562,7 +1562,7 @@ struct i40e_aqc_add_delete_mirror_rule_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); -/* Pipeline Personalization Profile */ +/* Dynamic Device Personalization */ struct i40e_aqc_write_personalization_profile { u8 flags; u8 reserved[3]; @@ -1573,7 +1573,7 @@ struct i40e_aqc_write_personalization_profile { I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); -struct i40e_aqc_write_ppp_resp { +struct i40e_aqc_write_ddp_resp { __le32 error_offset; __le32 error_info; __le32 addr_high; @@ -1582,8 +1582,8 @@ struct i40e_aqc_write_ppp_resp { struct i40e_aqc_get_applied_profiles { u8 flags; -#define I40E_AQC_GET_PPP_GET_CONF 0x1 -#define I40E_AQC_GET_PPP_GET_RDPU_CONF 0x2 +#define I40E_AQC_GET_DDP_GET_CONF 0x1 +#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 u8 rsv[3]; __le32 reserved; __le32 addr_high; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 7d70bf69b249..a94648429a5b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1202,7 +1202,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw) } /** - * i40evf_aq_write_ppp - Write pipeline personalization profile (ppp) + * i40evf_aq_write_ddp - Write dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer 
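/* Why the PF-side cast became a call in the hunk above: the AQ and
 * virtchnl speed enums place the same link rates at different bit
 * positions, so handing the raw ls->link_speed to a VF mislabels the
 * speed. A sketch of the event fill using the new mapper; the helper
 * name is illustrative, the field names are as in virtchnl.h:
 */
static void example_fill_link_event(struct i40e_link_status *ls,
				    struct virtchnl_pf_event *pfe)
{
	pfe->event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe->event_data.link_event.link_status =
		ls->link_info & I40E_AQ_LINK_UP;
	/* translate bit positions instead of casting the raw AQ value */
	pfe->event_data.link_event.link_speed =
		i40e_virtchnl_link_speed(ls->link_speed);
}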
(size in bytes = buff_size) * @buff_size: buffer size in bytes @@ -1212,7 +1212,7 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw) * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, struct i40e_asq_cmd_details *cmd_details) @@ -1221,7 +1221,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, struct i40e_aqc_write_personalization_profile *cmd = (struct i40e_aqc_write_personalization_profile *) &desc.params.raw; - struct i40e_aqc_write_ppp_resp *resp; + struct i40e_aqc_write_ddp_resp *resp; i40e_status status; i40evf_fill_default_direct_cmd_desc(&desc, @@ -1237,7 +1237,7 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); if (!status) { - resp = (struct i40e_aqc_write_ppp_resp *)&desc.params.raw; + resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; if (error_offset) *error_offset = le32_to_cpu(resp->error_offset); if (error_info) @@ -1248,16 +1248,16 @@ i40e_status_code i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, } /** - * i40evf_aq_get_ppp_list - Read pipeline personalization profile (ppp) + * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp) * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes * @cmd_details: pointer to command details structure or NULL **/ enum -i40e_status_code i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff, +i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_get_applied_profiles *cmd = @@ -1330,11 +1330,6 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, u32 offset = 0, info = 0; u32 i; - if (!track_id) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0."); - return I40E_NOT_SUPPORTED; - } - dev_cnt = profile->device_table_count; for (i = 0; i < dev_cnt; i++) { @@ -1344,7 +1339,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, break; } if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support PPP"); + i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); return I40E_ERR_DEVICE_NOT_SUPPORTED; } @@ -1363,7 +1358,7 @@ i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, sizeof(struct i40e_profile_section_header); /* Write profile */ - status = i40evf_aq_write_ppp(hw, (void *)sec, (u16)section_size, + status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size, track_id, &offset, &info, NULL); if (status) { i40e_debug(hw, I40E_DEBUG_PACKAGE, @@ -1405,10 +1400,10 @@ i40evf_add_pinfo_to_list(struct i40e_hw *hw, sec->section.offset); pinfo->track_id = track_id; pinfo->version = profile->version; - pinfo->op = I40E_PPP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_PPP_NAME_SIZE); + pinfo->op = I40E_DDP_ADD_TRACKID; + memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - status = i40evf_aq_write_ppp(hw, (void *)sec, sec->data_end, + status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); return status; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h 
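/* Hypothetical caller of the renamed DDP write helper, mirroring
 * i40evf_write_profile() above: on failure the firmware reports how far
 * into the section it got, which is worth surfacing in the debug log.
 */
static i40e_status example_write_section(struct i40e_hw *hw, void *sec,
					 u16 size, u32 track_id)
{
	u32 offset = 0, info = 0;
	i40e_status status;

	status = i40evf_aq_write_ddp(hw, sec, size, track_id,
				     &offset, &info, NULL);
	if (status)
		i40e_debug(hw, I40E_DEBUG_PACKAGE,
			   "Failed to write profile: offset %d, info %d\n",
			   offset, info);
	return status;
}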
b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index b624b5994075..47c429931a57 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -131,13 +131,15 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, u32 time, u32 interval); -i40e_status i40evf_aq_write_ppp(struct i40e_hw *hw, void *buff, +i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, u16 buff_size, u32 track_id, u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40evf_aq_get_ppp_list(struct i40e_hw *hw, void *buff, + struct i40e_asq_cmd_details * + cmd_details); +i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details); + struct i40e_asq_cmd_details * + cmd_details); struct i40e_generic_seg_header * i40evf_find_segment_in_package(u32 segment_type, struct i40e_package_header *pkg_header); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 50864f99446d..1ba29bb85b67 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. 
+ */ + if (stale_size > I40E_MAX_DATA_PER_TXD) { + int align_pad = -(stale->page_offset) & + (I40E_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > I40E_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 213b773dfad6..6afc31616e04 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -1422,19 +1422,19 @@ enum i40e_reset_type { #define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ I40E_FD_INSET_FLEX_WORD57_SHIFT) -/* Version format for PPP */ -struct i40e_ppp_version { +/* Version format for Dynamic Device Personalization(DDP) */ +struct i40e_ddp_version { u8 major; u8 minor; u8 update; u8 draft; }; -#define I40E_PPP_NAME_SIZE 32 +#define I40E_DDP_NAME_SIZE 32 /* Package header */ struct i40e_package_header { - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 segment_count; u32 segment_offset[1]; }; @@ -1446,16 +1446,16 @@ struct i40e_generic_seg_header { #define SEGMENT_TYPE_I40E 0x00000011 #define SEGMENT_TYPE_X722 0x00000012 u32 type; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 size; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_metadata_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u32 track_id; - char name[I40E_PPP_NAME_SIZE]; + char name[I40E_DDP_NAME_SIZE]; }; struct i40e_device_id_entry { @@ -1465,8 +1465,8 @@ struct i40e_device_id_entry { struct i40e_profile_segment { struct i40e_generic_seg_header header; - struct i40e_ppp_version version; - char name[I40E_PPP_NAME_SIZE]; + struct i40e_ddp_version version; + char name[I40E_DDP_NAME_SIZE]; u32 device_table_count; struct i40e_device_id_entry device_table[1]; }; @@ -1493,11 +1493,11 @@ struct i40e_profile_section_header { struct i40e_profile_info { u32 track_id; - struct i40e_ppp_version version; + struct i40e_ddp_version version; u8 op; -#define I40E_PPP_ADD_TRACKID 0x01 -#define I40E_PPP_REMOVE_TRACKID 0x02 +#define I40E_DDP_ADD_TRACKID 0x01 +#define I40E_DDP_REMOVE_TRACKID 0x02 u8 reserved[7]; - u8 name[I40E_PPP_NAME_SIZE]; + u8 name[I40E_DDP_NAME_SIZE]; }; #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index de0af521d602..47040ab2e298 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -199,6 +199,9 @@ struct i40evf_adapter { wait_queue_head_t down_waitqueue; struct i40e_q_vector *q_vectors; struct list_head vlan_filter_list; + struct list_head mac_filter_list; + /* Lock to protect accesses to MAC and VLAN lists */ + spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; int num_active_queues; int num_req_queues; @@ -206,7 +209,6 @@ struct i40evf_adapter { /* TX */ struct i40e_ring *tx_rings; u32 tx_timeout_count; - struct list_head mac_filter_list; u32 tx_desc_count; /* RX */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 
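/* The new mac_vlan_list_lock replaces the old test_and_set_bit()
 * busy-wait with a conventional BH-safe spinlock around every walk of
 * the MAC and VLAN filter lists; a minimal sketch of the pattern the
 * following i40evf_main.c hunks apply throughout:
 */
static void example_mark_all_for_add(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry(f, &adapter->mac_filter_list, list)
		f->add = true;
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}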
7b2a4eba92e2..f92587aba3c7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -45,8 +45,8 @@ static const char i40evf_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 3 -#define DRV_VERSION_MINOR 0 -#define DRV_VERSION_BUILD 1 +#define DRV_VERSION_MINOR 2 +#define DRV_VERSION_BUILD 2 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -276,37 +276,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask) if (mask & BIT(i - 1)) { wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK); - } - } -} - -/** - * i40evf_fire_sw_int - Generate SW interrupt for specified vectors - * @adapter: board private structure - * @mask: bitmap of vectors to trigger - **/ -static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask) -{ - struct i40e_hw *hw = &adapter->hw; - int i; - u32 dyn_ctl; - - if (mask & 1) { - dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01); - dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; - wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl); - } - for (i = 1; i < adapter->num_msix_vectors; i++) { - if (mask & BIT(i)) { - dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1)); - dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; - wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl); + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); } } } @@ -337,15 +307,10 @@ static irqreturn_t i40evf_msix_aq(int irq, void *data) struct net_device *netdev = data; struct i40evf_adapter *adapter = netdev_priv(netdev); struct i40e_hw *hw = &adapter->hw; - u32 val; /* handle non-queue interrupts, these reads clear the registers */ - val = rd32(hw, I40E_VFINT_ICR01); - val = rd32(hw, I40E_VFINT_ICR0_ENA1); - - val = rd32(hw, I40E_VFINT_DYN_CTL01) | - I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; - wr32(hw, I40E_VFINT_DYN_CTL01, val); + rd32(hw, I40E_VFINT_ICR01); + rd32(hw, I40E_VFINT_ICR0_ENA1); /* schedule work on the private workqueue */ schedule_work(&adapter->adminq_task); @@ -706,7 +671,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) * @adapter: board private structure * @vlan: vlan tag * - * Returns ptr to the filter object or NULL + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. 
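/* One way to make the new "caller must hold mac_vlan_list_lock"
 * contract on the find helpers self-checking - not part of this patch,
 * purely an illustrative sketch:
 */
static struct i40evf_vlan_filter *
example_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	lockdep_assert_held(&adapter->mac_vlan_list_lock);

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}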
**/ static struct i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan) @@ -731,14 +697,8 @@ static struct i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) { struct i40evf_vlan_filter *f = NULL; - int count = 50; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - if (--count == 0) - goto out; - } + spin_lock_bh(&adapter->mac_vlan_list_lock); f = i40evf_find_vlan(adapter, vlan); if (!f) { @@ -755,8 +715,7 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan) } clearout: - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); -out: + spin_unlock_bh(&adapter->mac_vlan_list_lock); return f; } @@ -768,21 +727,16 @@ out: static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan) { struct i40evf_vlan_filter *f; - int count = 50; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - if (--count == 0) - return; - } + spin_lock_bh(&adapter->mac_vlan_list_lock); f = i40evf_find_vlan(adapter, vlan); if (f) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER; } - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + + spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** @@ -824,7 +778,8 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev, * @adapter: board private structure * @macaddr: the MAC address * - * Returns ptr to the filter object or NULL + * Returns ptr to the filter object or NULL. Must be called while holding the + * mac_vlan_list_lock. **/ static struct i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter, @@ -854,26 +809,17 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, u8 *macaddr) { struct i40evf_mac_filter *f; - int count = 50; if (!macaddr) return NULL; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - if (--count == 0) - return NULL; - } + spin_lock_bh(&adapter->mac_vlan_list_lock); f = i40evf_find_filter(adapter, macaddr); if (!f) { f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) { - clear_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section); - return NULL; - } + if (!f) + goto clearout; ether_addr_copy(f->macaddr, macaddr); @@ -884,7 +830,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, f->remove = false; } - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); +clearout: + spin_unlock_bh(&adapter->mac_vlan_list_lock); return f; } @@ -911,12 +858,16 @@ static int i40evf_set_mac(struct net_device *netdev, void *p) if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF) return -EPERM; + spin_lock_bh(&adapter->mac_vlan_list_lock); + f = i40evf_find_filter(adapter, hw->mac.addr); if (f) { f->remove = true; adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + f = i40evf_add_filter(adapter, addr->sa_data); if (f) { ether_addr_copy(hw->mac.addr, addr->sa_data); @@ -937,7 +888,6 @@ static void i40evf_set_rx_mode(struct net_device *netdev) struct netdev_hw_addr *uca; struct netdev_hw_addr *mca; struct netdev_hw_addr *ha; - int count = 50; /* add addr if not already in the filter list */ netdev_for_each_uc_addr(uca, netdev) { @@ -947,16 +897,8 @@ static void i40evf_set_rx_mode(struct net_device *netdev) i40evf_add_filter(adapter, mca->addr); } - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { - udelay(1); - if (--count == 0) { - dev_err(&adapter->pdev->dev, - 
"Failed to get lock in %s\n", __func__); - return; - } - } - /* remove filter if not in netdev list */ + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { netdev_for_each_mc_addr(mca, netdev) if (ether_addr_equal(mca->addr, f->macaddr)) @@ -995,7 +937,7 @@ bottom_of_search_loop: adapter->flags & I40EVF_FLAG_ALLMULTI_ON) adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + spin_unlock_bh(&adapter->mac_vlan_list_lock); } /** @@ -1058,6 +1000,8 @@ static void i40evf_configure(struct i40evf_adapter *adapter) /** * i40evf_up_complete - Finish the last steps of bringing up a connection * @adapter: board private structure + * + * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. **/ static void i40evf_up_complete(struct i40evf_adapter *adapter) { @@ -1075,6 +1019,8 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter) /** * i40e_down - Shutdown the connection processing * @adapter: board private structure + * + * Expects to be called while holding the __I40EVF_IN_CRITICAL_TASK bit lock. **/ void i40evf_down(struct i40evf_adapter *adapter) { @@ -1084,16 +1030,14 @@ void i40evf_down(struct i40evf_adapter *adapter) if (adapter->state <= __I40EVF_DOWN_PENDING) return; - while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, - &adapter->crit_section)) - usleep_range(500, 1000); - netif_carrier_off(netdev); netif_tx_disable(netdev); adapter->link_up = false; i40evf_napi_disable_all(adapter); i40evf_irq_disable(adapter); + spin_lock_bh(&adapter->mac_vlan_list_lock); + /* remove all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->remove = true; @@ -1102,6 +1046,9 @@ void i40evf_down(struct i40evf_adapter *adapter) list_for_each_entry(f, &adapter->vlan_filter_list, list) { f->remove = true; } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) && adapter->state != __I40EVF_RESETTING) { /* cancel any current operation */ @@ -1115,7 +1062,6 @@ void i40evf_down(struct i40evf_adapter *adapter) adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; } - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); } @@ -1770,13 +1716,6 @@ static void i40evf_watchdog_task(struct work_struct *work) if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); watchdog_done: - if (adapter->state == __I40EVF_RUNNING) { - i40evf_irq_enable_queues(adapter, ~0); - i40evf_fire_sw_int(adapter, 0xFF); - } else { - i40evf_fire_sw_int(adapter, 0x1); - } - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); restart_watchdog: if (adapter->state == __I40EVF_REMOVE) @@ -1796,7 +1735,11 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED; - if (netif_running(adapter->netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. 
+ */ + if (adapter->state == __I40EVF_RUNNING) { set_bit(__I40E_VSI_DOWN, adapter->vsi.state); netif_carrier_off(adapter->netdev); netif_tx_disable(adapter->netdev); @@ -1808,6 +1751,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) i40evf_free_all_rx_resources(adapter); } + spin_lock_bh(&adapter->mac_vlan_list_lock); + /* Delete all of the filters, both MAC and VLAN. */ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) { list_del(&f->list); @@ -1819,6 +1764,8 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter) kfree(fv); } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); i40evf_free_queues(adapter); @@ -1854,6 +1801,7 @@ static void i40evf_reset_task(struct work_struct *work) struct i40evf_mac_filter *f; u32 reg_val; int i = 0, err; + bool running; while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section)) @@ -1913,7 +1861,13 @@ static void i40evf_reset_task(struct work_struct *work) } continue_reset: - if (netif_running(netdev)) { + /* We don't use netif_running() because it may be true prior to + * ndo_open() returning, so we can't assume it means all our open + * tasks have finished, since we're not holding the rtnl_lock here. + */ + running = (adapter->state == __I40EVF_RUNNING); + + if (running) { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); adapter->link_up = false; @@ -1948,6 +1902,8 @@ continue_reset: adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; + spin_lock_bh(&adapter->mac_vlan_list_lock); + /* re-add all MAC filters */ list_for_each_entry(f, &adapter->mac_filter_list, list) { f->add = true; @@ -1956,15 +1912,19 @@ continue_reset: list_for_each_entry(vlf, &adapter->vlan_filter_list, list) { vlf->add = true; } + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; - clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); - clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); i40evf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); - if (netif_running(adapter->netdev)) { + /* We were running when the reset started, so we need to restore some + * state here. 
+ */ + if (running) { /* allocate transmit descriptors */ err = i40evf_setup_all_tx_resources(adapter); if (err) @@ -1993,9 +1953,13 @@ continue_reset: adapter->state = __I40EVF_DOWN; wake_up(&adapter->down_waitqueue); } + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); return; reset_err: + clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); i40evf_close(netdev); } @@ -2239,8 +2203,14 @@ static int i40evf_open(struct net_device *netdev) return -EIO; } - if (adapter->state != __I40EVF_DOWN) - return -EBUSY; + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + + if (adapter->state != __I40EVF_DOWN) { + err = -EBUSY; + goto err_unlock; + } /* allocate transmit descriptors */ err = i40evf_setup_all_tx_resources(adapter); @@ -2264,6 +2234,8 @@ static int i40evf_open(struct net_device *netdev) i40evf_irq_enable(adapter, true); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + return 0; err_req_irq: @@ -2273,6 +2245,8 @@ err_setup_rx: i40evf_free_all_rx_resources(adapter); err_setup_tx: i40evf_free_all_tx_resources(adapter); +err_unlock: + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); return err; } @@ -2296,6 +2270,9 @@ static int i40evf_close(struct net_device *netdev) if (adapter->state <= __I40EVF_DOWN_PENDING) return 0; + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); set_bit(__I40E_VSI_DOWN, adapter->vsi.state); if (CLIENT_ENABLED(adapter)) @@ -2305,6 +2282,8 @@ static int i40evf_close(struct net_device *netdev) adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. Resources are cleared in * i40evf_virtchnl_completion() after we get confirmation from the PF @@ -2943,6 +2922,8 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); + spin_lock_init(&adapter->mac_vlan_list_lock); + INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); @@ -2985,6 +2966,10 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(netdev); + while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, + &adapter->crit_section)) + usleep_range(500, 1000); + if (netif_running(netdev)) { rtnl_lock(); i40evf_down(adapter); @@ -2993,6 +2978,8 @@ static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state) i40evf_free_misc_irq(adapter); i40evf_reset_interrupt_capability(adapter); + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + retval = pci_save_state(pdev); if (retval) return retval; @@ -3118,6 +3105,7 @@ static void i40evf_remove(struct pci_dev *pdev) i40evf_free_all_rx_resources(adapter); i40evf_free_queues(adapter); kfree(adapter->vf_res); + spin_lock_bh(&adapter->mac_vlan_list_lock); /* If we got removed before an up/down sequence, we've got a filter * hanging out there that we need to get rid of. 
*/ @@ -3130,6 +3118,8 @@ static void i40evf_remove(struct pci_dev *pdev) kfree(f); } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 46c8b8a3907c..feb95b62a077 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -433,12 +433,16 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) adapter->current_op); return; } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->add) count++; } if (!count) { adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_ADD_ETH_ADDR; @@ -456,8 +460,10 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } veal = kzalloc(len, GFP_KERNEL); - if (!veal) + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; + } veal->vsi_id = adapter->vsi_res->vsi_id; veal->num_elements = count; @@ -472,6 +478,9 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)veal, len); kfree(veal); @@ -498,12 +507,16 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) adapter->current_op); return; } + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->mac_filter_list, list) { if (f->remove) count++; } if (!count) { adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_DEL_ETH_ADDR; @@ -520,8 +533,10 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) more = true; } veal = kzalloc(len, GFP_KERNEL); - if (!veal) + if (!veal) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; + } veal->vsi_id = adapter->vsi_res->vsi_id; veal->num_elements = count; @@ -537,6 +552,9 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)veal, len); kfree(veal); @@ -564,12 +582,15 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) return; } + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->add) count++; } if (!count) { adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_ADD_VLAN; @@ -586,8 +607,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) more = true; } vvfl = kzalloc(len, GFP_KERNEL); - if (!vvfl) + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; + } vvfl->vsi_id = adapter->vsi_res->vsi_id; vvfl->num_elements = count; @@ -602,6 +625,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -628,12 +654,15 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) return; } + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, 
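/* Pattern shared by the virtchnl senders in these hunks: count and copy
 * filter state under the spinlock, but only message the PF after
 * dropping it. Note that any allocation made while the BH lock is held
 * must not sleep, so GFP_ATOMIC is the safe choice there; a condensed,
 * illustrative sketch:
 */
static void example_sync_mac_filters(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f;
	int count = 0;
	u8 *msg;

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	list_for_each_entry(f, &adapter->mac_filter_list, list)
		if (f->add)
			count++;
	msg = kzalloc(count * ETH_ALEN, GFP_ATOMIC); /* no sleeping here */
	if (!msg) {
		spin_unlock_bh(&adapter->mac_vlan_list_lock);
		return;
	}
	/* ... copy the flagged addresses into msg under the lock ... */
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* talk to the PF only after the lock is dropped */
	i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_ETH_ADDR,
			   msg, count * ETH_ALEN);
	kfree(msg);
}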
&adapter->vlan_filter_list, list) { if (f->remove) count++; } if (!count) { adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; } adapter->current_op = VIRTCHNL_OP_DEL_VLAN; @@ -650,8 +679,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) more = true; } vvfl = kzalloc(len, GFP_KERNEL); - if (!vvfl) + if (!vvfl) { + spin_unlock_bh(&adapter->mac_vlan_list_lock); return; + } vvfl->vsi_id = adapter->vsi_res->vsi_id; vvfl->num_elements = count; @@ -667,6 +698,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) } if (!more) adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER; + + spin_unlock_bh(&adapter->mac_vlan_list_lock); + i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len); kfree(vvfl); } @@ -705,8 +739,10 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) } if (!flags) { - adapter->flags &= ~I40EVF_FLAG_PROMISC_ON; - adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC; + adapter->flags &= ~(I40EVF_FLAG_PROMISC_ON | + I40EVF_FLAG_ALLMULTI_ON); + adapter->aq_required &= ~(I40EVF_FLAG_AQ_RELEASE_PROMISC | + I40EVF_FLAG_AQ_RELEASE_ALLMULTI); dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 468c3555a629..03a4df0bed96 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -53,6 +53,7 @@ #include <linux/dca.h> #endif +#include <net/xdp.h> #include <net/busy_poll.h> /* common prefix used by pr_<> macros */ @@ -332,7 +333,6 @@ struct ixgbe_ring { struct net_device *netdev; /* netdev ring belongs to */ struct bpf_prog *xdp_prog; struct device *dev; /* device for DMA mapping */ - struct ixgbe_fwd_adapter *l2_accel_priv; void *desc; /* descriptor ring memory */ union { struct ixgbe_tx_buffer *tx_buffer_info; @@ -371,6 +371,7 @@ struct ixgbe_ring { struct ixgbe_tx_queue_stats tx_stats; struct ixgbe_rx_queue_stats rx_stats; }; + struct xdp_rxq_info xdp_rxq; } ____cacheline_internodealigned_in_smp; enum ixgbe_ring_f_enum { @@ -395,8 +396,7 @@ enum ixgbe_ring_f_enum { #define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) #define IXGBE_MAX_L2A_QUEUES 4 #define IXGBE_BAD_L2A_QUEUE 3 -#define IXGBE_MAX_MACVLANS 31 -#define IXGBE_MAX_DCBMACVLANS 8 +#define IXGBE_MAX_MACVLANS 63 struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ @@ -721,8 +721,7 @@ struct ixgbe_adapter { u16 bridge_mode; - u16 eeprom_verh; - u16 eeprom_verl; + char eeprom_id[NVM_VER_SIZE]; u16 eeprom_cap; u32 interrupt_event; @@ -766,7 +765,8 @@ struct ixgbe_adapter { #endif /*CONFIG_DEBUG_FS*/ u8 default_up; - unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ + /* Bitmask indicating in use pools */ + DECLARE_BITMAP(fwd_bitmask, IXGBE_MAX_MACVLANS + 1); #define IXGBE_MAX_LINK_HANDLE 10 struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 9bef255f6a18..1948e4208fb4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -4028,6 +4028,118 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) return 0; } +/** + * ixgbe_get_orom_version - Return option ROM from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * if valid option ROM version, nvm_ver->or_valid set to true + * else 
nvm_ver->or_valid is false. + **/ +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; + + nvm_ver->or_valid = false; + /* Option ROM may or may not be present. Start with pointer */ + hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); + + /* make sure offset is valid */ + if (offset == 0x0 || offset == NVM_INVALID_PTR) + return; + + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); + hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); + + /* option ROM exists and is valid */ + if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || + eeprom_cfg_blkl == NVM_VER_INVALID || + eeprom_cfg_blkh == NVM_VER_INVALID) + return; + + nvm_ver->or_valid = true; + nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; + nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | + (eeprom_cfg_blkh >> NVM_OROM_SHIFT); + nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; +} + +/** + * ixgbe_get_oem_prod_version - Return OEM product version from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * if valid OEM product version, nvm_ver->oem_valid set to true + * else nvm_ver->oem_valid is false. + **/ +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 rel_num, prod_ver, mod_len, cap, offset; + + nvm_ver->oem_valid = false; + hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); + + /* Return if offset to OEM Product Version block is invalid */ + if (offset == 0x0 || offset == NVM_INVALID_PTR) + return; + + /* Read product version block */ + hw->eeprom.ops.read(hw, offset, &mod_len); + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); + + /* Return if OEM product version block is invalid */ + if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || + (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) + return; + + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); + hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); + + /* Return if version is invalid */ + if ((rel_num | prod_ver) == 0x0 || + rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) + return; + + nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; + nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; + nvm_ver->oem_release = rel_num; + nvm_ver->oem_valid = true; +} + +/** + * ixgbe_get_etk_id - Return Etrack ID from EEPROM + * + * @hw: pointer to hardware structure + * @nvm_ver: pointer to output structure + * + * word read errors will return 0xFFFF + **/ +void ixgbe_get_etk_id(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver) +{ + u16 etk_id_l, etk_id_h; + + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) + etk_id_l = NVM_VER_INVALID; + if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) + etk_id_h = NVM_VER_INVALID; + + /* The word order for the version format is determined by high order + * word bit 15.
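/* Worked example of the option-ROM decode above, for a hypothetical
 * EEPROM block of eeprom_cfg_blkl = 0x0563, eeprom_cfg_blkh = 0x2104
 * (NVM_OROM_SHIFT = 8, NVM_OROM_PATCH_MASK = 0xFF):
 *   or_major = 0x0563 >> 8                   = 0x05
 *   or_build = (0x0563 << 8) | (0x2104 >> 8) = 0x56321, which the
 *              u16 field truncates to          0x6321
 *   or_patch = 0x2104 & 0xFF                 = 0x04
 */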
+ */ + if ((etk_id_h & NVM_ETK_VALID) == 0) { + nvm_ver->etk_id = etk_id_h; + nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); + } else { + nvm_ver->etk_id = etk_id_l; + nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); + } +} + void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) { u32 rxctrl; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index a01409e2e06c..4d4c02366cb3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -139,6 +139,12 @@ extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); +void ixgbe_get_etk_id(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); +void ixgbe_get_orom_version(struct ixgbe_hw *hw, + struct ixgbe_nvm_version *nvm_ver); void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 0aad1c2a3667..3bcf58b27d8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1014,16 +1014,13 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u32 nvm_track_id; strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, ixgbe_driver_version, sizeof(drvinfo->version)); - nvm_track_id = (adapter->eeprom_verh << 16) | - adapter->eeprom_verl; - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - nvm_track_id); + strlcpy(drvinfo->fw_version, adapter->eeprom_id, + sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); @@ -1156,6 +1153,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev, memcpy(&temp_ring[i], adapter->rx_ring[i], sizeof(struct ixgbe_ring)); + /* Clear copied XDP RX-queue info */ + memset(&temp_ring[i].xdp_rxq, 0, + sizeof(temp_ring[i].xdp_rxq)); + temp_ring[i].count = new_rx_count; err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); if (err) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index a23c2b5411a0..6e6b3c175267 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -1034,11 +1034,8 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, ixgbe_driver_name, ixgbe_driver_version); /* Firmware Version */ - snprintf(info->firmware_version, - sizeof(info->firmware_version), - "0x%08x", - (adapter->eeprom_verh << 16) | - adapter->eeprom_verl); + strlcpy(info->firmware_version, adapter->eeprom_id, + sizeof(info->firmware_version)); /* Model */ if (hw->mac.type == ixgbe_mac_82599EB) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 8e2a957aca18..cceafbc3f1db 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -350,6 +350,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return false; + /* 
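/* Worked example of the ETrack ID word ordering above: bit 15 of the
 * high word (NVM_ETK_VALID = 0x8000) selects the layout. With
 * hypothetical reads of etk_id_h = 0x8001 and etk_id_l = 0x2345 the bit
 * is set, so etk_id = 0x2345 | (0x8001 << 16) = 0x80012345; were the
 * bit clear, the two EEPROM words would contribute the opposite halves.
 */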
limit VMDq instances on the PF by number of Tx queues */ + vmdq_i = min_t(u16, vmdq_i, MAX_TX_QUEUES / tcs); + /* Add starting offset to total pool count */ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; @@ -512,12 +515,14 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) #ifdef IXGBE_FCOE u16 fcoe_i = 0; #endif - bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); /* only proceed if SR-IOV is enabled */ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return false; + /* limit l2fwd RSS based on total Tx queue limit */ + rss_i = min_t(u16, rss_i, MAX_TX_QUEUES / vmdq_i); + /* Add starting offset to total pool count */ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; @@ -525,7 +530,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); /* 64 pool mode with 2 queues per pool */ - if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) { + if (vmdq_i > 32) { vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; rss_m = IXGBE_RSS_2Q_MASK; rss_i = min_t(u16, rss_i, 2); @@ -701,7 +706,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; adapter->num_xdp_queues = 0; - adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_pools = 1; adapter->num_rx_queues_per_pool = 1; #ifdef CONFIG_IXGBE_DCB diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7737a05c717c..e47e0c470508 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -192,6 +192,13 @@ static struct workqueue_struct *ixgbe_wq; static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); +static const struct net_device_ops ixgbe_netdev_ops; + +static bool netif_is_ixgbe(struct net_device *dev) +{ + return dev && (dev->netdev_ops == &ixgbe_netdev_ops); +} + static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, u32 reg, u16 *value) { @@ -1064,24 +1071,12 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) { - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; - u32 head, tail; + unsigned int head, tail; - if (ring->l2_accel_priv) - adapter = ring->l2_accel_priv->real_adapter; - else - adapter = netdev_priv(ring->netdev); - - hw = &adapter->hw; - head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); - tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + head = ring->next_to_clean; + tail = ring->next_to_use; - if (head != tail) - return (head < tail) ? - tail - head : (tail + ring->count - head); - - return 0; + return ((head <= tail) ? 
tail : tail + ring->count) - head; } static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) @@ -2318,12 +2313,14 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); bool xdp_xmit = false; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; while (likely(total_rx_packets < budget)) { union ixgbe_adv_rx_desc *rx_desc; struct ixgbe_rx_buffer *rx_buffer; struct sk_buff *skb; - struct xdp_buff xdp; unsigned int size; /* return some buffers to hardware, one at a time is too slow */ @@ -2515,13 +2512,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); } -enum latency_range { - lowest_latency = 0, - low_latency = 1, - bulk_latency = 2, - latency_invalid = 255 -}; - /** * ixgbe_update_itr - update the dynamic ITR value based on statistics * @q_vector: structure containing interrupt and ring information @@ -2534,8 +2524,6 @@ enum latency_range { * based on theoretical maximum wire speed and thresholds were set based * on testing data as well as attempting to minimize response time * while increasing bulk throughput. - * this functionality is controlled by the InterruptThrottleRate module - * parameter (see ixgbe_param.c) **/ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, struct ixgbe_ring_container *ring_container) @@ -3853,16 +3841,20 @@ static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 vfreta = 0; - unsigned int pf_pool = adapter->num_vfs; /* Write redirection table to HW */ for (i = 0; i < reta_entries; i++) { + u16 pool = adapter->num_rx_pools; + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; - if ((i & 3) == 3) { - IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + if ((i & 3) != 3) + continue; + + while (pool--) + IXGBE_WRITE_REG(hw, + IXGBE_PFVFRETA(i >> 2, VMDQ_P(pool)), vfreta); - vfreta = 0; - } + vfreta = 0; } } @@ -3899,13 +3891,17 @@ static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - unsigned int pf_pool = adapter->num_vfs; int i, j; /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), - *(adapter->rss_key + i)); + for (i = 0; i < 10; i++) { + u16 pool = adapter->num_rx_pools; + + while (pool--) + IXGBE_WRITE_REG(hw, + IXGBE_PFVFRSSRK(i, VMDQ_P(pool)), + *(adapter->rss_key + i)); + } /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { @@ -3971,7 +3967,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { - unsigned int pf_pool = adapter->num_vfs; + u16 pool = adapter->num_rx_pools; /* Enable VF RSS mode */ mrqc |= IXGBE_MRQC_MULTIPLE_RSS; @@ -3981,7 +3977,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ixgbe_setup_vfreta(adapter); vfmrqc = IXGBE_MRQC_RSSEN; vfmrqc |= rss_field; - IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); + + while (pool--) + IXGBE_WRITE_REG(hw, + IXGBE_PFVFMRQC(VMDQ_P(pool)), + vfmrqc); } else { ixgbe_setup_reta(adapter); mrqc |= rss_field; @@ -4144,7 +4144,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; int rss_i = adapter->ring_feature[RING_F_RSS].indices; - u16 pool; + u16 pool = 
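/* The rewritten ixgbe_get_tx_pending() above derives the count from the
 * ring's software indices instead of two MMIO reads of TDH/TDT. Worked
 * example with ring->count = 512: head = 500, tail = 10 has wrapped, so
 * pending = (10 + 512) - 500 = 22, while head == tail yields 0. A
 * standalone mirror of the expression:
 */
static unsigned int example_tx_pending(unsigned int head, unsigned int tail,
				       unsigned int count)
{
	return ((head <= tail) ? tail : tail + count) - head;
}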
adapter->num_rx_pools; /* PSRTYPE must be initialized in non 82598 adapters */ u32 psrtype = IXGBE_PSRTYPE_TCPHDR | @@ -4161,7 +4161,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) else if (rss_i > 1) psrtype |= 1u << 29; - for_each_set_bit(pool, &adapter->fwd_bitmask, 32) + while (pool--) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } @@ -4488,8 +4488,9 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; - if (ring->l2_accel_priv) + if (!netif_is_ixgbe(ring->netdev)) continue; + j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl &= ~IXGBE_RXDCTL_VME; @@ -4525,8 +4526,9 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; - if (ring->l2_accel_priv) + if (!netif_is_ixgbe(ring->netdev)) continue; + j = ring->reg_idx; vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); vlnctrl |= IXGBE_RXDCTL_VME; @@ -5277,29 +5279,6 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); } -static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) -{ - struct ixgbe_adapter *adapter = vadapter->real_adapter; - int rss_i = adapter->num_rx_queues_per_pool; - struct ixgbe_hw *hw = &adapter->hw; - u16 pool = vadapter->pool; - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_L2HDR | - IXGBE_PSRTYPE_IPV6HDR; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - if (rss_i > 3) - psrtype |= 2u << 29; - else if (rss_i > 1) - psrtype |= 1u << 29; - - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); -} - /** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from @@ -5363,7 +5342,6 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, usleep_range(10000, 20000); ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); ixgbe_clean_rx_ring(rx_ring); - rx_ring->l2_accel_priv = NULL; } static int ixgbe_fwd_ring_down(struct net_device *vdev, @@ -5381,10 +5359,8 @@ static int ixgbe_fwd_ring_down(struct net_device *vdev, adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; } - for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { - adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->tx_ring[txbase + i]->netdev = adapter->netdev; - } return 0; @@ -5397,14 +5373,13 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev, unsigned int rxbase, txbase, queues; int i, baseq, err = 0; - if (!test_bit(accel->pool, &adapter->fwd_bitmask)) + if (!test_bit(accel->pool, adapter->fwd_bitmask)) return 0; baseq = accel->pool * adapter->num_rx_queues_per_pool; - netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + netdev_dbg(vdev, "pool %i:%i queues %i:%i\n", accel->pool, adapter->num_rx_pools, - baseq, baseq + adapter->num_rx_queues_per_pool, - adapter->fwd_bitmask); + baseq, baseq + adapter->num_rx_queues_per_pool); accel->netdev = vdev; accel->rx_base_queue = rxbase = baseq; @@ -5415,14 +5390,11 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev, for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { adapter->rx_ring[rxbase + i]->netdev = vdev; - adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); } - for (i = 0; i < 
adapter->num_rx_queues_per_pool; i++) { + for (i = 0; i < adapter->num_rx_queues_per_pool; i++) adapter->tx_ring[txbase + i]->netdev = vdev; - adapter->tx_ring[txbase + i]->l2_accel_priv = accel; - } queues = min_t(unsigned int, adapter->num_rx_queues_per_pool, vdev->num_tx_queues); @@ -5435,10 +5407,10 @@ static int ixgbe_fwd_ring_up(struct net_device *vdev, goto fwd_queue_err; if (is_valid_ether_addr(vdev->dev_addr)) - ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); + ixgbe_add_mac_filter(adapter, vdev->dev_addr, + VMDQ_P(accel->pool)); - ixgbe_fwd_psrtype(accel); - ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); + ixgbe_macvlan_set_rx_mode(vdev, VMDQ_P(accel->pool), adapter); return err; fwd_queue_err: ixgbe_fwd_ring_down(vdev, accel); @@ -6302,7 +6274,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, } /* PF holds first pool slot */ - set_bit(0, &adapter->fwd_bitmask); + set_bit(0, adapter->fwd_bitmask); set_bit(__IXGBE_DOWN, &adapter->state); return 0; @@ -6444,6 +6416,11 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; + /* XDP RX-queue info */ + if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, + rx_ring->queue_index) < 0) + goto err; + rx_ring->xdp_prog = adapter->xdp_prog; return 0; @@ -6541,6 +6518,7 @@ void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring) ixgbe_clean_rx_ring(rx_ring); rx_ring->xdp_prog = NULL; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; @@ -6783,7 +6761,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; struct ixgbe_hw *hw = &adapter->hw; - u32 ctrl, fctrl; + u32 ctrl; u32 wufc = adapter->wol; #ifdef CONFIG_PM int retval = 0; @@ -6808,18 +6786,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) hw->mac.ops.stop_link_on_d3(hw); if (wufc) { + u32 fctrl; + ixgbe_set_rx_mode(netdev); /* enable the optics for 82599 SFP+ fiber as we can WoL */ if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); - /* turn on all-multi mode if wake on multicast is enabled */ - if (wufc & IXGBE_WUFC_MC) { - fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); - fctrl |= IXGBE_FCTRL_MPE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - } + /* enable the reception of multicast packets */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ctrl |= IXGBE_CTRL_GIO_DIS; @@ -7655,6 +7633,7 @@ sfp_out: static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + u32 cap_speed; u32 speed; bool autoneg = false; @@ -7667,16 +7646,14 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG; - speed = hw->phy.autoneg_advertised; - if ((!speed) && (hw->mac.ops.get_link_capabilities)) { - hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); + hw->mac.ops.get_link_capabilities(hw, &cap_speed, &autoneg); - /* setup the highest link when no autoneg */ - if (!autoneg) { - if (speed & IXGBE_LINK_SPEED_10GB_FULL) - speed = IXGBE_LINK_SPEED_10GB_FULL; - } - } + /* advertise highest capable link speed */ + if (!autoneg && (cap_speed & IXGBE_LINK_SPEED_10GB_FULL)) + speed = IXGBE_LINK_SPEED_10GB_FULL; + else + speed = cap_speed & (IXGBE_LINK_SPEED_10GB_FULL | + 
IXGBE_LINK_SPEED_1GB_FULL); if (hw->mac.ops.setup_link) hw->mac.ops.setup_link(hw, speed, true); @@ -8869,7 +8846,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - bool pools; /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) @@ -8878,10 +8854,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) if (hw->mac.type == ixgbe_mac_82598EB && tc && tc < MAX_TRAFFIC_CLASS) return -EINVAL; - pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); - if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) - return -EBUSY; - /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. Unfortunately, the * hardware is not flexible enough to do this dynamically. @@ -9044,6 +9016,7 @@ static int get_macvlan_queue(struct net_device *upper, void *_data) static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, u8 *queue, u64 *action) { + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; unsigned int num_vfs = adapter->num_vfs, vf; struct upper_walk_data data; struct net_device *upper; @@ -9052,11 +9025,7 @@ static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, for (vf = 0; vf < num_vfs; ++vf) { upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); if (upper->ifindex == ifindex) { - if (adapter->num_rx_pools > 1) - *queue = vf * 2; - else - *queue = vf * adapter->num_rx_queues_per_pool; - + *queue = vf * __ALIGN_MASK(1, ~vmdq->mask); *action = vf + 1; *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; return 0; @@ -9823,6 +9792,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) struct ixgbe_fwd_adapter *fwd_adapter = NULL; struct ixgbe_adapter *adapter = netdev_priv(pdev); int used_pools = adapter->num_vfs + adapter->num_rx_pools; + int tcs = netdev_get_num_tc(pdev) ? 
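/* Worked example of the redirect-action queue math above:
 * __ALIGN_MASK(1, ~vmdq->mask) rounds 1 up to the VMDq pool stride. If,
 * say, vmdq->mask were IXGBE_82599_VMDQ_4Q_MASK (0x7C), then
 * (1 + ~0x7C) & 0x7C = 4 queues per pool, so VF n's base queue is
 * n * 4. The mask value here is only an assumed illustration.
 */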
: 1; unsigned int limit; int pool, err; @@ -9850,7 +9820,7 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) } if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && - adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || + adapter->num_rx_pools >= (MAX_TX_QUEUES / tcs)) || (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) return ERR_PTR(-EBUSY); @@ -9858,10 +9828,9 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) if (!fwd_adapter) return ERR_PTR(-ENOMEM); - pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); - adapter->num_rx_pools++; - set_bit(pool, &adapter->fwd_bitmask); - limit = find_last_bit(&adapter->fwd_bitmask, 32); + pool = find_first_zero_bit(adapter->fwd_bitmask, adapter->num_rx_pools); + set_bit(pool, adapter->fwd_bitmask); + limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools + 1); /* Enable VMDq flag so device will be set in VM mode */ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; @@ -9887,8 +9856,7 @@ fwd_add_err: /* unwind counter and free adapter struct */ netdev_info(pdev, "%s: dfwd hardware acceleration failed\n", vdev->name); - clear_bit(pool, &adapter->fwd_bitmask); - adapter->num_rx_pools--; + clear_bit(pool, adapter->fwd_bitmask); kfree(fwd_adapter); return ERR_PTR(err); } @@ -9899,10 +9867,9 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; unsigned int limit; - clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); - adapter->num_rx_pools--; + clear_bit(fwd_adapter->pool, adapter->fwd_bitmask); - limit = find_last_bit(&adapter->fwd_bitmask, 32); + limit = find_last_bit(adapter->fwd_bitmask, adapter->num_rx_pools); adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); @@ -9917,11 +9884,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) } ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); - netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", + netdev_dbg(pdev, "pool %i:%i queues %i:%i\n", fwd_adapter->pool, adapter->num_rx_pools, fwd_adapter->rx_base_queue, - fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, - adapter->fwd_bitmask); + fwd_adapter->rx_base_queue + + adapter->num_rx_queues_per_pool); kfree(fwd_adapter); } @@ -10235,6 +10202,41 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, } /** + * ixgbe_set_fw_version - Set FW version + * @adapter: the adapter private structure + * + * This function is used by probe and ethtool to determine the FW version to + * format to display. The FW version is taken from the EEPROM/NVM. 
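/* With fwd_bitmask widened from a single long to a DECLARE_BITMAP()
 * array, pool bookkeeping in ixgbe_fwd_add()/ixgbe_fwd_del() above
 * reduces to the standard bitmap idiom; a condensed sketch with the
 * bound taken from the new ixgbe.h declaration:
 */
static int example_claim_pool(struct ixgbe_adapter *adapter)
{
	int pool;

	pool = find_first_zero_bit(adapter->fwd_bitmask,
				   IXGBE_MAX_MACVLANS + 1);
	if (pool >= IXGBE_MAX_MACVLANS + 1)
		return -EBUSY;
	set_bit(pool, adapter->fwd_bitmask);
	return pool;		/* released later via clear_bit() */
}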
+ */ +static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_nvm_version nvm_ver; + + ixgbe_get_oem_prod_version(hw, &nvm_ver); + if (nvm_ver.oem_valid) { + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "%x.%x.%x", nvm_ver.oem_major, nvm_ver.oem_minor, + nvm_ver.oem_release); + return; + } + + ixgbe_get_etk_id(hw, &nvm_ver); + ixgbe_get_orom_version(hw, &nvm_ver); + + if (nvm_ver.or_valid) { + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x, %d.%d.%d", nvm_ver.etk_id, nvm_ver.or_major, + nvm_ver.or_build, nvm_ver.or_patch); + return; + } + + /* Set ETrack ID format */ + snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), + "0x%08x", nvm_ver.etk_id); +} + +/** * ixgbe_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in ixgbe_pci_tbl @@ -10570,8 +10572,7 @@ skip_sriov: device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); /* save off EEPROM version number */ - hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); - hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); + ixgbe_set_fw_version(adapter); /* pick up the PCI bus settings for reporting later */ if (ixgbe_pcie_from_parent(hw)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 112d24c6c9ce..0085f4632966 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -227,9 +227,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; - struct ixgbe_hw *hw = &adapter->hw; - u32 gpie; - u32 vmdctl; int rss; /* set num VFs to 0 to prevent access to vfinfo */ @@ -271,18 +268,6 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) pci_disable_sriov(adapter->pdev); #endif - /* turn off device IOV mode */ - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); - gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); - gpie &= ~IXGBE_GPIE_VTMODE_MASK; - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - - /* set default pool back to 0 */ - vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - IXGBE_WRITE_FLUSH(hw); - /* Disable VMDq flag so device will be set in VM mode */ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; @@ -305,10 +290,9 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) { #ifdef CONFIG_PCI_IOV struct ixgbe_adapter *adapter = pci_get_drvdata(dev); - int err = 0; - u8 num_tc; - int i; int pre_existing_vfs = pci_num_vf(dev); + int err = 0, num_rx_pools, i, limit; + u8 num_tc; if (pre_existing_vfs && pre_existing_vfs != num_vfs) err = ixgbe_disable_sriov(adapter); @@ -331,22 +315,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) * other values out of range. */ num_tc = netdev_get_num_tc(adapter->netdev); + num_rx_pools = adapter->num_rx_pools; + limit = (num_tc > 4) ? IXGBE_MAX_VFS_8TC : + (num_tc > 1) ? 
IXGBE_MAX_VFS_4TC : IXGBE_MAX_VFS_1TC; - if (num_tc > 4) { - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_8TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); - return -EPERM; - } - } else if ((num_tc > 1) && (num_tc <= 4)) { - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_4TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); - return -EPERM; - } - } else { - if ((num_vfs + adapter->num_rx_pools) > IXGBE_MAX_VFS_1TC) { - e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); - return -EPERM; - } + if (num_vfs > (limit - num_rx_pools)) { + e_dev_err("Currently configured with %d TCs, and %d offloaded macvlans. Creating more than %d VFs is not allowed\n", + num_tc, num_rx_pools - 1, limit - num_rx_pools); + return -EPERM; } err = __ixgbe_enable_sriov(adapter, num_vfs); @@ -378,13 +354,15 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) int err; #ifdef CONFIG_PCI_IOV u32 current_flags = adapter->flags; + int prev_num_vf = pci_num_vf(dev); #endif err = ixgbe_disable_sriov(adapter); /* Only reinit if no error and state changed */ #ifdef CONFIG_PCI_IOV - if (!err && current_flags != adapter->flags) + if (!err && (current_flags != adapter->flags || + prev_num_vf != pci_num_vf(dev))) ixgbe_sriov_reinit(adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index ffa0ee5cd0f5..21eb79ae3c30 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -235,6 +235,45 @@ struct ixgbe_thermal_sensor_data { struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; }; +#define NVM_OROM_OFFSET 0x17 +#define NVM_OROM_BLK_LOW 0x83 +#define NVM_OROM_BLK_HI 0x84 +#define NVM_OROM_PATCH_MASK 0xFF +#define NVM_OROM_SHIFT 8 + +#define NVM_VER_MASK 0x00FF /* version mask */ +#define NVM_VER_SHIFT 8 /* version bit shift */ +#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ +#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ +#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ +#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ +#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ +#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ +#define NVM_ETK_OFF_LOW 0x2D /* version low order word */ +#define NVM_ETK_OFF_HI 0x2E /* version high order word */ +#define NVM_ETK_SHIFT 16 /* high version word shift */ +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETK_VALID 0x8000 +#define NVM_INVALID_PTR 0xFFFF +#define NVM_VER_SIZE 32 /* version string size */ + +struct ixgbe_nvm_version { + u32 etk_id; + u8 nvm_major; + u16 nvm_minor; + u8 nvm_id; + + bool oem_valid; + u8 oem_major; + u8 oem_minor; + u16 oem_release; + + bool or_valid; + u8 or_major; + u16 or_build; + u8 or_patch; +}; + /* Interrupt Registers */ #define IXGBE_EICR 0x00800 #define IXGBE_EICS 0x00808 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1f4a69134ade..573f743b556a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1896,10 +1896,6 @@ static void ixgbevf_set_rx_mode(struct net_device
*netdev) unsigned int flags = netdev->flags; int xcast_mode; - xcast_mode = (flags & IFF_ALLMULTI) ? IXGBEVF_XCAST_MODE_ALLMULTI : - (flags & (IFF_BROADCAST | IFF_MULTICAST)) ? - IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE; - /* request the most inclusive mode we need */ if (flags & IFF_PROMISC) xcast_mode = IXGBEVF_XCAST_MODE_PROMISC; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index da6fb825afea..ebe5c9148935 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -60,7 +60,7 @@ config MVNETA depends on ARCH_MVEBU || COMPILE_TEST depends on HAS_DMA select MVMDIO - select FIXED_PHY + select PHYLINK ---help--- This driver supports the network interface units in the Marvell ARMADA XP, ARMADA 370, ARMADA 38x and diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index a539263cd79c..25e9a551cc8c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -28,7 +28,7 @@ #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> -#include <linux/phy_fixed.h> +#include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <net/hwbm.h> @@ -189,6 +189,7 @@ #define MVNETA_GMAC_CTRL_0 0x2c00 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1) #define MVNETA_GMAC0_PORT_ENABLE BIT(0) #define MVNETA_GMAC_CTRL_2 0x2c08 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) @@ -204,13 +205,19 @@ #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5) #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6) #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7) +#define MVNETA_GMAC_AN_COMPLETE BIT(11) +#define MVNETA_GMAC_SYNC_OK BIT(14) #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) +#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3) +#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4) #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVNETA_GMAC_AN_SPEED_EN BIT(7) +#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8) +#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9) #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) @@ -237,6 +244,12 @@ #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2)) #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff +#define MVNETA_LPI_CTRL_0 0x2cc0 +#define MVNETA_LPI_CTRL_1 0x2cc4 +#define MVNETA_LPI_REQUEST_ENABLE BIT(0) +#define MVNETA_LPI_CTRL_2 0x2cc8 +#define MVNETA_LPI_STATUS 0x2ccc + #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff /* Descriptor ring Macros */ @@ -313,6 +326,11 @@ #define MVNETA_RX_GET_BM_POOL_ID(rxd) \ (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) +enum { + ETHTOOL_STAT_EEE_WAKEUP, + ETHTOOL_MAX_STATS, +}; + struct mvneta_statistic { unsigned short offset; unsigned short type; @@ -321,6 +339,7 @@ struct mvneta_statistic { #define T_REG_32 32 #define T_REG_64 64 +#define T_SW 1 static const struct mvneta_statistic mvneta_statistics[] = { { 0x3000, T_REG_64, "good_octets_received", }, @@ -355,6 +374,7 @@ static const struct mvneta_statistic mvneta_statistics[] = { { 0x304c, T_REG_32, "broadcast_frames_sent", }, { 0x3054, T_REG_32, "fc_sent", }, { 0x300c, T_REG_32, "internal_mac_transmit_err", }, + { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", 
}, }; struct mvneta_pcpu_stats { @@ -407,20 +427,20 @@ struct mvneta_port { u16 tx_ring_size; u16 rx_ring_size; - struct mii_bus *mii_bus; phy_interface_t phy_interface; - struct device_node *phy_node; - unsigned int link; - unsigned int duplex; - unsigned int speed; + struct device_node *dn; unsigned int tx_csum_limit; - unsigned int use_inband_status:1; + struct phylink *phylink; struct mvneta_bm *bm_priv; struct mvneta_bm_pool *pool_long; struct mvneta_bm_pool *pool_short; int bm_win_id; + bool eee_enabled; + bool eee_active; + bool tx_lpi_enabled; + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; @@ -1214,10 +1234,6 @@ static void mvneta_port_disable(struct mvneta_port *pp) val &= ~MVNETA_GMAC0_PORT_ENABLE; mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); - pp->link = 0; - pp->duplex = -1; - pp->speed = 0; - udelay(200); } @@ -1277,44 +1293,6 @@ static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); } -static void mvneta_set_autoneg(struct mvneta_port *pp, int enable) -{ - u32 val; - - if (enable) { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | - MVNETA_GMAC_FORCE_LINK_DOWN | - MVNETA_GMAC_AN_FLOW_CTRL_EN); - val |= MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - - val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); - - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val |= MVNETA_GMAC2_INBAND_AN_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); - } else { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | - MVNETA_GMAC_AN_SPEED_EN | - MVNETA_GMAC_AN_DUPLEX_EN); - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - - val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); - val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); - - val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); - } -} - static void mvneta_percpu_unmask_interrupt(void *arg) { struct mvneta_port *pp = arg; @@ -1467,7 +1445,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp) val &= ~MVNETA_PHY_POLLING_ENABLE; mvreg_write(pp, MVNETA_UNIT_CONTROL, val); - mvneta_set_autoneg(pp, pp->use_inband_status); mvneta_set_ucast_table(pp, -1); mvneta_set_special_mcast_table(pp, -1); mvneta_set_other_mcast_table(pp, -1); @@ -2692,26 +2669,11 @@ static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int mvneta_fixed_link_update(struct mvneta_port *pp, - struct phy_device *phy) +static void mvneta_link_change(struct mvneta_port *pp) { - struct fixed_phy_status status; - struct fixed_phy_status changed = {}; u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); - status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); - if (gmac_stat & MVNETA_GMAC_SPEED_1000) - status.speed = SPEED_1000; - else if (gmac_stat & MVNETA_GMAC_SPEED_100) - status.speed = SPEED_100; - else - status.speed = SPEED_10; - status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); - changed.link = 1; - changed.speed = 1; - changed.duplex = 1; - fixed_phy_update_state(phy, &status, &changed); - return 0; + phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); } /* NAPI handler @@ -2727,7 +2689,6 @@ static int mvneta_poll(struct napi_struct 
*napi, int budget) u32 cause_rx_tx; int rx_queue; struct mvneta_port *pp = netdev_priv(napi->dev); - struct net_device *ndev = pp->dev; struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); if (!netif_running(pp->dev)) { @@ -2741,12 +2702,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget) u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); - if (pp->use_inband_status && (cause_misc & - (MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE))) { - mvneta_fixed_link_update(pp, ndev->phydev); - } + + if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE)) + mvneta_link_change(pp); } /* Release Tx descriptors */ @@ -3060,7 +3019,6 @@ static int mvneta_setup_txqs(struct mvneta_port *pp) static void mvneta_start_dev(struct mvneta_port *pp) { int cpu; - struct net_device *ndev = pp->dev; mvneta_max_rx_size_set(pp, pp->pkt_size); mvneta_txq_max_tx_size_set(pp, pp->pkt_size); @@ -3085,19 +3043,17 @@ static void mvneta_start_dev(struct mvneta_port *pp) mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); + MVNETA_CAUSE_LINK_CHANGE); - phy_start(ndev->phydev); + phylink_start(pp->phylink); netif_tx_start_all_queues(pp->dev); } static void mvneta_stop_dev(struct mvneta_port *pp) { unsigned int cpu; - struct net_device *ndev = pp->dev; - phy_stop(ndev->phydev); + phylink_stop(pp->phylink); if (!pp->neta_armada3700) { for_each_online_cpu(cpu) { @@ -3251,103 +3207,260 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr) return 0; } -static void mvneta_adjust_link(struct net_device *ndev) +static void mvneta_validate(struct net_device *ndev, unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(state->interface) && + !phy_interface_mode_is_rgmii(state->interface)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + + /* Asymmetric pause is unsupported */ + phylink_set(mask, Pause); + /* Half-duplex at speeds higher than 100Mbit is unsupported */ + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + + if (!phy_interface_mode_is_8023z(state->interface)) { + /* 10M and 100M are only supported in non-802.3z mode */ + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + } + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static int mvneta_mac_link_state(struct net_device *ndev, + struct phylink_link_state *state) { struct mvneta_port *pp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - int status_change = 0; + u32 gmac_stat; + + gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); - if (phydev->link) { - if ((pp->speed != phydev->speed) || - (pp->duplex != phydev->duplex)) { - u32 val; + if (gmac_stat & MVNETA_GMAC_SPEED_1000) + state->speed = SPEED_1000; + else if (gmac_stat & MVNETA_GMAC_SPEED_100) + 
state->speed = SPEED_100; + else + state->speed = SPEED_10; - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | - MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); + state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE); + state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); + state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); + + state->pause = 0; + if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE) + state->pause |= MLO_PAUSE_RX; + if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE) + state->pause |= MLO_PAUSE_TX; + + return 1; +} - if (phydev->duplex) - val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; +static void mvneta_mac_an_restart(struct net_device *ndev) +{ + struct mvneta_port *pp = netdev_priv(ndev); + u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - if (phydev->speed == SPEED_1000) - val |= MVNETA_GMAC_CONFIG_GMII_SPEED; - else if (phydev->speed == SPEED_100) - val |= MVNETA_GMAC_CONFIG_MII_SPEED; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + gmac_an | MVNETA_GMAC_INBAND_RESTART_AN); + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN); +} - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); +static void mvneta_mac_config(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mvneta_port *pp = netdev_priv(ndev); + u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0); + u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2); + u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + + new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X; + new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE | + MVNETA_GMAC2_PORT_RESET); + new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_INBAND_RESTART_AN | + MVNETA_GMAC_CONFIG_MII_SPEED | + MVNETA_GMAC_CONFIG_GMII_SPEED | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL | + MVNETA_GMAC_CONFIG_FLOW_CTRL | + MVNETA_GMAC_AN_FLOW_CTRL_EN | + MVNETA_GMAC_CONFIG_FULL_DUPLEX | + MVNETA_GMAC_AN_DUPLEX_EN); - pp->duplex = phydev->duplex; - pp->speed = phydev->speed; - } + /* Even though it might look weird, when we're configured in + * SGMII or QSGMII mode, the RGMII bit needs to be set. 
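+ * (The same RGMII requirement was previously handled in + * mvneta_port_power_up(), which no longer touches GMAC_CTRL_2.)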
+ */ + new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII; + + if (state->interface == PHY_INTERFACE_MODE_QSGMII || + state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)) + new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE; + + if (phylink_test(state->advertising, Pause)) + new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL; + if (state->pause & MLO_PAUSE_TXRX_MASK) + new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL; + + if (!phylink_autoneg_inband(mode)) { + /* Phy or fixed speed */ + if (state->duplex) + new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (state->speed == SPEED_1000) + new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED; + else if (state->speed == SPEED_100) + new_an |= MVNETA_GMAC_CONFIG_MII_SPEED; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + /* SGMII mode receives the state from the PHY */ + new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE; + new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_FORCE_LINK_PASS)) | + MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + } else { + /* 802.3z negotiation - only 1000base-X */ + new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X; + new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN | + MVNETA_GMAC_FORCE_LINK_PASS)) | + MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_CONFIG_GMII_SPEED | + /* The MAC only supports FD mode */ + MVNETA_GMAC_CONFIG_FULL_DUPLEX; + + if (state->pause & MLO_PAUSE_AN && state->an_enabled) + new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN; } - if (phydev->link != pp->link) { - if (!phydev->link) { - pp->duplex = -1; - pp->speed = 0; - } + /* Armada 370 documentation says we can only change the port mode + * and in-band enable when the link is down, so force it down + * while making these changes. 
We also do this for GMAC_CTRL2 */ + if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X || + (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE || + (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) { + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, + (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) | + MVNETA_GMAC_FORCE_LINK_DOWN); + } - pp->link = phydev->link; - status_change = 1; + if (new_ctrl0 != gmac_ctrl0) + mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0); + if (new_ctrl2 != gmac_ctrl2) + mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2); + if (new_clk != gmac_clk) + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk); + if (new_an != gmac_an) + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an); + + if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) { + while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & + MVNETA_GMAC2_PORT_RESET) != 0) + continue; } +} - if (status_change) { - if (phydev->link) { - if (!pp->use_inband_status) { - u32 val = mvreg_read(pp, - MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; - val |= MVNETA_GMAC_FORCE_LINK_PASS; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, - val); - } - mvneta_port_up(pp); - } else { - if (!pp->use_inband_status) { - u32 val = mvreg_read(pp, - MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~MVNETA_GMAC_FORCE_LINK_PASS; - val |= MVNETA_GMAC_FORCE_LINK_DOWN; - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, - val); - } - mvneta_port_down(pp); - } - phy_print_status(phydev); +static void mvneta_set_eee(struct mvneta_port *pp, bool enable) +{ + u32 lpi_ctl1; + + lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1); + if (enable) + lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE; + else + lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE; + mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1); +} + +static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode) +{ + struct mvneta_port *pp = netdev_priv(ndev); + u32 val; + + mvneta_port_down(pp); + + if (!phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; + val |= MVNETA_GMAC_FORCE_LINK_DOWN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } + + pp->eee_active = false; + mvneta_set_eee(pp, false); } -static int mvneta_mdio_probe(struct mvneta_port *pp) +static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode, + struct phy_device *phy) { - struct phy_device *phy_dev; - struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct mvneta_port *pp = netdev_priv(ndev); + u32 val; - phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0, - pp->phy_interface); - if (!phy_dev) { - netdev_err(pp->dev, "could not find the PHY\n"); - return -ENODEV; + if (!phylink_autoneg_inband(mode)) { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; + val |= MVNETA_GMAC_FORCE_LINK_PASS; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); } - phy_ethtool_get_wol(phy_dev, &wol); - device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); + mvneta_port_up(pp); - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->advertising = phy_dev->supported; + if (phy && pp->eee_enabled) { + pp->eee_active = phy_init_eee(phy, 0) >= 0; + mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled); + } +} - pp->link = 0; - pp->duplex = 0; - pp->speed = 0; +static const struct phylink_mac_ops mvneta_phylink_ops = { + .validate = mvneta_validate, + .mac_link_state = mvneta_mac_link_state, + .mac_an_restart = mvneta_mac_an_restart, + .mac_config = mvneta_mac_config, + .mac_link_down = mvneta_mac_link_down, + 
.mac_link_up = mvneta_mac_link_up, +}; - return 0; +static int mvneta_mdio_probe(struct mvneta_port *pp) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0); + + if (err) + netdev_err(pp->dev, "could not attach PHY: %d\n", err); + + phylink_ethtool_get_wol(pp->phylink, &wol); + device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); + + return err; } static void mvneta_mdio_remove(struct mvneta_port *pp) { - struct net_device *ndev = pp->dev; - - phy_disconnect(ndev->phydev); + phylink_disconnect_phy(pp->phylink); } /* Electing a CPU must be done in an atomic way: it should be done @@ -3455,8 +3568,7 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node) on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); + MVNETA_CAUSE_LINK_CHANGE); netif_tx_start_all_queues(pp->dev); spin_unlock(&pp->lock); return 0; @@ -3497,8 +3609,7 @@ static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node) on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); mvreg_write(pp, MVNETA_INTR_MISC_MASK, MVNETA_CAUSE_PHY_STATUS_CHANGE | - MVNETA_CAUSE_LINK_CHANGE | - MVNETA_CAUSE_PSC_SYNC_CHANGE); + MVNETA_CAUSE_LINK_CHANGE); netif_tx_start_all_queues(pp->dev); return 0; } @@ -3626,10 +3737,9 @@ static int mvneta_stop(struct net_device *dev) static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - if (!dev->phydev) - return -ENOTSUPP; + struct mvneta_port *pp = netdev_priv(dev); - return phy_mii_ioctl(dev->phydev, ifr, cmd); + return phylink_mii_ioctl(pp->phylink, ifr, cmd); } /* Ethtool methods */ @@ -3640,44 +3750,25 @@ mvneta_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd) { struct mvneta_port *pp = netdev_priv(ndev); - struct phy_device *phydev = ndev->phydev; - - if (!phydev) - return -ENODEV; - - if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { - u32 val; - - mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE); - - if (cmd->base.autoneg == AUTONEG_DISABLE) { - val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); - val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | - MVNETA_GMAC_CONFIG_GMII_SPEED | - MVNETA_GMAC_CONFIG_FULL_DUPLEX); - - if (phydev->duplex) - val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; - if (phydev->speed == SPEED_1000) - val |= MVNETA_GMAC_CONFIG_GMII_SPEED; - else if (phydev->speed == SPEED_100) - val |= MVNETA_GMAC_CONFIG_MII_SPEED; + return phylink_ethtool_ksettings_set(pp->phylink, cmd); +} - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); - } +/* Get link ksettings for ethtools */ +static int +mvneta_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct mvneta_port *pp = netdev_priv(ndev); - pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE); - netdev_info(pp->dev, "autoneg status set to %i\n", - pp->use_inband_status); + return phylink_ethtool_ksettings_get(pp->phylink, cmd); +} - if (netif_running(ndev)) { - mvneta_port_down(pp); - mvneta_port_up(pp); - } - } +static int mvneta_ethtool_nway_reset(struct net_device *dev) +{ + struct mvneta_port *pp = netdev_priv(dev); - return phy_ethtool_ksettings_set(ndev->phydev, cmd); + return phylink_ethtool_nway_reset(pp->phylink); } /* Set interrupt coalescing for ethtools */ @@ -3769,6 +3860,22 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev, return 0; } +static 
void mvneta_ethtool_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvneta_port *pp = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(pp->phylink, pause); +} + +static int mvneta_ethtool_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(pp->phylink, pause); +} + static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) { @@ -3785,26 +3892,35 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) { const struct mvneta_statistic *s; void __iomem *base = pp->base; - u32 high, low, val; - u64 val64; + u32 high, low; + u64 val; int i; for (i = 0, s = mvneta_statistics; s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s++, i++) { + val = 0; + switch (s->type) { case T_REG_32: val = readl_relaxed(base + s->offset); - pp->ethtool_stats[i] += val; break; case T_REG_64: /* Docs say to read low 32-bit then high */ low = readl_relaxed(base + s->offset); high = readl_relaxed(base + s->offset + 4); - val64 = (u64)high << 32 | low; - pp->ethtool_stats[i] += val64; + val = (u64)high << 32 | low; + break; + case T_SW: + switch (s->offset) { + case ETHTOOL_STAT_EEE_WAKEUP: + val = phylink_get_eee_err(pp->phylink); + break; + } break; } + + pp->ethtool_stats[i] += val; } } @@ -3939,28 +4055,81 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, static void mvneta_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { - wol->supported = 0; - wol->wolopts = 0; + struct mvneta_port *pp = netdev_priv(dev); - if (dev->phydev) - phy_ethtool_get_wol(dev->phydev, wol); + phylink_ethtool_get_wol(pp->phylink, wol); } static int mvneta_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { + struct mvneta_port *pp = netdev_priv(dev); int ret; - if (!dev->phydev) - return -EOPNOTSUPP; - - ret = phy_ethtool_set_wol(dev->phydev, wol); + ret = phylink_ethtool_set_wol(pp->phylink, wol); if (!ret) device_set_wakeup_enable(&dev->dev, !!wol->wolopts); return ret; } +static int mvneta_ethtool_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_get_module_info(pp->phylink, modinfo); +} + +static int mvneta_ethtool_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *ee, u8 *buf) +{ + struct mvneta_port *pp = netdev_priv(dev); + + return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf); +} + +static int mvneta_ethtool_get_eee(struct net_device *dev, + struct ethtool_eee *eee) +{ + struct mvneta_port *pp = netdev_priv(dev); + u32 lpi_ctl0; + + lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + + eee->eee_enabled = pp->eee_enabled; + eee->eee_active = pp->eee_active; + eee->tx_lpi_enabled = pp->tx_lpi_enabled; + eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale; + + return phylink_ethtool_get_eee(pp->phylink, eee); +} + +static int mvneta_ethtool_set_eee(struct net_device *dev, + struct ethtool_eee *eee) +{ + struct mvneta_port *pp = netdev_priv(dev); + u32 lpi_ctl0; + + /* The Armada 37x documents do not give limits for this other than + * it being an 8-bit register. 
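+ * Out-of-range values are rejected rather than silently truncated.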
*/ + if (eee->tx_lpi_enabled && + (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255)) + return -EINVAL; + + lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); + lpi_ctl0 &= ~(0xff << 8); + lpi_ctl0 |= eee->tx_lpi_timer << 8; + mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0); + + pp->eee_enabled = eee->eee_enabled; + pp->tx_lpi_enabled = eee->tx_lpi_enabled; + + mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled); + + return phylink_ethtool_set_eee(pp->phylink, eee); +} + static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, @@ -3974,13 +4143,15 @@ static const struct net_device_ops mvneta_netdev_ops = { }; static const struct ethtool_ops mvneta_eth_tool_ops = { - .nway_reset = phy_ethtool_nway_reset, + .nway_reset = mvneta_ethtool_nway_reset, .get_link = ethtool_op_get_link, .set_coalesce = mvneta_ethtool_set_coalesce, .get_coalesce = mvneta_ethtool_get_coalesce, .get_drvinfo = mvneta_ethtool_get_drvinfo, .get_ringparam = mvneta_ethtool_get_ringparam, .set_ringparam = mvneta_ethtool_set_ringparam, + .get_pauseparam = mvneta_ethtool_get_pauseparam, + .set_pauseparam = mvneta_ethtool_set_pauseparam, .get_strings = mvneta_ethtool_get_strings, .get_ethtool_stats = mvneta_ethtool_get_stats, .get_sset_count = mvneta_ethtool_get_sset_count, @@ -3988,10 +4159,14 @@ static const struct ethtool_ops mvneta_eth_tool_ops = { .get_rxnfc = mvneta_ethtool_get_rxnfc, .get_rxfh = mvneta_ethtool_get_rxfh, .set_rxfh = mvneta_ethtool_set_rxfh, - .get_link_ksettings = phy_ethtool_get_link_ksettings, + .get_link_ksettings = mvneta_ethtool_get_link_ksettings, .set_link_ksettings = mvneta_ethtool_set_link_ksettings, .get_wol = mvneta_ethtool_get_wol, .set_wol = mvneta_ethtool_set_wol, + .get_module_info = mvneta_ethtool_get_module_info, + .get_module_eeprom = mvneta_ethtool_get_module_eeprom, + .get_eee = mvneta_ethtool_get_eee, + .set_eee = mvneta_ethtool_set_eee, }; /* Initialize hw */ @@ -4091,42 +4266,16 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, /* Power up the port */ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) { - u32 ctrl; - /* MAC Cause register should be cleared */ mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); - ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); - - /* Even though it might look weird, when we're configured in - * SGMII or QSGMII mode, the RGMII bit needs to be set. 
- */ - switch(phy_mode) { - case PHY_INTERFACE_MODE_QSGMII: + if (phy_mode == PHY_INTERFACE_MODE_QSGMII) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); - ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; - break; - case PHY_INTERFACE_MODE_SGMII: + else if (phy_mode == PHY_INTERFACE_MODE_SGMII || + phy_mode == PHY_INTERFACE_MODE_1000BASEX) mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); - ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; - break; - case PHY_INTERFACE_MODE_RGMII: - case PHY_INTERFACE_MODE_RGMII_ID: - case PHY_INTERFACE_MODE_RGMII_RXID: - case PHY_INTERFACE_MODE_RGMII_TXID: - ctrl |= MVNETA_GMAC2_PORT_RGMII; - break; - default: + else if (!phy_interface_mode_is_rgmii(phy_mode)) return -EINVAL; - } - - /* Cancel Port Reset */ - ctrl &= ~MVNETA_GMAC2_PORT_RESET; - mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); - - while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & - MVNETA_GMAC2_PORT_RESET) != 0) - continue; return 0; } @@ -4136,14 +4285,13 @@ static int mvneta_probe(struct platform_device *pdev) { struct resource *res; struct device_node *dn = pdev->dev.of_node; - struct device_node *phy_node; struct device_node *bm_node; struct mvneta_port *pp; struct net_device *dev; + struct phylink *phylink; const char *dt_mac_addr; char hw_mac_addr[ETH_ALEN]; const char *mac_from; - const char *managed; int tx_csum_limit; int phy_mode; int err; @@ -4159,31 +4307,18 @@ static int mvneta_probe(struct platform_device *pdev) goto err_free_netdev; } - phy_node = of_parse_phandle(dn, "phy", 0); - if (!phy_node) { - if (!of_phy_is_fixed_link(dn)) { - dev_err(&pdev->dev, "no PHY specified\n"); - err = -ENODEV; - goto err_free_irq; - } - - err = of_phy_register_fixed_link(dn); - if (err < 0) { - dev_err(&pdev->dev, "cannot register fixed PHY\n"); - goto err_free_irq; - } - - /* In the case of a fixed PHY, the DT node associated - * to the PHY is the Ethernet MAC DT node. 
- */ - phy_node = of_node_get(dn); - } - phy_mode = of_get_phy_mode(dn); if (phy_mode < 0) { dev_err(&pdev->dev, "incorrect phy-mode\n"); err = -EINVAL; - goto err_put_phy_node; + goto err_free_irq; + } + + phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode, + &mvneta_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto err_free_irq; } dev->tx_queue_len = MVNETA_MAX_TXD; @@ -4194,12 +4329,9 @@ static int mvneta_probe(struct platform_device *pdev) pp = netdev_priv(dev); spin_lock_init(&pp->lock); - pp->phy_node = phy_node; + pp->phylink = phylink; pp->phy_interface = phy_mode; - - err = of_property_read_string(dn, "managed", &managed); - pp->use_inband_status = (err == 0 && - strcmp(managed, "in-band-status") == 0); + pp->dn = dn; pp->rxq_def = rxq_def; @@ -4221,7 +4353,7 @@ static int mvneta_probe(struct platform_device *pdev) pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); - goto err_put_phy_node; + goto err_free_phylink; } clk_prepare_enable(pp->clk); @@ -4358,14 +4490,6 @@ static int mvneta_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pp->dev); - if (pp->use_inband_status) { - struct phy_device *phy = of_phy_find_device(dn); - - mvneta_fixed_link_update(pp, phy); - - put_device(&phy->mdio.dev); - } - return 0; err_netdev: @@ -4382,10 +4506,9 @@ err_free_ports: err_clk: clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); -err_put_phy_node: - of_node_put(phy_node); - if (of_phy_is_fixed_link(dn)) - of_phy_deregister_fixed_link(dn); +err_free_phylink: + if (pp->phylink) + phylink_destroy(pp->phylink); err_free_irq: irq_dispose_mapping(dev->irq); err_free_netdev: @@ -4397,7 +4520,6 @@ err_free_netdev: static int mvneta_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); - struct device_node *dn = pdev->dev.of_node; struct mvneta_port *pp = netdev_priv(dev); unregister_netdev(dev); @@ -4405,10 +4527,8 @@ static int mvneta_remove(struct platform_device *pdev) clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); - if (of_phy_is_fixed_link(dn)) - of_phy_deregister_fixed_link(dn); irq_dispose_mapping(dev->irq); - of_node_put(pp->phy_node); + phylink_destroy(pp->phylink); free_netdev(dev); if (pp->bm_priv) { @@ -4426,8 +4546,10 @@ static int mvneta_suspend(struct device *device) struct net_device *dev = dev_get_drvdata(device); struct mvneta_port *pp = netdev_priv(dev); + rtnl_lock(); if (netif_running(dev)) mvneta_stop(dev); + rtnl_unlock(); netif_device_detach(dev); clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); @@ -4460,14 +4582,13 @@ static int mvneta_resume(struct device *device) return err; } - if (pp->use_inband_status) - mvneta_fixed_link_update(pp, dev->phydev); - netif_device_attach(dev); + rtnl_lock(); if (netif_running(dev)) { mvneta_open(dev); mvneta_set_rx_mode(dev); } + rtnl_unlock(); return 0; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 9efe1771423c..9fe85300e7b6 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4287,7 +4287,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy) dev_err(&hw->pdev->dev, "VPD cycle timed out\n"); return -ETIMEDOUT; } - mdelay(1); + msleep(1); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index 5f41dc92aa68..1a0c3bf86ead 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -310,6 +310,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) } switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: case IEEE_8021QAZ_TSA_STRICT: break; case IEEE_8021QAZ_TSA_ETS: @@ -347,6 +348,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, /* higher TC means higher priority => lower pg */ for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: + pg[i] = MLX4_EN_TC_VENDOR; + tc_tx_bw[i] = MLX4_EN_BW_MAX; + break; case IEEE_8021QAZ_TSA_STRICT: pg[i] = num_strict++; tc_tx_bw[i] = MLX4_EN_BW_MAX; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index bf1f04164885..ebc1f566a4d9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1094,12 +1094,21 @@ static int mlx4_en_set_ringparam(struct net_device *dev, if (param->rx_jumbo_pending || param->rx_mini_pending) return -EINVAL; + if (param->rx_pending < MLX4_EN_MIN_RX_SIZE) { + en_warn(priv, "%s: rx_pending (%d) < min (%d)\n", + __func__, param->rx_pending, + MLX4_EN_MIN_RX_SIZE); + return -EINVAL; + } + if (param->tx_pending < MLX4_EN_MIN_TX_SIZE) { + en_warn(priv, "%s: tx_pending (%d) < min (%lu)\n", + __func__, param->tx_pending, + MLX4_EN_MIN_TX_SIZE); + return -EINVAL; + } + rx_size = roundup_pow_of_two(param->rx_pending); - rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); - rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); tx_size = roundup_pow_of_two(param->tx_pending); - tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); - tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); if (rx_size == (priv->port_up ? 
priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 99051a294fa6..8fc51bc29003 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2172,8 +2172,9 @@ static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], prof->rx_ring_size, priv->stride, - node)) + node, i)) goto err; + } #ifdef CONFIG_RFS_ACCEL @@ -3336,6 +3337,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + u8 prio; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) { + priv->ets.prio_tc[prio] = prio; + priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR; + } + priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; priv->flags |= MLX4_EN_DCB_ENABLED; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 85e28efcda33..b4d144e67514 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -262,7 +262,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, - u32 size, u16 stride, int node) + u32 size, u16 stride, int node, int queue_index) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rx_ring *ring; @@ -286,6 +286,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->log_stride = ffs(ring->stride) - 1; ring->buf_size = ring->size * ring->stride + TXBB_SIZE; + if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0) + goto err_ring; + tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS * sizeof(struct mlx4_en_rx_alloc)); ring->rx_info = vzalloc_node(tmp, node); @@ -293,7 +296,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring->rx_info = vzalloc(tmp); if (!ring->rx_info) { err = -ENOMEM; - goto err_ring; + goto err_xdp_info; } } @@ -317,6 +320,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, err_info: vfree(ring->rx_info); ring->rx_info = NULL; +err_xdp_info: + xdp_rxq_info_unreg(&ring->xdp_rxq); err_ring: kfree(ring); *pring = NULL; @@ -440,6 +445,7 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, lockdep_is_held(&mdev->state_lock)); if (old_prog) bpf_prog_put(old_prog); + xdp_rxq_info_unreg(&ring->xdp_rxq); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; @@ -617,6 +623,10 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, return 0; } #endif + +/* We reach this function only after checking that any of + * the (IPv4 | IPv6) bits are set in cqe->status. 
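+ * With CONFIG_IPV6 disabled, only the IPv4 bit can be set here, so the + * IPv4 helper is the final fallback.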
+ */ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, netdev_features_t dev_features) { @@ -632,13 +642,11 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, hdr += sizeof(struct vlan_hdr); } - if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4)) - return get_fixed_ipv4_csum(hw_checksum, skb, hdr); #if IS_ENABLED(CONFIG_IPV6) if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6)) return get_fixed_ipv6_csum(hw_checksum, skb, hdr); #endif - return 0; + return get_fixed_ipv4_csum(hw_checksum, skb, hdr); } int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget) @@ -650,6 +658,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud int cq_ring = cq->ring; bool doorbell_pending; struct mlx4_cqe *cqe; + struct xdp_buff xdp; int polled = 0; int index; @@ -664,6 +673,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */ rcu_read_lock(); xdp_prog = rcu_dereference(ring->xdp_prog); + xdp.rxq = &ring->xdp_rxq; doorbell_pending = 0; /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx @@ -748,7 +758,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud * read bytes but not past the end of the frag. */ if (xdp_prog) { - struct xdp_buff xdp; dma_addr_t dma; void *orig_data; u32 act; @@ -814,33 +823,33 @@ xdp_drop_no_cnt: if (likely(dev->features & NETIF_F_RXCSUM)) { if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) { - if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && - cqe->checksum == cpu_to_be16(0xffff)) { - bool l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && - (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); - - ip_summed = CHECKSUM_UNNECESSARY; - hash_type = PKT_HASH_TYPE_L4; - if (l2_tunnel) - skb->csum_level = 1; - ring->csum_ok++; - } else { + bool l2_tunnel; + + if (!((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && + cqe->checksum == cpu_to_be16(0xffff))) goto csum_none; - } + + l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && + (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + ip_summed = CHECKSUM_UNNECESSARY; + hash_type = PKT_HASH_TYPE_L4; + if (l2_tunnel) + skb->csum_level = 1; + ring->csum_ok++; } else { - if (priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && - (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | - MLX4_CQE_STATUS_IPV6))) { - if (check_csum(cqe, skb, va, dev->features)) { - goto csum_none; - } else { - ip_summed = CHECKSUM_COMPLETE; - hash_type = PKT_HASH_TYPE_L3; - ring->csum_complete++; - } - } else { + if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP && + (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 | +#if IS_ENABLED(CONFIG_IPV6) + MLX4_CQE_STATUS_IPV6)))) +#else + 0)))) +#endif goto csum_none; - } + if (check_csum(cqe, skb, va, dev->features)) + goto csum_none; + ip_summed = CHECKSUM_COMPLETE; + hash_type = PKT_HASH_TYPE_L3; + ring->csum_complete++; } } else { csum_none: diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 2b72677eccd4..f470ae37d937 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -46,6 +46,7 @@ #endif #include <linux/cpu_rmap.h> #include <linux/ptp_clock_kernel.h> +#include <net/xdp.h> #include <linux/mlx4/device.h> #include <linux/mlx4/qp.h> @@ -356,6 +357,7 @@ struct mlx4_en_rx_ring { unsigned long dropped; int 
hwtstamp_rx_filter; cpumask_var_t affinity_mask; + struct xdp_rxq_info xdp_rxq; }; struct mlx4_en_cq { @@ -479,6 +481,7 @@ struct mlx4_en_frag_info { #define MLX4_EN_BW_MIN 1 #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */ +#define MLX4_EN_TC_VENDOR 0 #define MLX4_EN_TC_ETS 7 enum dcb_pfc_type { @@ -719,7 +722,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, - u32 size, u16 stride, int node); + u32 size, u16 stride, int node, int queue_index); void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index c7c0764991c9..2e84f10f59ba 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -1103,30 +1103,16 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable); void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u32 *lkey, u32 *rkey) { - struct mlx4_cmd_mailbox *mailbox; - int err; - if (!fmr->maps) return; - fmr->maps = 0; + /* To unmap: it is sufficient to take back ownership from HW */ + *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW; - mailbox = mlx4_alloc_cmd_mailbox(dev); - if (IS_ERR(mailbox)) { - err = PTR_ERR(mailbox); - pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err); - return; - } + /* Make sure MPT status is visible */ + wmb(); - err = mlx4_HW2SW_MPT(dev, NULL, - key_to_hw_index(fmr->mr.key) & - (dev->caps.num_mpts - 1)); - mlx4_free_cmd_mailbox(dev, mailbox); - if (err) { - pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err); - return; - } - fmr->mr.enabled = MLX4_MPT_EN_SW; + fmr->maps = 0; } EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); @@ -1136,6 +1122,22 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr) if (fmr->maps) return -EBUSY; + if (fmr->mr.enabled == MLX4_MPT_EN_HW) { + /* In case the FMR was enabled and unmapped, + * make sure to give ownership of the MPT back to HW + * so the HW2SW_MPT command will succeed.
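+ * The MPT is briefly taken into SW ownership so its length and start + * fields can be cleared before handing it back to HW.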
+ */ + *(u8 *)fmr->mpt = MLX4_MPT_STATUS_SW; + /* Make sure MPT status is visible before changing MPT fields */ + wmb(); + fmr->mpt->length = 0; + fmr->mpt->start = 0; + /* Make sure MPT data is visible after changing MPT status */ + wmb(); + *(u8 *)fmr->mpt = MLX4_MPT_STATUS_HW; + /* make sure MPT status is visible */ + wmb(); + } ret = mlx4_mr_free(dev, &fmr->mr); if (ret) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 19b21b40ab07..c805769d92a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \ + en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ en_arfs.o en_fs_ethtool.o en_selftest.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 543060c305a0..d629da213511 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -46,6 +46,8 @@ #include <linux/mlx5/transobj.h> #include <linux/rhashtable.h> #include <net/switchdev.h> +#include <net/xdp.h> +#include <linux/net_dim.h> #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" @@ -226,12 +228,6 @@ enum mlx5e_priv_flag { #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #endif -struct mlx5e_cq_moder { - u16 usec; - u16 pkts; - u8 cq_period_mode; -}; - struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@ -242,8 +238,8 @@ struct mlx5e_params { u16 num_channels; u8 num_tc; bool rx_cqe_compress_def; - struct mlx5e_cq_moder rx_cq_moderation; - struct mlx5e_cq_moder tx_cq_moderation; + struct net_dim_cq_moder rx_cq_moderation; + struct net_dim_cq_moder tx_cq_moderation; bool lro_en; u32 lro_wqe_sz; u16 tx_max_inline; @@ -253,7 +249,7 @@ struct mlx5e_params { u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; bool vlan_strip_disable; bool scatter_fcs_en; - bool rx_am_enabled; + bool rx_dim_enabled; u32 lro_timeout; u32 pflags; struct bpf_prog *xdp_prog; @@ -472,32 +468,6 @@ struct mlx5e_mpw_info { u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; }; -struct mlx5e_rx_am_stats { - int ppms; /* packets per msec */ - int bpms; /* bytes per msec */ - int epms; /* events per msec */ -}; - -struct mlx5e_rx_am_sample { - ktime_t time; - u32 pkt_ctr; - u32 byte_ctr; - u16 event_ctr; -}; - -struct mlx5e_rx_am { /* Adaptive Moderation */ - u8 state; - struct mlx5e_rx_am_stats prev_stats; - struct mlx5e_rx_am_sample start_sample; - struct work_struct work; - u8 profile_ix; - u8 mode; - u8 tune_state; - u8 steps_right; - u8 steps_left; - u8 tired; -}; - /* a single cache unit is capable to serve one napi call (for non-striding rq) * or a MPWQE (for striding rq). 
*/ @@ -558,7 +528,7 @@ struct mlx5e_rq { unsigned long state; int ix; - struct mlx5e_rx_am am; /* Adaptive Moderation */ + struct net_dim dim; /* Dynamic Interrupt Moderation */ /* XDP */ struct bpf_prog *xdp_prog; @@ -571,6 +541,9 @@ struct mlx5e_rq { u32 rqn; struct mlx5_core_dev *mdev; struct mlx5_core_mkey umr_mkey; + + /* XDP read-mostly */ + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; struct mlx5e_channel { @@ -655,6 +628,7 @@ struct mlx5e_tc_table { struct rhashtable ht; DECLARE_HASHTABLE(mod_hdr_tbl, 8); + DECLARE_HASHTABLE(hairpin_tbl, 8); }; struct mlx5e_vlan_table { @@ -860,10 +834,6 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); -void mlx5e_rx_am(struct mlx5e_rq *rq); -void mlx5e_rx_am_work(struct work_struct *work); -struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); - void mlx5e_update_stats(struct mlx5e_priv *priv, bool full); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); @@ -1110,4 +1080,5 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); +void mlx5e_rx_dim_work(struct work_struct *work); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c new file mode 100644 index 000000000000..602851ab5b14 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/net_dim.h> +#include "en.h" + +void mlx5e_rx_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, + work); + struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim); + struct net_dim_cq_moder cur_profile = net_dim_get_profile(dim->mode, + dim->profile_ix); + + mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, + cur_profile.usec, cur_profile.pkts); + + dim->state = NET_DIM_START_MEASURE; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 8f05efa5c829..bd5af7f37198 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -296,7 +296,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, struct mlx5e_channels new_channels = {}; u32 rx_pending_wqes; u32 min_rq_size; - u32 max_rq_size; u8 log_rq_size; u8 log_sq_size; u32 num_mtts; @@ -315,8 +314,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, 1 << mlx5_min_log_rq_size(rq_wq_type)); - max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, - 1 << mlx5_max_log_rq_size(rq_wq_type)); rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, param->rx_pending); @@ -326,12 +323,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, min_rq_size); return -EINVAL; } - if (param->rx_pending > max_rq_size) { - netdev_info(priv->netdev, "%s: rx_pending (%d) > max (%d)\n", - __func__, param->rx_pending, - max_rq_size); - return -EINVAL; - } num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes); if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && @@ -347,12 +338,6 @@ int mlx5e_ethtool_set_ringparam(struct mlx5e_priv *priv, 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE); return -EINVAL; } - if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) { - netdev_info(priv->netdev, "%s: tx_pending (%d) > max (%d)\n", - __func__, param->tx_pending, - 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE); - return -EINVAL; - } log_rq_size = order_base_2(rx_pending_wqes); log_sq_size = order_base_2(param->tx_pending); @@ -480,7 +465,7 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts; coal->tx_coalesce_usecs = priv->channels.params.tx_cq_moderation.usec; coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts; - coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled; + coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; return 0; } @@ -534,7 +519,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; - new_channels.params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + new_channels.params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; @@ -542,7 +527,7 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } /* we are opened */ - reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled; + reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; if (!reset) { mlx5e_set_priv_channels_coalesce(priv, coal); priv->channels.params = 
new_channels.params; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3aa1c90e7c86..bbbdb5c0086b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -582,6 +582,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, goto err_rq_wq_destroy; } + err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix); + if (err < 0) + goto err_rq_wq_destroy; + rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; rq->buff.headroom = params->rq_headroom; @@ -674,8 +678,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, wqe->data.lkey = rq->mkey_be; } - INIT_WORK(&rq->am.work, mlx5e_rx_am_work); - rq->am.mode = params->rx_cq_moderation.cq_period_mode; + INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); + + switch (params->rx_cq_moderation.cq_period_mode) { + case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: + rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE; + break; + case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: + default: + rq->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + rq->page_cache.head = 0; rq->page_cache.tail = 0; @@ -687,6 +700,7 @@ err_destroy_umr_mkey: err_rq_wq_destroy: if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); + xdp_rxq_info_unreg(&rq->xdp_rxq); mlx5_wq_destroy(&rq->wq_ctrl); return err; @@ -699,6 +713,8 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq) if (rq->xdp_prog) bpf_prog_put(rq->xdp_prog); + xdp_rxq_info_unreg(&rq->xdp_rxq); + switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: mlx5e_rq_free_mpwqe_info(rq); @@ -919,7 +935,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_destroy_rq; - if (params->rx_am_enabled) + if (params->rx_dim_enabled) c->rq.state |= BIT(MLX5E_RQ_STATE_AM); return 0; @@ -952,7 +968,7 @@ static void mlx5e_deactivate_rq(struct mlx5e_rq *rq) static void mlx5e_close_rq(struct mlx5e_rq *rq) { - cancel_work_sync(&rq->am.work); + cancel_work_sync(&rq->dim.work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); mlx5e_free_rq(rq); @@ -1565,7 +1581,7 @@ static void mlx5e_destroy_cq(struct mlx5e_cq *cq) } static int mlx5e_open_cq(struct mlx5e_channel *c, - struct mlx5e_cq_moder moder, + struct net_dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_cq *cq) { @@ -1747,7 +1763,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { - struct mlx5e_cq_moder icocq_moder = {0, 0}; + struct net_dim_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; @@ -1999,7 +2015,7 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, mlx5e_build_common_cq_param(priv, param); - param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + param->cq_period_mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@ -2766,6 +2782,9 @@ static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev, if (err) return err; + /* Mark as unused given "Drop-RQ" packets never reach XDP */ + xdp_rxq_info_unused(&rq->xdp_rxq); + rq->mdev = mdev; return 0; @@ -4038,9 +4057,18 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) params->rx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; - if (params->rx_am_enabled) - params->rx_cq_moderation = - mlx5e_am_get_def_profile(cq_period_mode); + if (params->rx_dim_enabled) { + switch 
(cq_period_mode) { + case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: + params->rx_cq_moderation = + net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE); + break; + case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: + default: + params->rx_cq_moderation = + net_dim_get_def_profile(NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE); + } + } MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, params->rx_cq_moderation.cq_period_mode == @@ -4102,7 +4130,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); mlx5e_set_tx_cq_mode_params(params, cq_period_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2c43606c26b5..4d1b0ff4b6e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) return 0; } +static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_sq *rep_sq, *tmp; + struct mlx5e_rep_priv *rpriv; + + if (esw->mode != SRIOV_OFFLOADS) + return; + + rpriv = mlx5e_rep_to_rep_priv(rep); + list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) { + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule); + list_del(&rep_sq->list); + kfree(rep_sq); + } +} + +static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + u16 *sqns_array, int sqns_num) +{ + struct mlx5_flow_handle *flow_rule; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_sq *rep_sq; + int err; + int i; + + if (esw->mode != SRIOV_OFFLOADS) + return 0; + + rpriv = mlx5e_rep_to_rep_priv(rep); + for (i = 0; i < sqns_num; i++) { + rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL); + if (!rep_sq) { + err = -ENOMEM; + goto out_err; + } + + /* Add re-inject rule to the PF/representor sqs */ + flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, + rep->vport, + sqns_array[i]); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + kfree(rep_sq); + goto out_err; + } + rep_sq->send_to_vport_rule = flow_rule; + list_add(&rep_sq->list, &rpriv->vport_sqs_list); + } + return 0; + +out_err: + mlx5e_sqs2vport_stop(esw, rep); + return err; +} + int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) sqs[num_sqs++] = c->sq[tc].sqn; } - err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs); + err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs); kfree(sqs); out: @@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; - mlx5_eswitch_sqs2vport_stop(esw, rep); + mlx5e_sqs2vport_stop(esw, rep); } static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) @@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv) #endif unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); - struct net_device *netdev = rpriv->rep->netdev; + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); 
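
The two helpers above move the send-to-vport rule bookkeeping from the eswitch core into the mlx5e rep: mlx5e_sqs2vport_start() allocates one mlx5e_rep_sq node per SQ, links it into rpriv->vport_sqs_list, and on any mid-loop failure unwinds by running mlx5e_sqs2vport_stop() over whatever was linked so far. What follows is a minimal userspace C sketch of that build-up/tear-down shape, not driver code; the names (rule_node, start_rules, stop_rules) are invented for the illustration.

/* Sketch only: one tracking node per resource, prepend to a list,
 * and on failure unwind by running the stop path over the list. */
#include <stdio.h>
#include <stdlib.h>

struct rule_node {
	int rule_id;			/* stands in for the flow handle */
	struct rule_node *next;		/* stands in for the list linkage */
};

static void stop_rules(struct rule_node **head)
{
	struct rule_node *n;

	while ((n = *head) != NULL) {	/* like list_for_each_entry_safe */
		*head = n->next;
		printf("deleting rule %d\n", n->rule_id);
		free(n);
	}
}

static int start_rules(struct rule_node **head, const int *sqns, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct rule_node *n = calloc(1, sizeof(*n));

		if (!n || sqns[i] < 0) {	/* alloc or rule-add failure */
			free(n);
			stop_rules(head);	/* unwind what was added so far */
			return -1;
		}
		n->rule_id = sqns[i];
		n->next = *head;		/* list_add(): prepend */
		*head = n;
	}
	return 0;
}

int main(void)
{
	struct rule_node *head = NULL;
	int sqns[] = { 11, 12, 13 };

	if (start_rules(&head, sqns, 3) == 0)
		stop_rules(&head);
	return 0;
}
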
rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval); @@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work) { struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv, neigh_update.neigh_stats_work.work); - struct net_device *netdev = rpriv->rep->netdev; + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_neigh_hash_entry *nhe; @@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb, struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, neigh_update.netevent_nb); struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - struct net_device *netdev = rpriv->rep->netdev; + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_neigh_hash_entry *nhe = NULL; struct mlx5e_neigh m_neigh = {}; @@ -483,7 +540,7 @@ out_err: static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv) { struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update; - struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); unregister_netevent_notifier(&neigh_update->netevent_nb); @@ -827,7 +884,7 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; - params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); @@ -904,7 +961,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) err = PTR_ERR(flow_rule); goto err_destroy_direct_tirs; } - rep->vport_rx_rule = flow_rule; + rpriv->vport_rx_rule = flow_rule; err = mlx5e_tc_init(priv); if (err) @@ -913,7 +970,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) return 0; err_del_flow_rule: - mlx5_del_flow_rules(rep->vport_rx_rule); + mlx5_del_flow_rules(rpriv->vport_rx_rule); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_direct_rqts: @@ -924,10 +981,9 @@ err_destroy_direct_rqts: static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep = rpriv->rep; mlx5e_tc_cleanup(priv); - mlx5_del_flow_rules(rep->vport_rx_rule); + mlx5_del_flow_rules(rpriv->vport_rx_rule); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_direct_rqts(priv); } @@ -967,10 +1023,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = { /* e-Switch vport representors */ static int -mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = netdev_priv(rep->netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); int err; @@ -992,10 +1048,10 @@ err_remove_sqs: } static void -mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep) { - struct mlx5e_priv *priv = netdev_priv(rep->netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) 
mlx5e_remove_sqs_fwd_rules(priv); @@ -1008,8 +1064,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) } static int -mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { + struct mlx5e_rep_priv *uplink_rpriv; struct mlx5e_rep_priv *rpriv; struct net_device *netdev; struct mlx5e_priv *upriv; @@ -1019,7 +1076,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) if (!rpriv) return -ENOMEM; - netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv); + netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv); if (!netdev) { pr_warn("Failed to create representor netdev for vport %d\n", rep->vport); @@ -1027,8 +1084,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) return -EINVAL; } - rep->netdev = netdev; + rpriv->netdev = netdev; rpriv->rep = rep; + rep->rep_if[REP_ETH].priv = rpriv; + INIT_LIST_HEAD(&rpriv->vport_sqs_list); err = mlx5e_attach_netdev(netdev_priv(netdev)); if (err) { @@ -1044,7 +1103,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) goto err_detach_netdev; } - upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw)); + uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH); + upriv = netdev_priv(uplink_rpriv->netdev); err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb, upriv); if (err) @@ -1076,16 +1136,19 @@ err_destroy_netdev: } static void -mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) { - struct net_device *netdev = rep->netdev; + struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); + struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_rep_priv *uplink_rpriv; void *ppriv = priv->ppriv; struct mlx5e_priv *upriv; - unregister_netdev(rep->netdev); - upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw)); + unregister_netdev(netdev); + uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, + REP_ETH); + upriv = netdev_priv(uplink_rpriv->netdev); tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, upriv); mlx5e_rep_neigh_cleanup(rpriv); @@ -1100,18 +1163,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = mdev->priv.eswitch; int total_vfs = MLX5_TOTAL_VPORTS(mdev); int vport; - u8 mac[ETH_ALEN]; - - mlx5_query_nic_vport_mac_address(mdev, 0, mac); for (vport = 1; vport < total_vfs; vport++) { - struct mlx5_eswitch_rep rep; + struct mlx5_eswitch_rep_if rep_if = {}; - rep.load = mlx5e_vport_rep_load; - rep.unload = mlx5e_vport_rep_unload; - rep.vport = vport; - ether_addr_copy(rep.hw_id, mac); - mlx5_eswitch_register_vport_rep(esw, vport, &rep); + rep_if.load = mlx5e_vport_rep_load; + rep_if.unload = mlx5e_vport_rep_unload; + mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH); } } @@ -1123,21 +1181,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv) int vport; for (vport = 1; vport < total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport); + mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH); } void mlx5e_register_vport_reps(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_eswitch *esw = mdev->priv.eswitch; - struct mlx5_eswitch_rep rep; + struct mlx5_eswitch_rep_if rep_if; + 
struct mlx5e_rep_priv *rpriv; + + rpriv = priv->ppriv; + rpriv->netdev = priv->netdev; - mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); - rep.load = mlx5e_nic_rep_load; - rep.unload = mlx5e_nic_rep_unload; - rep.vport = FDB_UPLINK_VPORT; - rep.netdev = priv->netdev; - mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/ + rep_if.load = mlx5e_nic_rep_load; + rep_if.unload = mlx5e_nic_rep_unload; + rep_if.priv = rpriv; + INIT_LIST_HEAD(&rpriv->vport_sqs_list); + mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/ mlx5e_rep_register_vf_vports(priv); /* VFs vports */ } @@ -1148,7 +1209,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = mdev->priv.eswitch; mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ - mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/ + mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/ } void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 5659ed9f51e6..b9b481f2833a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table { struct mlx5e_rep_priv { struct mlx5_eswitch_rep *rep; struct mlx5e_neigh_update_table neigh_update; + struct net_device *netdev; + struct mlx5_flow_handle *vport_rx_rule; + struct list_head vport_sqs_list; }; +static inline +struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep) +{ + return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv; +} + struct mlx5e_neigh { struct net_device *dev; union { @@ -124,6 +133,11 @@ struct mlx5e_encap_entry { int encap_size; }; +struct mlx5e_rep_sq { + struct mlx5_flow_handle *send_to_vport_rule; + struct list_head list; +}; + void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev); void mlx5e_register_vport_reps(struct mlx5e_priv *priv); void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 5b499c7a698f..ff234dfefc27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -495,8 +495,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, mlx5_cqwq_pop(&cq->wq); if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { - WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", - cqe->op_own); + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own); return; } @@ -506,9 +506,8 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, } if (unlikely(icowi->opcode != MLX5_OPCODE_NOP)) - WARN_ONCE(true, - "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", - icowi->opcode); + netdev_WARN_ONCE(cq->channel->netdev, + "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode); } static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) @@ -812,6 +811,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + *len; xdp.data_hard_start = va; + xdp.rxq = &rq->xdp_rxq; act = bpf_prog_run_xdp(prog, &xdp); switch (act) { @@ -1175,7 +1175,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { + struct hwtstamp_config *tstamp; struct net_device *netdev; + struct mlx5e_priv *priv; char *pseudo_header; u32 qpn; u8 *dgid; @@ 
-1194,6 +1196,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, return; } + priv = mlx5i_epriv(netdev); + tstamp = &priv->tstamp; + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; if ((!g) || dgid[0] != 0xff) @@ -1214,7 +1219,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); - if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) + if (unlikely(mlx5e_rx_hw_stamp(tstamp))) skb_hwtstamps(skb)->hwtstamp = mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c deleted file mode 100644 index e401d9d245f3..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2016, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include "en.h" - -/* Adaptive moderation profiles */ -#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 -#define MLX5E_RX_AM_DEF_PROFILE_CQE 1 -#define MLX5E_RX_AM_DEF_PROFILE_EQE 1 -#define MLX5E_PARAMS_AM_NUM_PROFILES 5 - -/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */ -#define MLX5_AM_EQE_PROFILES { \ - {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ - {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ -} - -#define MLX5_AM_CQE_PROFILES { \ - {2, 256}, \ - {8, 128}, \ - {16, 64}, \ - {32, 64}, \ - {64, 64} \ -} - -static const struct mlx5e_cq_moder -profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = { - MLX5_AM_EQE_PROFILES, - MLX5_AM_CQE_PROFILES, -}; - -static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix) -{ - struct mlx5e_cq_moder cq_moder; - - cq_moder = profile[cq_period_mode][ix]; - cq_moder.cq_period_mode = cq_period_mode; - return cq_moder; -} - -struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode) -{ - int default_profile_ix; - - if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) - default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE; - else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */ - default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE; - - return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix); -} - -/* Adaptive moderation logic */ -enum { - MLX5E_AM_START_MEASURE, - MLX5E_AM_MEASURE_IN_PROGRESS, - MLX5E_AM_APPLY_NEW_PROFILE, -}; - -enum { - MLX5E_AM_PARKING_ON_TOP, - MLX5E_AM_PARKING_TIRED, - MLX5E_AM_GOING_RIGHT, - MLX5E_AM_GOING_LEFT, -}; - -enum { - MLX5E_AM_STATS_WORSE, - MLX5E_AM_STATS_SAME, - MLX5E_AM_STATS_BETTER, -}; - -enum { - MLX5E_AM_STEPPED, - MLX5E_AM_TOO_TIRED, - MLX5E_AM_ON_EDGE, -}; - -static bool mlx5e_am_on_top(struct mlx5e_rx_am *am) -{ - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - case MLX5E_AM_PARKING_TIRED: - return true; - case MLX5E_AM_GOING_RIGHT: - return (am->steps_left > 1) && (am->steps_right == 1); - default: /* MLX5E_AM_GOING_LEFT */ - return (am->steps_right > 1) && (am->steps_left == 1); - } -} - -static void mlx5e_am_turn(struct mlx5e_rx_am *am) -{ - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - case MLX5E_AM_PARKING_TIRED: - break; - case MLX5E_AM_GOING_RIGHT: - am->tune_state = MLX5E_AM_GOING_LEFT; - am->steps_left = 0; - break; - case MLX5E_AM_GOING_LEFT: - am->tune_state = MLX5E_AM_GOING_RIGHT; - am->steps_right = 0; - break; - } -} - -static int mlx5e_am_step(struct mlx5e_rx_am *am) -{ - if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2)) - return MLX5E_AM_TOO_TIRED; - - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - case MLX5E_AM_PARKING_TIRED: - break; - case MLX5E_AM_GOING_RIGHT: - if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) - return MLX5E_AM_ON_EDGE; - am->profile_ix++; - am->steps_right++; - break; - case MLX5E_AM_GOING_LEFT: - if (am->profile_ix == 0) - return MLX5E_AM_ON_EDGE; - am->profile_ix--; - am->steps_left++; - break; - } - - am->tired++; - return MLX5E_AM_STEPPED; -} - -static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am) -{ - am->steps_right = 0; - am->steps_left = 0; - am->tired = 0; - am->tune_state = MLX5E_AM_PARKING_ON_TOP; -} - -static void mlx5e_am_park_tired(struct mlx5e_rx_am *am) -{ - am->steps_right = 0; - am->steps_left = 0; - am->tune_state = 
MLX5E_AM_PARKING_TIRED; -} - -static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) -{ - am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT : - MLX5E_AM_GOING_RIGHT; - mlx5e_am_step(am); -} - -#define IS_SIGNIFICANT_DIFF(val, ref) \ - (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ - -static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, - struct mlx5e_rx_am_stats *prev) -{ - if (!prev->bpms) - return curr->bpms ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_SAME; - - if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) - return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; - - if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) - return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; - - if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) - return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; - - return MLX5E_AM_STATS_SAME; -} - -static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats, - struct mlx5e_rx_am *am) -{ - int prev_state = am->tune_state; - int prev_ix = am->profile_ix; - int stats_res; - int step_res; - - switch (am->tune_state) { - case MLX5E_AM_PARKING_ON_TOP: - stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); - if (stats_res != MLX5E_AM_STATS_SAME) - mlx5e_am_exit_parking(am); - break; - - case MLX5E_AM_PARKING_TIRED: - am->tired--; - if (!am->tired) - mlx5e_am_exit_parking(am); - break; - - case MLX5E_AM_GOING_RIGHT: - case MLX5E_AM_GOING_LEFT: - stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); - if (stats_res != MLX5E_AM_STATS_BETTER) - mlx5e_am_turn(am); - - if (mlx5e_am_on_top(am)) { - mlx5e_am_park_on_top(am); - break; - } - - step_res = mlx5e_am_step(am); - switch (step_res) { - case MLX5E_AM_ON_EDGE: - mlx5e_am_park_on_top(am); - break; - case MLX5E_AM_TOO_TIRED: - mlx5e_am_park_tired(am); - break; - } - - break; - } - - if ((prev_state != MLX5E_AM_PARKING_ON_TOP) || - (am->tune_state != MLX5E_AM_PARKING_ON_TOP)) - am->prev_stats = *curr_stats; - - return am->profile_ix != prev_ix; -} - -static void mlx5e_am_sample(struct mlx5e_rq *rq, - struct mlx5e_rx_am_sample *s) -{ - s->time = ktime_get(); - s->pkt_ctr = rq->stats.packets; - s->byte_ctr = rq->stats.bytes; - s->event_ctr = rq->cq.event_ctr; -} - -#define MLX5E_AM_NEVENTS 64 -#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) -#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) - -static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, - struct mlx5e_rx_am_sample *end, - struct mlx5e_rx_am_stats *curr_stats) -{ - /* u32 holds up to 71 minutes, should be enough */ - u32 delta_us = ktime_us_delta(end->time, start->time); - u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); - u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, - start->byte_ctr); - - if (!delta_us) - return; - - curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); - curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); - curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, - delta_us); -} - -void mlx5e_rx_am_work(struct work_struct *work) -{ - struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am, - work); - struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am); - struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix]; - - mlx5_core_modify_cq_moderation(rq->mdev, &rq->cq.mcq, - cur_profile.usec, cur_profile.pkts); - - am->state = 
MLX5E_AM_START_MEASURE; -} - -void mlx5e_rx_am(struct mlx5e_rq *rq) -{ - struct mlx5e_rx_am *am = &rq->am; - struct mlx5e_rx_am_sample end_sample; - struct mlx5e_rx_am_stats curr_stats; - u16 nevents; - - switch (am->state) { - case MLX5E_AM_MEASURE_IN_PROGRESS: - nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, - am->start_sample.event_ctr); - if (nevents < MLX5E_AM_NEVENTS) - break; - mlx5e_am_sample(rq, &end_sample); - mlx5e_am_calc_stats(&am->start_sample, &end_sample, - &curr_stats); - if (mlx5e_am_decision(&curr_stats, am)) { - am->state = MLX5E_AM_APPLY_NEW_PROFILE; - schedule_work(&am->work); - break; - } - /* fall through */ - case MLX5E_AM_START_MEASURE: - mlx5e_am_sample(rq, &am->start_sample); - am->state = MLX5E_AM_MEASURE_IN_PROGRESS; - break; - case MLX5E_AM_APPLY_NEW_PROFILE: - break; - } -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3e03d2e8f96a..cf528da51243 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -56,12 +56,14 @@ struct mlx5_nic_flow_attr { u32 action; u32 flow_tag; u32 mod_hdr_id; + u32 hairpin_tirn; }; enum { MLX5E_TC_FLOW_ESWITCH = BIT(0), MLX5E_TC_FLOW_NIC = BIT(1), MLX5E_TC_FLOW_OFFLOADED = BIT(2), + MLX5E_TC_FLOW_HAIRPIN = BIT(3), }; struct mlx5e_tc_flow { @@ -71,6 +73,7 @@ struct mlx5e_tc_flow { struct mlx5_flow_handle *rule; struct list_head encap; /* flows sharing the same encap ID */ struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ + struct list_head hairpin; /* flows sharing the same hairpin */ union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; @@ -93,6 +96,25 @@ enum { #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16) +struct mlx5e_hairpin { + struct mlx5_hairpin *pair; + + struct mlx5_core_dev *func_mdev; + u32 tdn; + u32 tirn; +}; + +struct mlx5e_hairpin_entry { + /* a node of a hash table which keeps all the hairpin entries */ + struct hlist_node hairpin_hlist; + + /* flows sharing the same hairpin */ + struct list_head flows; + + int peer_ifindex; + struct mlx5e_hairpin *hp; +}; + struct mod_hdr_key { int num_actions; void *actions; @@ -222,6 +244,187 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, } } +static +struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) +{ + struct net_device *netdev; + struct mlx5e_priv *priv; + + netdev = __dev_get_by_index(net, ifindex); + priv = netdev_priv(netdev); + return priv->mdev; +} + +static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) +{ + u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0}; + void *tirc; + int err; + + err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn); + if (err) + goto alloc_tdn_err; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn); + MLX5_SET(tirc, tirc, transport_domain, hp->tdn); + + err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn); + if (err) + goto create_tir_err; + + return 0; + +create_tir_err: + mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); +alloc_tdn_err: + return err; +} + +static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) +{ + mlx5_core_destroy_tir(hp->func_mdev, hp->tirn); + mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); +} + +static struct mlx5e_hairpin * +mlx5e_hairpin_create(struct 
mlx5e_priv *priv, struct mlx5_hairpin_params *params, + int peer_ifindex) +{ + struct mlx5_core_dev *func_mdev, *peer_mdev; + struct mlx5e_hairpin *hp; + struct mlx5_hairpin *pair; + int err; + + hp = kzalloc(sizeof(*hp), GFP_KERNEL); + if (!hp) + return ERR_PTR(-ENOMEM); + + func_mdev = priv->mdev; + peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex); + + pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params); + if (IS_ERR(pair)) { + err = PTR_ERR(pair); + goto create_pair_err; + } + hp->pair = pair; + hp->func_mdev = func_mdev; + + err = mlx5e_hairpin_create_transport(hp); + if (err) + goto create_transport_err; + + return hp; + +create_transport_err: + mlx5_core_hairpin_destroy(hp->pair); +create_pair_err: + kfree(hp); + return ERR_PTR(err); +} + +static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp) +{ + mlx5e_hairpin_destroy_transport(hp); + mlx5_core_hairpin_destroy(hp->pair); + kvfree(hp); +} + +static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, + int peer_ifindex) +{ + struct mlx5e_hairpin_entry *hpe; + + hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe, + hairpin_hlist, peer_ifindex) { + if (hpe->peer_ifindex == peer_ifindex) + return hpe; + } + + return NULL; +} + +static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow, + struct mlx5e_tc_flow_parse_attr *parse_attr) +{ + int peer_ifindex = parse_attr->mirred_ifindex; + struct mlx5_hairpin_params params; + struct mlx5e_hairpin_entry *hpe; + struct mlx5e_hairpin *hp; + int err; + + if (!MLX5_CAP_GEN(priv->mdev, hairpin)) { + netdev_warn(priv->netdev, "hairpin is not supported\n"); + return -EOPNOTSUPP; + } + + hpe = mlx5e_hairpin_get(priv, peer_ifindex); + if (hpe) + goto attach_flow; + + hpe = kzalloc(sizeof(*hpe), GFP_KERNEL); + if (!hpe) + return -ENOMEM; + + INIT_LIST_HEAD(&hpe->flows); + hpe->peer_ifindex = peer_ifindex; + + params.log_data_size = 15; + params.log_data_size = min_t(u8, params.log_data_size, + MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz)); + params.log_data_size = max_t(u8, params.log_data_size, + MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz)); + params.q_counter = priv->q_counter; + + hp = mlx5e_hairpin_create(priv, &params, peer_ifindex); + if (IS_ERR(hp)) { + err = PTR_ERR(hp); + goto create_hairpin_err; + } + + netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x log data size %d\n", + hp->tirn, hp->pair->rqn, hp->pair->peer_mdev->priv.name, + hp->pair->sqn, params.log_data_size); + + hpe->hp = hp; + hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, peer_ifindex); + +attach_flow: + flow->nic_attr->hairpin_tirn = hpe->hp->tirn; + list_add(&flow->hairpin, &hpe->flows); + return 0; + +create_hairpin_err: + kfree(hpe); + return err; +} + +static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + struct list_head *next = flow->hairpin.next; + + list_del(&flow->hairpin); + + /* no more hairpin flows for us, release the hairpin pair */ + if (list_empty(next)) { + struct mlx5e_hairpin_entry *hpe; + + hpe = list_entry(next, struct mlx5e_hairpin_entry, flows); + + netdev_dbg(priv->netdev, "del hairpin: peer %s\n", + hpe->hp->pair->peer_mdev->priv.name); + + mlx5e_hairpin_destroy(hpe->hp); + hash_del(&hpe->hairpin_hlist); + kfree(hpe); + } +} + static struct mlx5_flow_handle * mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, @@ -229,7 +432,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, { struct 
mlx5_nic_flow_attr *attr = flow->nic_attr; struct mlx5_core_dev *dev = priv->mdev; - struct mlx5_flow_destination dest = {}; + struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = { .action = attr->action, .flow_tag = attr->flow_tag, @@ -238,18 +441,33 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, struct mlx5_fc *counter = NULL; struct mlx5_flow_handle *rule; bool table_created = false; - int err; + int err, dest_ix = 0; if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.vlan.ft.t; - } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { - counter = mlx5_fc_create(dev, true); - if (IS_ERR(counter)) - return ERR_CAST(counter); + if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) { + err = mlx5e_hairpin_flow_add(priv, flow, parse_attr); + if (err) { + rule = ERR_PTR(err); + goto err_add_hairpin_flow; + } + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest[dest_ix].tir_num = attr->hairpin_tirn; + } else { + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[dest_ix].ft = priv->fs.vlan.ft.t; + } + dest_ix++; + } - dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; - dest.counter = counter; + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + counter = mlx5_fc_create(dev, true); + if (IS_ERR(counter)) { + rule = ERR_CAST(counter); + goto err_fc_create; + } + dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[dest_ix].counter = counter; + dest_ix++; } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { @@ -292,7 +510,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec, - &flow_act, &dest, 1); + &flow_act, dest, dest_ix); if (IS_ERR(rule)) goto err_add_rule; @@ -309,7 +527,10 @@ err_create_ft: mlx5e_detach_mod_hdr(priv, flow); err_create_mod_hdr_id: mlx5_fc_destroy(dev, counter); - +err_fc_create: + if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) + mlx5e_hairpin_flow_del(priv, flow); +err_add_hairpin_flow: return rule; } @@ -330,6 +551,9 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); + + if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) + mlx5e_hairpin_flow_del(priv, flow); } static void mlx5e_detach_encap(struct mlx5e_priv *priv, @@ -617,7 +841,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, FLOW_DISSECTOR_KEY_ENC_PORTS, f->mask); struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); + struct net_device *up_dev = uplink_rpriv->netdev; struct mlx5e_priv *up_priv = netdev_priv(up_dev); /* Full udp dst port must be given */ @@ -1421,6 +1646,20 @@ static bool actions_match_supported(struct mlx5e_priv *priv, return true; } +static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) +{ + struct mlx5_core_dev *fmdev, *pmdev; + u16 func_id, peer_id; + + fmdev = priv->mdev; + pmdev = peer_priv->mdev; + + func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn)); + peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn)); + + return (func_id == peer_id); +} + static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow) @@ -1465,6 +1704,23 @@ 
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EOPNOTSUPP; } + if (is_tcf_mirred_egress_redirect(a)) { + struct net_device *peer_dev = tcf_mirred_dev(a); + + if (priv->netdev->netdev_ops == peer_dev->netdev_ops && + same_hw_devs(priv, netdev_priv(peer_dev))) { + parse_attr->mirred_ifindex = peer_dev->ifindex; + flow->flags |= MLX5E_TC_FLOW_HAIRPIN; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + } else { + netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", + peer_dev->name); + return -EINVAL; + } + continue; + } + if (is_tcf_skbedit_mark(a)) { u32 mark = tcf_skbedit_mark(a); @@ -1507,6 +1763,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, int *out_ttl) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5e_rep_priv *uplink_rpriv; struct rtable *rt; struct neighbour *n = NULL; @@ -1520,9 +1777,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, #else return -EOPNOTSUPP; #endif + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); /* if the egress device isn't on the same HW e-switch, we use the uplink */ if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) - *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + *out_dev = uplink_rpriv->netdev; else *out_dev = rt->dst.dev; @@ -1547,6 +1805,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, struct dst_entry *dst; #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + struct mlx5e_rep_priv *uplink_rpriv; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int ret; @@ -1557,9 +1816,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, *out_ttl = ip6_dst_hoplimit(dst); + uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); /* if the egress device isn't on the same HW e-switch, we use the uplink */ if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) - *out_dev = mlx5_eswitch_get_uplink_netdev(esw); + *out_dev = uplink_rpriv->netdev; else *out_dev = dst->dev; #else @@ -1859,7 +2119,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); + struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, + REP_ETH); + struct net_device *up_dev = uplink_rpriv->netdev; unsigned short family = ip_tunnel_info_af(tun_info); struct mlx5e_priv *up_priv = netdev_priv(up_dev); struct mlx5_esw_flow_attr *attr = flow->esw_attr; @@ -2181,6 +2443,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv) struct mlx5e_tc_table *tc = &priv->fs.tc; hash_init(tc->mod_hdr_tbl); + hash_init(tc->hairpin_tbl); tc->ht_params = mlx5e_tc_flow_ht_params; return rhashtable_init(&tc->ht, &tc->ht_params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index ab92298eafc3..f292bb346985 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -78,8 +78,14 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); - if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) - mlx5e_rx_am(&c->rq); + if (MLX5E_TEST_BIT(c->rq.state, MLX5E_RQ_STATE_AM)) { + struct net_dim_sample dim_sample; + net_dim_sample(c->rq.cq.event_ctr, + c->rq.stats.packets, + c->rq.stats.bytes, + &dim_sample); + net_dim(&c->rq.dim, dim_sample); + } mlx5e_cq_arm(&c->rq.cq); 
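
With the en_txrx.c hunk above, the NAPI poll no longer calls the driver-private mlx5e_rx_am(): it snapshots the CQ event, packet, and byte counters into a net_dim_sample and lets the shared net_dim library run the same measure/compare/step algorithm that the deleted en_rx_am.c implemented. As the removed code shows, the core of a measurement window is a wraparound-safe counter delta, conversion to per-millisecond rates, and a more-than-10% significance test. Below is a small standalone C sketch of just that arithmetic, assuming free-running u32 counters; it is an illustration, not the library code.

/* Sketch only: the rate comparison at the heart of RX adaptive
 * moderation. For full-width u32 counters, BIT_GAP() from the removed
 * code reduces to natural unsigned wraparound. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t bit_gap(uint32_t end, uint32_t start)
{
	return end - start;	/* u32 arithmetic wraps, so this is safe */
}

/* More than 10% difference between val and ref counts as significant,
 * mirroring IS_SIGNIFICANT_DIFF(); guard ref == 0 like the original's
 * !prev->bpms check. */
static int significant_diff(uint64_t val, uint64_t ref)
{
	uint64_t d = val > ref ? val - ref : ref - val;

	return ref && (100 * d) / ref > 10;
}

int main(void)
{
	uint32_t pkt_start = 0xfffffff0, pkt_end = 0x00000400; /* wrapped */
	uint32_t delta_us = 2000;	/* 2 ms measurement window */
	uint32_t npkts = bit_gap(pkt_end, pkt_start);
	uint64_t ppms = (npkts * 1000ULL + delta_us - 1) / delta_us; /* DIV_ROUND_UP */

	printf("packets in window: %" PRIu32 ", ppms: %" PRIu64 "\n", npkts, ppms);
	printf("25%% jump significant? %d\n", significant_diff(125, 100));
	printf("5%% jump significant? %d\n", significant_diff(105, 100));
	return 0;
}
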
mlx5e_cq_arm(&c->icosq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index bbb140f517c4..5ecf2cddc16d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -37,6 +37,7 @@ #include <linux/mlx5/fs.h> #include "mlx5_core.h" #include "eswitch.h" +#include "fs_core.h" #define UPLINK_VPORT 0xFFFF @@ -867,9 +868,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); + root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS, + vport->vport); if (!root_ns) { - esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); + esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport); return -EOPNOTSUPP; } @@ -984,9 +986,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); - root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); + root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, + vport->vport); if (!root_ns) { - esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); + esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport); return -EOPNOTSUPP; } @@ -1121,8 +1124,12 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_fc *counter = vport->ingress.drop_counter; + struct mlx5_flow_destination drop_ctr_dst = {0}; + struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; + int dest_num = 0; int err = 0; u8 *smac_v; @@ -1186,9 +1193,18 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, memset(spec, 0, sizeof(*spec)); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + /* Attach drop flow counter */ + if (counter) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_ctr_dst.counter = counter; + dst = &drop_ctr_dst; + dest_num++; + } vport->ingress.drop_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, - &flow_act, NULL, 0); + &flow_act, dst, dest_num); if (IS_ERR(vport->ingress.drop_rule)) { err = PTR_ERR(vport->ingress.drop_rule); esw_warn(esw->dev, @@ -1208,8 +1224,12 @@ out: static int esw_vport_egress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_fc *counter = vport->egress.drop_counter; + struct mlx5_flow_destination drop_ctr_dst = {0}; + struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_spec *spec; + int dest_num = 0; int err = 0; esw_vport_cleanup_egress_rules(esw, vport); @@ -1260,9 +1280,18 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, /* Drop others rule (star rule) */ memset(spec, 0, sizeof(*spec)); flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + + /* Attach egress drop flow counter */ + if (counter) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + drop_ctr_dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + drop_ctr_dst.counter = counter; + dst = &drop_ctr_dst; + dest_num++; + } 
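
The egress ACL hunk above mirrors the ingress one before it: mlx5_add_flow_rules() takes a destination array plus an element count, so the drop counter is attached by filling in one extra destination only when the ACL capability advertises flow counters, and dest_num legitimately stays zero otherwise. A plain-C sketch of that optional-destination shape, with invented types (struct dest, add_drop_rule):

#include <stdio.h>

enum dest_type { DEST_NONE, DEST_COUNTER };

struct dest {
	enum dest_type type;
	int counter_id;
};

/* Stands in for mlx5_add_flow_rules(): dst may be NULL when dest_num is 0. */
static void add_drop_rule(const struct dest *dst, int dest_num)
{
	if (dest_num)
		printf("drop rule with counter %d attached\n", dst[0].counter_id);
	else
		printf("plain drop rule, no counter\n");
}

static void config_drop_rule(int have_counter, int counter_id)
{
	struct dest drop_ctr_dst = { 0 };
	struct dest *dst = NULL;
	int dest_num = 0;

	if (have_counter) {	/* mirrors the flow_counter capability check */
		drop_ctr_dst.type = DEST_COUNTER;
		drop_ctr_dst.counter_id = counter_id;
		dst = &drop_ctr_dst;
		dest_num++;
	}
	add_drop_rule(dst, dest_num);
}

int main(void)
{
	config_drop_rule(1, 42);
	config_drop_rule(0, 0);
	return 0;
}

Note also the direction flip visible in the later query hunk: the egress drop counter feeds rx_dropped and the ingress one feeds tx_dropped, because ACL directions are named from the eswitch's point of view, which is the opposite of the VF's.
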
vport->egress.drop_rule = mlx5_add_flow_rules(vport->egress.acl, spec, - &flow_act, NULL, 0); + &flow_act, dst, dest_num); if (IS_ERR(vport->egress.drop_rule)) { err = PTR_ERR(vport->egress.drop_rule); esw_warn(esw->dev, @@ -1290,7 +1319,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw) err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, - &tsar_ctx, + tsar_ctx, &esw->qos.root_tsar_id); if (err) { esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); @@ -1333,20 +1362,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num, if (vport->qos.enabled) return -EEXIST; - MLX5_SET(scheduling_context, &sched_ctx, element_type, + MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx, + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); MLX5_SET(vport_element, vport_elem, vport_number, vport_num); - MLX5_SET(scheduling_context, &sched_ctx, parent_element_id, + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, initial_max_rate); - MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share); + MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share); err = mlx5_create_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, - &sched_ctx, + sched_ctx, &vport->qos.esw_tsar_ix); if (err) { esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", @@ -1392,22 +1421,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num, if (!vport->qos.enabled) return -EIO; - MLX5_SET(scheduling_context, &sched_ctx, element_type, + MLX5_SET(scheduling_context, sched_ctx, element_type, SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx, + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); MLX5_SET(vport_element, vport_elem, vport_number, vport_num); - MLX5_SET(scheduling_context, &sched_ctx, parent_element_id, + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, &sched_ctx, max_average_bw, + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); - MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; err = mlx5_modify_scheduling_element_cmd(dev, SCHEDULING_HIERARCHY_E_SWITCH, - &sched_ctx, + sched_ctx, vport->qos.esw_tsar_ix, bitmask); if (err) { @@ -1455,6 +1484,41 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw, } } +static void esw_vport_create_drop_counters(struct mlx5_vport *vport) +{ + struct mlx5_core_dev *dev = vport->dev; + + if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) { + vport->ingress.drop_counter = mlx5_fc_create(dev, false); + if (IS_ERR(vport->ingress.drop_counter)) { + esw_warn(dev, + "vport[%d] configure ingress drop rule counter failed\n", + vport->vport); + vport->ingress.drop_counter = NULL; + } + } + + if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) { + vport->egress.drop_counter = mlx5_fc_create(dev, false); + if (IS_ERR(vport->egress.drop_counter)) { + esw_warn(dev, + "vport[%d] configure egress drop rule counter failed\n", + 
vport->vport); + vport->egress.drop_counter = NULL; + } + } +} + +static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport) +{ + struct mlx5_core_dev *dev = vport->dev; + + if (vport->ingress.drop_counter) + mlx5_fc_destroy(dev, vport->ingress.drop_counter); + if (vport->egress.drop_counter) + mlx5_fc_destroy(dev, vport->egress.drop_counter); +} + static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { @@ -1481,6 +1545,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, if (!vport_num) vport->info.trusted = true; + /* create steering drop counters for ingress and egress ACLs */ + if (vport_num && esw->mode == SRIOV_LEGACY) + esw_vport_create_drop_counters(vport); + esw_vport_change_handle_locked(vport); esw->enabled_vports++; @@ -1519,6 +1587,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) MLX5_ESW_VPORT_ADMIN_STATE_DOWN); esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); + esw_vport_destroy_drop_counters(vport); } esw->enabled_vports--; mutex_unlock(&esw->state_lock); @@ -1644,13 +1713,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } - esw->offloads.vport_reps = - kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep), - GFP_KERNEL); - if (!esw->offloads.vport_reps) { - err = -ENOMEM; + err = esw_offloads_init_reps(esw); + if (err) goto abort; - } hash_init(esw->offloads.encap_tbl); hash_init(esw->offloads.mod_hdr_tbl); @@ -1681,8 +1746,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); + esw_offloads_cleanup_reps(esw); kfree(esw->vports); - kfree(esw->offloads.vport_reps); kfree(esw); return err; } @@ -1696,7 +1761,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); - kfree(esw->offloads.vport_reps); + esw_offloads_cleanup_reps(esw); kfree(esw->vports); kfree(esw); } @@ -2018,12 +2083,36 @@ unlock: return err; } +static void mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev, + int vport_idx, + struct mlx5_vport_drop_stats *stats) +{ + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_vport *vport = &esw->vports[vport_idx]; + u64 bytes = 0; + u16 idx = 0; + + if (!vport->enabled || esw->mode != SRIOV_LEGACY) + return; + + if (vport->egress.drop_counter) { + idx = vport->egress.drop_counter->id; + mlx5_fc_query(dev, idx, &stats->rx_dropped, &bytes); + } + + if (vport->ingress.drop_counter) { + idx = vport->ingress.drop_counter->id; + mlx5_fc_query(dev, idx, &stats->tx_dropped, &bytes); + } +} + int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats) { int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0}; + struct mlx5_vport_drop_stats stats = {0}; int err = 0; u32 *out; @@ -2078,6 +2167,10 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, vf_stats->broadcast = MLX5_GET_CTR(out, received_eth_broadcast.packets); + mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats); + vf_stats->rx_dropped = stats.rx_dropped; + vf_stats->tx_dropped = stats.tx_dropped; + free_out: kvfree(out); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 565c8b7a399a..2fa037066b2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -45,6 +45,11 
@@ enum { SRIOV_OFFLOADS }; +enum { + REP_ETH, + NUM_REP_TYPES, +}; + #ifdef CONFIG_MLX5_ESWITCH #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -68,6 +73,7 @@ struct vport_ingress { struct mlx5_flow_group *drop_grp; struct mlx5_flow_handle *allow_rule; struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; }; struct vport_egress { @@ -76,6 +82,12 @@ struct vport_egress { struct mlx5_flow_group *drop_grp; struct mlx5_flow_handle *allowed_vlan; struct mlx5_flow_handle *drop_rule; + struct mlx5_fc *drop_counter; +}; + +struct mlx5_vport_drop_stats { + u64 rx_dropped; + u64 tx_dropped; }; struct mlx5_vport_info { @@ -133,25 +145,21 @@ struct mlx5_eswitch_fdb { }; }; -struct mlx5_esw_sq { - struct mlx5_flow_handle *send_to_vport_rule; - struct list_head list; +struct mlx5_eswitch_rep; +struct mlx5_eswitch_rep_if { + int (*load)(struct mlx5_core_dev *dev, + struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch_rep *rep); + void *priv; + bool valid; }; struct mlx5_eswitch_rep { - int (*load)(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); - void (*unload)(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); + struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES]; u16 vport; u8 hw_id[ETH_ALEN]; - struct net_device *netdev; - - struct mlx5_flow_handle *vport_rx_rule; - struct list_head vport_sqs_list; u16 vlan; u32 vlan_refcount; - bool valid; }; struct mlx5_esw_offload { @@ -197,6 +205,8 @@ struct mlx5_eswitch { void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); +void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw); +int esw_offloads_init_reps(struct mlx5_eswitch *esw); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); @@ -221,6 +231,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats); +struct mlx5_flow_handle * +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, + u32 sqn); +void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); struct mlx5_flow_spec; struct mlx5_esw_flow_attr; @@ -257,12 +271,6 @@ struct mlx5_esw_flow_attr { struct mlx5e_tc_flow_parse_attr *parse_attr; }; -int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep, - u16 *sqns_array, int sqns_num); -void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep); - int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode); int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode); @@ -272,10 +280,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap); void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, - struct mlx5_eswitch_rep *rep); + struct mlx5_eswitch_rep_if *rep_if, + u8 rep_type); void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index); -struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw); + int vport_index, + u8 rep_type); +void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c 
b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 1143d80119bd..99f583a15cc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none"); for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) { rep = &esw->offloads.vport_reps[vf_vport]; - if (!rep->valid) + if (!rep->rep_if[REP_ETH].valid) continue; err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val); @@ -302,7 +302,7 @@ out: return err; } -static struct mlx5_flow_handle * +struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) { struct mlx5_flow_act flow_act = {0}; @@ -339,57 +339,9 @@ out: return flow_rule; } -void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep) -{ - struct mlx5_esw_sq *esw_sq, *tmp; - - if (esw->mode != SRIOV_OFFLOADS) - return; - - list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) { - mlx5_del_flow_rules(esw_sq->send_to_vport_rule); - list_del(&esw_sq->list); - kfree(esw_sq); - } -} - -int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, - struct mlx5_eswitch_rep *rep, - u16 *sqns_array, int sqns_num) +void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) { - struct mlx5_flow_handle *flow_rule; - struct mlx5_esw_sq *esw_sq; - int err; - int i; - - if (esw->mode != SRIOV_OFFLOADS) - return 0; - - for (i = 0; i < sqns_num; i++) { - esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL); - if (!esw_sq) { - err = -ENOMEM; - goto out_err; - } - - /* Add re-inject rule to the PF/representor sqs */ - flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, - rep->vport, - sqns_array[i]); - if (IS_ERR(flow_rule)) { - err = PTR_ERR(flow_rule); - kfree(esw_sq); - goto out_err; - } - esw_sq->send_to_vport_rule = flow_rule; - list_add(&esw_sq->list, &rep->vport_sqs_list); - } - return 0; - -out_err: - mlx5_eswitch_sqs2vport_stop(esw, rep); - return err; + mlx5_del_flow_rules(rule); } static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) @@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw) return err; } -int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw) +{ + kfree(esw->offloads.vport_reps); +} + +int esw_offloads_init_reps(struct mlx5_eswitch *esw) +{ + int total_vfs = MLX5_TOTAL_VPORTS(esw->dev); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_esw_offload *offloads; + struct mlx5_eswitch_rep *rep; + u8 hw_id[ETH_ALEN]; + int vport; + + esw->offloads.vport_reps = kcalloc(total_vfs, + sizeof(struct mlx5_eswitch_rep), + GFP_KERNEL); + if (!esw->offloads.vport_reps) + return -ENOMEM; + + offloads = &esw->offloads; + mlx5_query_nic_vport_mac_address(dev, 0, hw_id); + + for (vport = 0; vport < total_vfs; vport++) { + rep = &offloads->vport_reps[vport]; + + rep->vport = vport; + ether_addr_copy(rep->hw_id, hw_id); + } + + offloads->vport_reps[0].vport = FDB_UPLINK_VPORT; + + return 0; +} + +static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) +{ + struct mlx5_eswitch_rep *rep; + int vport; + + for (vport = nvports - 1; vport >= 0; vport--) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->rep_if[rep_type].valid) + continue; + + rep->rep_if[rep_type].unload(rep); + } +} + +static void 
esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 rep_type = NUM_REP_TYPES; + + while (rep_type-- > 0) + esw_offloads_unload_reps_type(esw, nvports, rep_type); +} + +static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports, + u8 rep_type) { struct mlx5_eswitch_rep *rep; int vport; int err; + for (vport = 0; vport < nvports; vport++) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->rep_if[rep_type].valid) + continue; + + err = rep->rep_if[rep_type].load(esw->dev, rep); + if (err) + goto err_reps; + } + + return 0; + +err_reps: + esw_offloads_unload_reps_type(esw, vport, rep_type); + return err; +} + +static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports) +{ + u8 rep_type = 0; + int err; + + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + err = esw_offloads_load_reps_type(esw, nvports, rep_type); + if (err) + goto err_reps; + } + + return err; + +err_reps: + while (rep_type-- > 0) + esw_offloads_unload_reps_type(esw, nvports, rep_type); + return err; +} + +int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +{ + int err; + /* disable PF RoCE so missed packets don't go through RoCE steering */ mlx5_dev_list_lock(); mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); @@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) if (err) goto create_fg_err; - for (vport = 0; vport < nvports; vport++) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->valid) - continue; - - err = rep->load(esw, rep); - if (err) - goto err_reps; - } + err = esw_offloads_load_reps(esw, nvports); + if (err) + goto err_reps; return 0; err_reps: - for (vport--; vport >= 0; vport--) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->valid) - continue; - rep->unload(esw, rep); - } esw_destroy_vport_rx_group(esw); create_fg_err: @@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw) void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) { - struct mlx5_eswitch_rep *rep; - int vport; - - for (vport = nvports - 1; vport >= 0; vport--) { - rep = &esw->offloads.vport_reps[vport]; - if (!rep->valid) - continue; - rep->unload(esw, rep); - } - + esw_offloads_unload_reps(esw, nvports); esw_destroy_vport_rx_group(esw); esw_destroy_offloads_table(esw); esw_destroy_offloads_fdb_tables(esw); @@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, int vport_index, - struct mlx5_eswitch_rep *__rep) + struct mlx5_eswitch_rep_if *__rep_if, + u8 rep_type) { struct mlx5_esw_offload *offloads = &esw->offloads; - struct mlx5_eswitch_rep *rep; - - rep = &offloads->vport_reps[vport_index]; + struct mlx5_eswitch_rep_if *rep_if; - memset(rep, 0, sizeof(*rep)); + rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type]; - rep->load = __rep->load; - rep->unload = __rep->unload; - rep->vport = __rep->vport; - rep->netdev = __rep->netdev; - ether_addr_copy(rep->hw_id, __rep->hw_id); + rep_if->load = __rep_if->load; + rep_if->unload = __rep_if->unload; + rep_if->priv = __rep_if->priv; - INIT_LIST_HEAD(&rep->vport_sqs_list); - rep->valid = true; + rep_if->valid = true; } void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, - int vport_index) + int vport_index, u8 rep_type) { struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; @@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, rep = 
&offloads->vport_reps[vport_index]; if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled) - rep->unload(esw, rep); + rep->rep_if[rep_type].unload(rep); - rep->valid = false; + rep->rep_if[rep_type].valid = false; } -struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw) +void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type) { #define UPLINK_REP_INDEX 0 struct mlx5_esw_offload *offloads = &esw->offloads; struct mlx5_eswitch_rep *rep; rep = &offloads->vport_reps[UPLINK_REP_INDEX]; - return rep->netdev; + return rep->rep_if[rep_type].priv; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index dfaad9ecb2b8..cc4f6ab9374a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2026,16 +2026,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &steering->fdb_root_ns->ns; else return NULL; - case MLX5_FLOW_NAMESPACE_ESW_EGRESS: - if (steering->esw_egress_root_ns) - return &steering->esw_egress_root_ns->ns; - else - return NULL; - case MLX5_FLOW_NAMESPACE_ESW_INGRESS: - if (steering->esw_ingress_root_ns) - return &steering->esw_ingress_root_ns->ns; - else - return NULL; case MLX5_FLOW_NAMESPACE_SNIFFER_RX: if (steering->sniffer_rx_root_ns) return &steering->sniffer_rx_root_ns->ns; @@ -2066,6 +2056,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, } EXPORT_SYMBOL(mlx5_get_flow_namespace); +struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, + int vport) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + + if (!steering || vport >= MLX5_TOTAL_VPORTS(dev)) + return NULL; + + switch (type) { + case MLX5_FLOW_NAMESPACE_ESW_EGRESS: + if (steering->esw_egress_root_ns && + steering->esw_egress_root_ns[vport]) + return &steering->esw_egress_root_ns[vport]->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_ESW_INGRESS: + if (steering->esw_ingress_root_ns && + steering->esw_ingress_root_ns[vport]) + return &steering->esw_ingress_root_ns[vport]->ns; + else + return NULL; + default: + return NULL; + } +} + static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, unsigned int prio, int num_levels) { @@ -2343,13 +2360,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns) clean_tree(&root_ns->ns.node); } +static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int i; + + if (!steering->esw_egress_root_ns) + return; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + + kfree(steering->esw_egress_root_ns); +} + +static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int i; + + if (!steering->esw_ingress_root_ns) + return; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + + kfree(steering->esw_ingress_root_ns); +} + void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering = dev->priv.steering; cleanup_root_ns(steering->root_ns); - cleanup_root_ns(steering->esw_egress_root_ns); - cleanup_root_ns(steering->esw_ingress_root_ns); + cleanup_egress_acls_root_ns(dev); + cleanup_ingress_acls_root_ns(dev); cleanup_root_ns(steering->fdb_root_ns); 
cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); @@ -2418,34 +2463,86 @@ out_err: return PTR_ERR(prio); } -static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering) +static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport) { struct fs_prio *prio; - steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL); - if (!steering->esw_egress_root_ns) + steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL); + if (!steering->esw_egress_root_ns[vport]) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0, - MLX5_TOTAL_VPORTS(steering->dev)); + prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1); return PTR_ERR_OR_ZERO(prio); } -static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering) +static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport) { struct fs_prio *prio; - steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL); - if (!steering->esw_ingress_root_ns) + steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL); + if (!steering->esw_ingress_root_ns[vport]) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0, - MLX5_TOTAL_VPORTS(steering->dev)); + prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1); return PTR_ERR_OR_ZERO(prio); } +static int init_egress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int err; + int i; + + steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev), + sizeof(*steering->esw_egress_root_ns), + GFP_KERNEL); + if (!steering->esw_egress_root_ns) + return -ENOMEM; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) { + err = init_egress_acl_root_ns(steering, i); + if (err) + goto cleanup_root_ns; + } + + return 0; + +cleanup_root_ns: + for (i--; i >= 0; i--) + cleanup_root_ns(steering->esw_egress_root_ns[i]); + kfree(steering->esw_egress_root_ns); + return err; +} + +static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev) +{ + struct mlx5_flow_steering *steering = dev->priv.steering; + int err; + int i; + + steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev), + sizeof(*steering->esw_ingress_root_ns), + GFP_KERNEL); + if (!steering->esw_ingress_root_ns) + return -ENOMEM; + + for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) { + err = init_ingress_acl_root_ns(steering, i); + if (err) + goto cleanup_root_ns; + } + + return 0; + +cleanup_root_ns: + for (i--; i >= 0; i--) + cleanup_root_ns(steering->esw_ingress_root_ns[i]); + kfree(steering->esw_ingress_root_ns); + return err; +} + int mlx5_init_fs(struct mlx5_core_dev *dev) { struct mlx5_flow_steering *steering; @@ -2488,12 +2585,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) goto err; } if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { - err = init_egress_acl_root_ns(steering); + err = init_egress_acls_root_ns(dev); if (err) goto err; } if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { - err = init_ingress_acl_root_ns(steering); + err = init_ingress_acls_root_ns(dev); if (err) goto err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 397d24a621a4..05262708f14b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -71,8 +71,8 @@ struct mlx5_flow_steering { 
struct kmem_cache *ftes_cache; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; - struct mlx5_flow_root_namespace *esw_egress_root_ns; - struct mlx5_flow_root_namespace *esw_ingress_root_ns; + struct mlx5_flow_root_namespace **esw_egress_root_ns; + struct mlx5_flow_root_namespace **esw_ingress_root_ns; struct mlx5_flow_root_namespace *sniffer_tx_root_ns; struct mlx5_flow_root_namespace *sniffer_rx_root_ns; }; @@ -233,6 +233,8 @@ void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev, unsigned long delay); void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev, unsigned long interval); +int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes); int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 89d1f8650033..b7ab929d5f8e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -312,6 +312,12 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) } } +int mlx5_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes) +{ + return mlx5_cmd_fc_query(dev, id, packets, bytes); +} + void mlx5_fc_query_cached(struct mlx5_fc *counter, u64 *bytes, u64 *packets, u64 *lastuse) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 6f338a9219c8..90cb50fe17fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -254,4 +254,5 @@ const struct ethtool_ops mlx5i_ethtool_ops = { const struct ethtool_ops mlx5i_pkey_ethtool_ops = { .get_drvinfo = mlx5i_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = mlx5i_get_ts_info, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 8812d7208e8f..3b2363e93ba5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -41,7 +41,6 @@ static int mlx5i_open(struct net_device *netdev); static int mlx5i_close(struct net_device *netdev); static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); -static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, @@ -396,7 +395,7 @@ int mlx5i_dev_init(struct net_device *dev) return 0; } -static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mlx5e_priv *priv = mlx5i_epriv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 49008022c306..6d9053bcbe95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -76,9 +76,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn); /* Get the net-device corresponding to the given underlay QPN */ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn); -/* Shared ndo functionts */ +/* Shared ndo functions */ int mlx5i_dev_init(struct net_device *dev); void mlx5i_dev_cleanup(struct net_device *dev); +int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* 
Parent profile functions */ void mlx5i_init(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 531b02cc979b..b69e9d847a6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -140,6 +140,7 @@ static int mlx5i_pkey_close(struct net_device *netdev); static int mlx5i_pkey_dev_init(struct net_device *dev); static void mlx5i_pkey_dev_cleanup(struct net_device *netdev); static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu); +static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, @@ -147,6 +148,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_init = mlx5i_pkey_dev_init, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, + .ndo_do_ioctl = mlx5i_pkey_ioctl, }; /* Child NDOs */ @@ -174,6 +176,11 @@ static int mlx5i_pkey_dev_init(struct net_device *dev) return mlx5i_dev_init(dev); } +static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + return mlx5i_ioctl(dev, ifr, cmd); +} + static void mlx5i_pkey_dev_cleanup(struct net_device *netdev) { return mlx5i_dev_cleanup(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index f26f97fe4666..582b2f18010a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -137,6 +137,17 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag); +static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, + bool reset, void *out, int out_size) +{ + u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { }; + + MLX5_SET(query_cong_statistics_in, in, opcode, + MLX5_CMD_OP_QUERY_CONG_STATISTICS); + MLX5_SET(query_cong_statistics_in, in, clear, reset); + return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); +} + static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev) { return dev->priv.lag; @@ -633,3 +644,48 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) /* If bonded, we do not add an IB device for PF1. 
*/ return false; } + +int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, + u64 *values, + int num_counters, + size_t *offsets) +{ + int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out); + struct mlx5_core_dev *mdev[MLX5_MAX_PORTS]; + struct mlx5_lag *ldev; + int num_ports; + int ret, i, j; + void *out; + + out = kvzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + memset(values, 0, sizeof(*values) * num_counters); + + mutex_lock(&lag_mutex); + ldev = mlx5_lag_dev_get(dev); + if (ldev && mlx5_lag_is_bonded(ldev)) { + num_ports = MLX5_MAX_PORTS; + mdev[0] = ldev->pf[0].dev; + mdev[1] = ldev->pf[1].dev; + } else { + num_ports = 1; + mdev[0] = dev; + } + + for (i = 0; i < num_ports; ++i) { + ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen); + if (ret) + goto unlock; + + for (j = 0; j < num_counters; ++j) + values[j] += be64_to_cpup((__be64 *)(out + offsets[j])); + } + +unlock: + mutex_unlock(&lag_mutex); + kvfree(out); + return ret; +} +EXPORT_SYMBOL(mlx5_lag_query_cong_counters); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 5e128d7a9ffd..a09ebbaf3b68 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -398,3 +398,187 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } EXPORT_SYMBOL(mlx5_core_destroy_rqt); + +static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev, + struct mlx5_hairpin_params *params, u32 *rqn) +{ + u32 in[MLX5_ST_SZ_DW(create_rq_in)] = {0}; + void *rqc, *wq; + + rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); + wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(rqc, rqc, hairpin, 1); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); + MLX5_SET(rqc, rqc, counter_set_id, params->q_counter); + + MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size); + + return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn); +} + +static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev, + struct mlx5_hairpin_params *params, u32 *sqn) +{ + u32 in[MLX5_ST_SZ_DW(create_sq_in)] = {0}; + void *sqc, *wq; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + MLX5_SET(sqc, sqc, hairpin, 1); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + + MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size); + + return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn); +} + +static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp, + struct mlx5_hairpin_params *params) +{ + int err; + + err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn); + if (err) + goto out_err_rq; + + err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn); + if (err) + goto out_err_sq; + + return 0; + +out_err_sq: + mlx5_core_destroy_rq(hp->func_mdev, hp->rqn); +out_err_rq: + return err; +} + +static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp) +{ + mlx5_core_destroy_rq(hp->func_mdev, hp->rqn); + mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn); +} + +static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn, + int curr_state, int next_state, + u16 peer_vhca, u32 peer_sq) +{ + u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {0}; + void *rqc; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + if (next_state == MLX5_RQC_STATE_RDY) { + MLX5_SET(rqc, rqc, hairpin_peer_sq, peer_sq); + MLX5_SET(rqc, rqc, hairpin_peer_vhca, peer_vhca); + } + + MLX5_SET(modify_rq_in, in, rq_state, 
curr_state); + MLX5_SET(rqc, rqc, state, next_state); + + return mlx5_core_modify_rq(func_mdev, rqn, + in, MLX5_ST_SZ_BYTES(modify_rq_in)); +} + +static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn, + int curr_state, int next_state, + u16 peer_vhca, u32 peer_rq) +{ + u32 in[MLX5_ST_SZ_DW(modify_sq_in)] = {0}; + void *sqc; + + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + + if (next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); + MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); + } + + MLX5_SET(modify_sq_in, in, sq_state, curr_state); + MLX5_SET(sqc, sqc, state, next_state); + + return mlx5_core_modify_sq(peer_mdev, sqn, + in, MLX5_ST_SZ_BYTES(modify_sq_in)); +} + +static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp) +{ + int err; + + /* set peer SQ */ + err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, + MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn); + if (err) + goto err_modify_sq; + + /* set func RQ */ + err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn, + MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY, + MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn); + + if (err) + goto err_modify_rq; + + return 0; + +err_modify_rq: + mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RST, 0, 0); +err_modify_sq: + return err; +} + +static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp) +{ + /* unset func RQ */ + mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn, MLX5_RQC_STATE_RDY, + MLX5_RQC_STATE_RST, 0, 0); + + /* unset peer SQ */ + mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RST, 0, 0); +} + +struct mlx5_hairpin * +mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, + struct mlx5_core_dev *peer_mdev, + struct mlx5_hairpin_params *params) +{ + struct mlx5_hairpin *hp; + int size, err; + + size = sizeof(*hp); + hp = kzalloc(size, GFP_KERNEL); + if (!hp) + return ERR_PTR(-ENOMEM); + + hp->func_mdev = func_mdev; + hp->peer_mdev = peer_mdev; + + /* alloc and pair func --> peer hairpin */ + err = mlx5_hairpin_create_queues(hp, params); + if (err) + goto err_create_queues; + + err = mlx5_hairpin_pair_queues(hp); + if (err) + goto err_pair_queues; + + return hp; + +err_pair_queues: + mlx5_hairpin_destroy_queues(hp); +err_create_queues: + kfree(hp); + return ERR_PTR(err); +} + +void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp) +{ + mlx5_hairpin_unpair_queues(hp); + mlx5_hairpin_destroy_queues(hp); + kfree(hp); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d373df7b11bd..54c7d9202e81 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -358,7 +358,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev)) return 0; - dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n", + dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is out of date\n", rev->major, rev->minor, rev->subminor); dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n", MLXSW_SP_FW_FILENAME); @@ -3085,6 +3085,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_fids_init; } + err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n", + mlxsw_sp_port->local_port); + goto
err_port_qdiscs_init; + } + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", @@ -3113,6 +3120,8 @@ err_register_netdev: mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); err_port_vlan_get: + mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); +err_port_qdiscs_init: mlxsw_sp_port_fids_fini(mlxsw_sp_port); err_port_fids_init: mlxsw_sp_port_dcb_fini(mlxsw_sp_port); @@ -3148,6 +3157,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_vlan_flush(mlxsw_sp_port); + mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); mlxsw_sp_port_fids_fini(mlxsw_sp_port); mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); @@ -4422,7 +4432,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, } if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) { + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) { NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; @@ -4550,6 +4563,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct netdev_notifier_changeupper_info *info = ptr; struct netlink_ext_ack *extack; struct net_device *upper_dev; @@ -4566,7 +4580,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, } if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) { + if (netdev_has_any_upper_dev(upper_dev) && + (!netif_is_bridge_master(upper_dev) || + !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, + upper_dev))) { NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a0adcd886589..b6f475e83474 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -204,29 +204,6 @@ struct mlxsw_sp_port_vlan { struct list_head bridge_vlan_node; }; -enum mlxsw_sp_qdisc_type { - MLXSW_SP_QDISC_NO_QDISC, - MLXSW_SP_QDISC_RED, -}; - -struct mlxsw_sp_qdisc { - u32 handle; - enum mlxsw_sp_qdisc_type type; - struct red_stats xstats_base; - union { - struct { - u64 tail_drop_base; - u64 ecn_base; - u64 wred_drop_base; - } red; - } xstats; - - u64 tx_bytes; - u64 tx_packets; - u64 drops; - u64 overlimits; -}; - /* No need an internal lock; At worse - miss a single periodic iteration */ struct mlxsw_sp_port_xstats { u64 ecn; @@ -269,7 +246,7 @@ struct mlxsw_sp_port { } periodic_hw_stats; struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; - struct mlxsw_sp_qdisc root_qdisc; + struct mlxsw_sp_qdisc *root_qdisc; unsigned acl_rule_count; }; @@ -366,6 +343,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev); +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev); /* 
spectrum.c */ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -582,6 +561,8 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); /* spectrum_qdisc.c */ +int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port); +void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_red_qopt_offload *p); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index c33beac5def0..273300b75a68 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -41,6 +41,138 @@ #include "spectrum.h" #include "reg.h" +enum mlxsw_sp_qdisc_type { + MLXSW_SP_QDISC_NO_QDISC, + MLXSW_SP_QDISC_RED, +}; + +struct mlxsw_sp_qdisc_ops { + enum mlxsw_sp_qdisc_type type; + int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params); + int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params); + int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc); + int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_qopt_offload_stats *stats_ptr); + int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *xstats_ptr); + void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc); +}; + +struct mlxsw_sp_qdisc { + u32 handle; + u8 tclass_num; + union { + struct red_stats red; + } xstats_base; + struct mlxsw_sp_qdisc_stats { + u64 tx_bytes; + u64 tx_packets; + u64 drops; + u64 overlimits; + } stats_base; + + struct mlxsw_sp_qdisc_ops *ops; +}; + +static bool +mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle, + enum mlxsw_sp_qdisc_type type) +{ + return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && + mlxsw_sp_qdisc->ops->type == type && + mlxsw_sp_qdisc->handle == handle; +} + +static int +mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) +{ + int err = 0; + + if (!mlxsw_sp_qdisc) + return 0; + + if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy) + err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port, + mlxsw_sp_qdisc); + + mlxsw_sp_qdisc->handle = TC_H_UNSPEC; + mlxsw_sp_qdisc->ops = NULL; + return err; +} + +static int +mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct mlxsw_sp_qdisc_ops *ops, void *params) +{ + int err; + + if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type) + /* In case this location contained a different qdisc of the + * same type we can override the old qdisc configuration. + * Otherwise, we need to remove the old qdisc before setting the + * new one. 
+ */ + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params); + if (err) + goto err_bad_param; + + err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params); + if (err) + goto err_config; + + if (mlxsw_sp_qdisc->handle != handle) { + mlxsw_sp_qdisc->ops = ops; + if (ops->clean_stats) + ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc); + } + + mlxsw_sp_qdisc->handle = handle; + return 0; + +err_bad_param: +err_config: + mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); + return err; +} + +static int +mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + struct tc_qopt_offload_stats *stats_ptr) +{ + if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && + mlxsw_sp_qdisc->ops->get_stats) + return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port, + mlxsw_sp_qdisc, + stats_ptr); + + return -EOPNOTSUPP; +} + +static int +mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *xstats_ptr) +{ + if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops && + mlxsw_sp_qdisc->ops->get_xstats) + return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port, + mlxsw_sp_qdisc, + xstats_ptr); + + return -EOPNOTSUPP; +} + static int mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, int tclass_num, u32 min, u32 max, @@ -79,80 +211,76 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port, } static void -mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num) +mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; struct rtnl_link_stats64 *stats; + struct red_stats *red_base; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; stats = &mlxsw_sp_port->periodic_hw_stats.stats; + stats_base = &mlxsw_sp_qdisc->stats_base; + red_base = &mlxsw_sp_qdisc->xstats_base.red; - mlxsw_sp_qdisc->tx_packets = stats->tx_packets; - mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes; + stats_base->tx_packets = stats->tx_packets; + stats_base->tx_bytes = stats->tx_bytes; - switch (mlxsw_sp_qdisc->type) { - case MLXSW_SP_QDISC_RED: - xstats_base->prob_mark = xstats->ecn; - xstats_base->prob_drop = xstats->wred_drop[tclass_num]; - xstats_base->pdrop = xstats->tail_drop[tclass_num]; + red_base->prob_mark = xstats->ecn; + red_base->prob_drop = xstats->wred_drop[tclass_num]; + red_base->pdrop = xstats->tail_drop[tclass_num]; - mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop + - xstats_base->prob_mark; - mlxsw_sp_qdisc->drops = xstats_base->prob_drop + - xstats_base->pdrop; - break; - default: - break; - } + stats_base->overlimits = red_base->prob_drop + red_base->prob_mark; + stats_base->drops = red_base->prob_drop + red_base->pdrop; } static int -mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num) +mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc) { - int err; - - if (mlxsw_sp_qdisc->handle != handle) - return 0; - - err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num); - mlxsw_sp_qdisc->handle = TC_H_UNSPEC; - mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC; - - return err; + return 
mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, + mlxsw_sp_qdisc->tclass_num); } static int -mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, - struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num, - struct tc_red_qopt_offload_params *p) +mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u32 min, max; - u64 prob; - int err = 0; + struct tc_red_qopt_offload_params *p = params; if (p->min > p->max) { dev_err(mlxsw_sp->bus_info->dev, "spectrum: RED: min %u is bigger then max %u\n", p->min, p->max); - goto err_bad_param; + return -EINVAL; } if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) { dev_err(mlxsw_sp->bus_info->dev, "spectrum: RED: max value %u is too big\n", p->max); - goto err_bad_param; + return -EINVAL; } if (p->min == 0 || p->max == 0) { dev_err(mlxsw_sp->bus_info->dev, "spectrum: RED: 0 value is illegal for min and max\n"); - goto err_bad_param; + return -EINVAL; } + return 0; +} + +static int +mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + void *params) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct tc_red_qopt_offload_params *p = params; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + u32 min, max; + u64 prob; /* calculate probability in percentage */ prob = p->probability; @@ -161,116 +289,132 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, prob = DIV_ROUND_UP(prob, 1 << 16); min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min); max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max); - err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, - max, prob, p->is_ecn); - if (err) - goto err_config; - - mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED; - if (mlxsw_sp_qdisc->handle != handle) - mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port, - mlxsw_sp_qdisc, - tclass_num); - - mlxsw_sp_qdisc->handle = handle; - return 0; - -err_bad_param: - err = -EINVAL; -err_config: - mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle, - mlxsw_sp_qdisc, tclass_num); - return err; + return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, + max, prob, p->is_ecn); } static int -mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, +mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num, struct red_stats *res) + void *xstats_ptr) { - struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; struct mlxsw_sp_port_xstats *xstats; - - if (mlxsw_sp_qdisc->handle != handle || - mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) - return -EOPNOTSUPP; + struct red_stats *res = xstats_ptr; + int early_drops, marks, pdrops; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; - res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; - res->prob_mark = xstats->ecn - xstats_base->prob_mark; - res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; + marks = xstats->ecn - xstats_base->prob_mark; + pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + + res->pdrop += pdrops; + res->prob_drop += early_drops; + res->prob_mark += marks; + + xstats_base->pdrop += pdrops; + xstats_base->prob_drop += 
early_drops; + xstats_base->prob_mark += marks; return 0; } static int -mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, +mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, - int tclass_num, - struct tc_red_qopt_offload_stats *res) + struct tc_qopt_offload_stats *stats_ptr) { u64 tx_bytes, tx_packets, overlimits, drops; + u8 tclass_num = mlxsw_sp_qdisc->tclass_num; + struct mlxsw_sp_qdisc_stats *stats_base; struct mlxsw_sp_port_xstats *xstats; struct rtnl_link_stats64 *stats; - if (mlxsw_sp_qdisc->handle != handle || - mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) - return -EOPNOTSUPP; - xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; stats = &mlxsw_sp_port->periodic_hw_stats.stats; + stats_base = &mlxsw_sp_qdisc->stats_base; - tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes; - tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets; + tx_bytes = stats->tx_bytes - stats_base->tx_bytes; + tx_packets = stats->tx_packets - stats_base->tx_packets; overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - - mlxsw_sp_qdisc->overlimits; + stats_base->overlimits; drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - - mlxsw_sp_qdisc->drops; - - _bstats_update(res->bstats, tx_bytes, tx_packets); - res->qstats->overlimits += overlimits; - res->qstats->drops += drops; - res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, - xstats->backlog[tclass_num]); - - mlxsw_sp_qdisc->drops += drops; - mlxsw_sp_qdisc->overlimits += overlimits; - mlxsw_sp_qdisc->tx_bytes += tx_bytes; - mlxsw_sp_qdisc->tx_packets += tx_packets; + stats_base->drops; + + _bstats_update(stats_ptr->bstats, tx_bytes, tx_packets); + stats_ptr->qstats->overlimits += overlimits; + stats_ptr->qstats->drops += drops; + stats_ptr->qstats->backlog += + mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp, + xstats->backlog[tclass_num]); + + stats_base->drops += drops; + stats_base->overlimits += overlimits; + stats_base->tx_bytes += tx_bytes; + stats_base->tx_packets += tx_packets; return 0; } #define MLXSW_SP_PORT_DEFAULT_TCLASS 0 +static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = { + .type = MLXSW_SP_QDISC_RED, + .check_params = mlxsw_sp_qdisc_red_check_params, + .replace = mlxsw_sp_qdisc_red_replace, + .destroy = mlxsw_sp_qdisc_red_destroy, + .get_stats = mlxsw_sp_qdisc_get_red_stats, + .get_xstats = mlxsw_sp_qdisc_get_red_xstats, + .clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats, +}; + int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_red_qopt_offload *p) { struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; - int tclass_num; if (p->parent != TC_H_ROOT) return -EOPNOTSUPP; - mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc; - tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc; + + if (p->command == TC_RED_REPLACE) + return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, + &mlxsw_sp_qdisc_ops_red, + &p->set); + + if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle, + MLXSW_SP_QDISC_RED)) + return -EOPNOTSUPP; switch (p->command) { - case TC_RED_REPLACE: - return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num, - &p->set); case TC_RED_DESTROY: - return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num); + return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc); case TC_RED_XSTATS: - return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num, 
- p->xstats); + return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc, + p->xstats); case TC_RED_STATS: - return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle, - mlxsw_sp_qdisc, tclass_num, - &p->stats); + return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc, + &p->stats); default: return -EOPNOTSUPP; } } + +int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc), + GFP_KERNEL); + if (!mlxsw_sp_port->root_qdisc) + return -ENOMEM; + + mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + + return 0; +} + +void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->root_qdisc); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index be657b8533f0..434b3922b34f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3228,7 +3228,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, { if (!removing) nh->should_offload = 1; - else if (nh->offloaded) + else nh->should_offload = 0; nh->update = 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 7b8548e25ae7..593ad31be749 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, return NULL; } +bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); +} + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, struct net_device *br_dev) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 214b02a3acdd..4b63167906ca 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -84,6 +84,33 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) return nfp_net_ebpf_capable(nn) ? 
"BPF" : ""; } +static int +nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) +{ + int err; + + nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL); + if (!nn->app_priv) + return -ENOMEM; + + err = nfp_app_nic_vnic_alloc(app, nn, id); + if (err) + goto err_free_priv; + + return 0; +err_free_priv: + kfree(nn->app_priv); + return err; +} + +static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) +{ + struct nfp_bpf_vnic *bv = nn->app_priv; + + WARN_ON(bv->tc_prog); + kfree(bv); +} + static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { @@ -286,7 +313,8 @@ const struct nfp_app_type app_bpf = { .extra_cap = nfp_bpf_extra_cap, - .vnic_alloc = nfp_app_nic_vnic_alloc, + .vnic_alloc = nfp_bpf_vnic_alloc, + .vnic_free = nfp_bpf_vnic_free, .setup_tc = nfp_bpf_setup_tc, .tc_busy = nfp_bpf_tc_busy, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index aae1be9ed056..89a9b6393882 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -238,7 +238,7 @@ struct nfp_bpf_vnic { int nfp_bpf_jit(struct nfp_prog *prog); -extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops; +extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops; struct netdev_bpf; struct nfp_app; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 9c2608445bd8..d8870c2f11f3 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -260,6 +260,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return 0; } -const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = { +const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = { .insn_hook = nfp_verify_insn, }; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index e98bb9cdb6a3..615314d9e7c6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -125,6 +125,27 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) return 0; } +int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists) +{ + struct nfp_flower_cmsg_portreify *msg; + struct sk_buff *skb; + + skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg), + NFP_FLOWER_CMSG_TYPE_PORT_REIFY, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + msg = nfp_flower_cmsg_get_data(skb); + msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id); + msg->reserved = 0; + msg->info = cpu_to_be16(exists); + + nfp_ctrl_tx(repr->app->ctrl, skb); + + return 0; +} + static void nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) { @@ -161,6 +182,28 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) } static void +nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_flower_cmsg_portreify *msg; + bool exists; + + msg = nfp_flower_cmsg_get_data(skb); + + rcu_read_lock(); + exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum)); + rcu_read_unlock(); + if (!exists) { + nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n", + be32_to_cpu(msg->portnum)); + return; + } + + atomic_inc(&priv->reify_replies); + wake_up_interruptible(&priv->reify_wait_queue); +} + +static void nfp_flower_cmsg_process_one_rx(struct 
nfp_app *app, struct sk_buff *skb) { struct nfp_flower_cmsg_hdr *cmsg_hdr; @@ -176,6 +219,9 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) type = cmsg_hdr->type; switch (type) { + case NFP_FLOWER_CMSG_TYPE_PORT_REIFY: + nfp_flower_cmsg_portreify_rx(app, skb); + break; case NFP_FLOWER_CMSG_TYPE_PORT_MOD: nfp_flower_cmsg_portmod_rx(app, skb); break; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 992d2eec1019..adfe474c2cf0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -350,6 +350,7 @@ struct nfp_flower_cmsg_hdr { enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0, NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, + NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6, NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10, @@ -386,6 +387,15 @@ struct nfp_flower_cmsg_portmod { #define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK BIT(0) +/* NFP_FLOWER_CMSG_TYPE_PORT_REIFY */ +struct nfp_flower_cmsg_portreify { + __be32 portnum; + u16 reserved; + __be16 info; +}; + +#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0) + enum nfp_flower_cmsg_port_type { NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0, NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1, @@ -444,6 +454,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, unsigned int nbi, unsigned int nbi_port, unsigned int phys_port); int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok); +int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists); void nfp_flower_cmsg_process_rx(struct work_struct *work); void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb); struct sk_buff * diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 63160e9754d4..67c406815365 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -32,6 +32,7 @@ */ #include <linux/etherdevice.h> +#include <linux/lockdep.h> #include <linux/pci.h> #include <linux/skbuff.h> #include <linux/vmalloc.h> @@ -102,6 +103,52 @@ nfp_flower_repr_get(struct nfp_app *app, u32 port_id) } static int +nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type, + bool exists) +{ + struct nfp_reprs *reprs; + int i, err, count = 0; + + reprs = rcu_dereference_protected(app->reprs[type], + lockdep_is_held(&app->pf->lock)); + if (!reprs) + return 0; + + for (i = 0; i < reprs->num_reprs; i++) + if (reprs->reprs[i]) { + struct nfp_repr *repr = netdev_priv(reprs->reprs[i]); + + err = nfp_flower_cmsg_portreify(repr, exists); + if (err) + return err; + count++; + } + + return count; +} + +static int +nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl) +{ + struct nfp_flower_priv *priv = app->priv; + int err; + + if (!tot_repl) + return 0; + + lockdep_assert_held(&app->pf->lock); + err = wait_event_interruptible_timeout(priv->reify_wait_queue, + atomic_read(replies) >= tot_repl, + msecs_to_jiffies(10)); + if (err <= 0) { + nfp_warn(app->cpp, "Not all reprs responded to reify\n"); + return -EIO; + } + + return 0; +} + +static int nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) { int err; @@ -110,7 +157,6 @@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) if (err) return err; - netif_carrier_on(repr->netdev); netif_tx_wake_all_queues(repr->netdev); return 0; @@ -119,7 +165,6 
@@ nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr) static int nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) { - netif_carrier_off(repr->netdev); netif_tx_disable(repr->netdev); return nfp_flower_cmsg_portmod(repr, false); @@ -140,6 +185,24 @@ nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) netdev_priv(netdev)); } +static void +nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_repr *repr = netdev_priv(netdev); + struct nfp_flower_priv *priv = app->priv; + atomic_t *replies = &priv->reify_replies; + int err; + + atomic_set(replies, 0); + err = nfp_flower_cmsg_portreify(repr, false); + if (err) { + nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n"); + return; + } + + nfp_flower_wait_repr_reify(app, replies, 1); +} + static void nfp_flower_sriov_disable(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; @@ -157,10 +220,11 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, { u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; + atomic_t *replies = &priv->reify_replies; enum nfp_port_type port_type; struct nfp_reprs *reprs; + int i, err, reify_cnt; const u8 queue = 0; - int i, err; port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT : NFP_PORT_VF_PORT; @@ -211,7 +275,21 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, nfp_app_reprs_set(app, repr_type, reprs); + atomic_set(replies, 0); + reify_cnt = nfp_flower_reprs_reify(app, repr_type, true); + if (reify_cnt < 0) { + err = reify_cnt; + nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); + goto err_reprs_remove; + } + + err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); + if (err) + goto err_reprs_remove; + return 0; +err_reprs_remove: + reprs = nfp_app_reprs_set(app, repr_type, NULL); err_reprs_clean: nfp_reprs_clean_and_free(reprs); return err; @@ -233,10 +311,11 @@ static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; + atomic_t *replies = &priv->reify_replies; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; + int err, reify_cnt; unsigned int i; - int err; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) @@ -293,16 +372,30 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); - /* The MAC_REPR control message should be sent after the MAC + /* The REIFY/MAC_REPR control messages should be sent after the MAC * representors are registered using nfp_app_reprs_set(). This is * because the firmware may respond with control messages for the * MAC representors, f.e. to provide the driver with information * about their state, and without registration the driver will drop * any such messages. 
*/ + atomic_set(replies, 0); + reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true); + if (reify_cnt < 0) { + err = reify_cnt; + nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n"); + goto err_reprs_remove; + } + + err = nfp_flower_wait_repr_reify(app, replies, reify_cnt); + if (err) + goto err_reprs_remove; + nfp_ctrl_tx(app->ctrl, ctrl_skb); return 0; +err_reprs_remove: + reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL); err_reprs_clean: nfp_reprs_clean_and_free(reprs); err_free_ctrl_skb: @@ -419,6 +512,7 @@ static int nfp_flower_init(struct nfp_app *app) app_priv->app = app; skb_queue_head_init(&app_priv->cmsg_skbs); INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); + init_waitqueue_head(&app_priv->reify_wait_queue); err = nfp_flower_metadata_init(app); if (err) @@ -476,6 +570,7 @@ const struct nfp_app_type app_flower = { .vnic_clean = nfp_flower_vnic_clean, .repr_init = nfp_flower_repr_netdev_init, + .repr_preclean = nfp_flower_repr_netdev_preclean, .repr_clean = nfp_flower_repr_netdev_clean, .repr_open = nfp_flower_repr_netdev_open, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 6e3937a0b708..332ff0fdc038 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -102,6 +102,9 @@ struct nfp_fl_stats_id { * @nfp_mac_off_count: Number of MACs in address list * @nfp_tun_mac_nb: Notifier to monitor link state * @nfp_tun_neigh_nb: Notifier to monitor neighbour state + * @reify_replies: atomically stores the number of replies received + * from firmware for repr reify + * @reify_wait_queue: wait queue for repr reify response counting */ struct nfp_flower_priv { struct nfp_app *app; @@ -127,6 +130,8 @@ struct nfp_flower_priv { int nfp_mac_off_count; struct notifier_block nfp_tun_mac_nb; struct notifier_block nfp_tun_neigh_nb; + atomic_t reify_replies; + wait_queue_head_t reify_wait_queue; }; struct nfp_fl_key_ls { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 0e5e0305ad1c..3af1943a8521 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -77,6 +77,8 @@ extern const struct nfp_app_type app_flower; * @vnic_init: vNIC netdev was registered * @vnic_clean: vNIC netdev about to be unregistered * @repr_init: representor about to be registered + * @repr_preclean: representor about to be unregistered, executed before app + * reference to it is removed * @repr_clean: representor about to be unregistered * @repr_open: representor netdev open callback * @repr_stop: representor netdev stop callback @@ -112,6 +114,7 @@ struct nfp_app_type { void (*vnic_clean)(struct nfp_app *app, struct nfp_net *nn); int (*repr_init)(struct nfp_app *app, struct net_device *netdev); + void (*repr_preclean)(struct nfp_app *app, struct net_device *netdev); void (*repr_clean)(struct nfp_app *app, struct net_device *netdev); int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); @@ -226,6 +229,13 @@ nfp_app_repr_init(struct nfp_app *app, struct net_device *netdev) } static inline void +nfp_app_repr_preclean(struct nfp_app *app, struct net_device *netdev) +{ + if (app->type->repr_preclean) + app->type->repr_preclean(app, netdev); +} + +static inline void nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev) { if (app->type->repr_clean) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h
b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 3801c52098d5..0e564cfabe7e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -47,6 +47,7 @@ #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/io-64-nonatomic-hi-lo.h> +#include <net/xdp.h> #include "nfp_net_ctrl.h" @@ -350,6 +351,7 @@ struct nfp_net_rx_buf { * @rxds: Virtual address of FL/RX ring in host memory * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) + * @xdp_rxq: RX-ring info avail for XDP */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -361,13 +363,14 @@ struct nfp_net_rx_ring { u32 idx; int fl_qcidx; + unsigned int size; u8 __iomem *qcp_fl; struct nfp_net_rx_buf *rxbufs; struct nfp_net_rx_desc *rxds; dma_addr_t dma; - unsigned int size; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned; /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 0add4870ce2e..05e071b3dc5b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1608,11 +1608,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) unsigned int true_bufsz; struct sk_buff *skb; int pkts_polled = 0; + struct xdp_buff xdp; int idx; rcu_read_lock(); xdp_prog = READ_ONCE(dp->xdp_prog); true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz; + xdp.rxq = &rx_ring->xdp_rxq; tx_ring = r_vec->xdp_ring; while (pkts_polled < budget) { @@ -1703,7 +1705,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) dp->bpf_offload_xdp) && !meta.portid) { void *orig_data = rxbuf->frag + pkt_off; unsigned int dma_off; - struct xdp_buff xdp; int act; xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; @@ -2252,6 +2253,7 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; + xdp_rxq_info_unreg(&rx_ring->xdp_rxq); kfree(rx_ring->rxbufs); if (rx_ring->rxds) @@ -2275,7 +2277,11 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) static int nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) { - int sz; + int sz, err; + + err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx); + if (err < 0) + return err; rx_ring->cnt = dp->rxd_cnt; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; @@ -2850,6 +2856,11 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) new_ctrl = nn->dp.ctrl; + if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI) + new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC; + else + new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC; + if (netdev->flags & IFF_PROMISC) { if (nn->cap & NFP_NET_CFG_CTRL_PROMISC) new_ctrl |= NFP_NET_CFG_CTRL_PROMISC; @@ -3787,8 +3798,6 @@ int nfp_net_init(struct nfp_net *nn) /* Allow L2 Broadcast and Multicast through by default, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_L2BC) nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; - if (nn->cap & NFP_NET_CFG_CTRL_L2MC) - nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC; /* Allow IRQ moderation, if supported */ if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 78b36c67c232..f50aa119570a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -336,6 +336,8 @@ struct 
net_device *nfp_repr_alloc(struct nfp_app *app) if (!netdev) return NULL; + netif_carrier_off(netdev); + repr = netdev_priv(netdev); repr->netdev = netdev; repr->app = app; @@ -375,11 +377,22 @@ nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type) { struct nfp_reprs *reprs; + int i; - reprs = nfp_app_reprs_set(app, type, NULL); + reprs = rcu_dereference_protected(app->reprs[type], + lockdep_is_held(&app->pf->lock)); if (!reprs) return; + /* Preclean must happen before we remove the reprs reference from the + * app below. + */ + for (i = 0; i < reprs->num_reprs; i++) + if (reprs->reprs[i]) + nfp_app_repr_preclean(app, reprs->reprs[i]); + + reprs = nfp_app_reprs_set(app, type, NULL); + synchronize_rcu(); nfp_reprs_clean_and_free(reprs); } @@ -418,8 +431,10 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app) continue; repr = netdev_priv(old_reprs->reprs[i]); - if (repr->port->type == NFP_PORT_INVALID) + if (repr->port->type == NFP_PORT_INVALID) { + nfp_app_repr_preclean(app, old_reprs->reprs[i]); continue; + } reprs->reprs[i] = old_reprs->reprs[i]; } @@ -436,7 +451,6 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app) if (repr->port->type != NFP_PORT_INVALID) continue; - nfp_app_repr_stop(app, repr); nfp_repr_clean(repr); } diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 49d6d789459e..21e15cb2f62e 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -793,7 +793,7 @@ struct fe_priv { /* rx specific fields. * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); */ - union ring_type get_rx, put_rx, first_rx, last_rx; + union ring_type get_rx, put_rx, last_rx; struct nv_skb_map *get_rx_ctx, *put_rx_ctx; struct nv_skb_map *first_rx_ctx, *last_rx_ctx; struct nv_skb_map *rx_skb; @@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev) struct ring_desc *less_rx; less_rx = np->get_rx.orig; - if (less_rx-- == np->first_rx.orig) + if (less_rx-- == np->rx_ring.orig) less_rx = np->last_rx.orig; while (np->put_rx.orig != less_rx) { struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); - if (skb) { + if (likely(skb)) { np->put_rx_ctx->skb = skb; np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, @@ -1833,7 +1833,7 @@ static int nv_alloc_rx(struct net_device *dev) wmb(); np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL); if (unlikely(np->put_rx.orig++ == np->last_rx.orig)) - np->put_rx.orig = np->first_rx.orig; + np->put_rx.orig = np->rx_ring.orig; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) np->put_rx_ctx = np->first_rx_ctx; } else { @@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev) struct ring_desc_ex *less_rx; less_rx = np->get_rx.ex; - if (less_rx-- == np->first_rx.ex) + if (less_rx-- == np->rx_ring.ex) less_rx = np->last_rx.ex; while (np->put_rx.ex != less_rx) { struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); - if (skb) { + if (likely(skb)) { np->put_rx_ctx->skb = skb; np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, @@ -1875,7 +1875,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev) wmb(); np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL); if (unlikely(np->put_rx.ex++ == np->last_rx.ex)) - np->put_rx.ex = np->first_rx.ex; + np->put_rx.ex = np->rx_ring.ex; if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) np->put_rx_ctx = np->first_rx_ctx; } else { @@ -1903,7 +1903,8 @@ 
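The reordering in nfp_reprs_clean_and_free_by_type() above is the classic RCU unpublish sequence: run ->repr_preclean() while app->reprs[type] is still visible to readers, then swap the pointer out, wait a grace period, and only then free. A generic sketch of that ordering, with illustrative names (struct table/struct entry and the two stubs are not from this patch):

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct entry { int dummy; };

struct table {
        struct entry __rcu *slot;
        struct mutex lock;
};

static void preclean(struct entry *e) { }       /* hypothetical callback */
static void free_entry(struct entry *e) { }     /* hypothetical release */

static void table_teardown(struct table *t)
{
        struct entry *e;

        e = rcu_dereference_protected(t->slot, lockdep_is_held(&t->lock));
        if (!e)
                return;

        preclean(e);                    /* entry still reachable by readers */
        rcu_assign_pointer(t->slot, NULL);
        synchronize_rcu();              /* wait out in-flight RCU readers */
        free_entry(e);                  /* now provably unreachable */
}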
static void nv_init_rx(struct net_device *dev) struct fe_priv *np = netdev_priv(dev); int i; - np->get_rx = np->put_rx = np->first_rx = np->rx_ring; + np->get_rx = np->rx_ring; + np->put_rx = np->rx_ring; if (!nv_optimized(np)) np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; @@ -2911,7 +2912,7 @@ static int nv_rx_process(struct net_device *dev, int limit) u64_stats_update_end(&np->swstats_rx_syncp); next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) - np->get_rx.orig = np->first_rx.orig; + np->get_rx.orig = np->rx_ring.orig; if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) np->get_rx_ctx = np->first_rx_ctx; @@ -3000,7 +3001,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) } next_pkt: if (unlikely(np->get_rx.ex++ == np->last_rx.ex)) - np->get_rx.ex = np->first_rx.ex; + np->get_rx.ex = np->rx_ring.ex; if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) np->get_rx_ctx = np->first_rx_ctx; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 26ddf092e3ec..0ee2490db729 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -85,6 +85,7 @@ config QED tristate "QLogic QED 25/40/100Gb core driver" depends on PCI select ZLIB_INFLATE + select CRC8 ---help--- This enables the support for ... diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 91003bc6f00b..69488554f4b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -52,10 +52,10 @@ extern const struct qed_common_ops qed_common_ops_pass; -#define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 10 -#define QED_REVISION_VERSION 11 -#define QED_ENGINEERING_VERSION 21 +#define QED_MAJOR_VERSION 8 +#define QED_MINOR_VERSION 33 +#define QED_REVISION_VERSION 0 +#define QED_ENGINEERING_VERSION 20 #define QED_VERSION \ ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \ @@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, return sw_fid; } -#define PURE_LB_TC 8 -#define PKT_LB_TC 9 +#define PKT_LB_TC 9 +#define MAX_NUM_VOQS_E4 20 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index afd07ad91631..6f546e869d8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -86,22 +86,22 @@ /* connection context union */ union conn_context { - struct core_conn_context core_ctx; - struct eth_conn_context eth_ctx; - struct iscsi_conn_context iscsi_ctx; - struct fcoe_conn_context fcoe_ctx; - struct roce_conn_context roce_ctx; + struct e4_core_conn_context core_ctx; + struct e4_eth_conn_context eth_ctx; + struct e4_iscsi_conn_context iscsi_ctx; + struct e4_fcoe_conn_context fcoe_ctx; + struct e4_roce_conn_context roce_ctx; }; /* TYPE-0 task context - iSCSI, FCOE */ union type0_task_context { - struct iscsi_task_context iscsi_ctx; - struct fcoe_task_context fcoe_ctx; + struct e4_iscsi_task_context iscsi_ctx; + struct e4_fcoe_task_context fcoe_ctx; }; /* TYPE-1 task context - ROCE */ union type1_task_context { - struct rdma_task_context roce_ctx; + struct e4_rdma_task_context roce_ctx; }; struct src_ent { @@ -109,8 +109,8 @@ struct src_ent { u64 next; }; -#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ -#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12)) +#define 
CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ +#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12) #define CONN_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) @@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); qed_cxt_qm_iids(p_hwfn, &qm_iids); - total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, + total = qed_qm_pf_mem_size(qm_iids.cids, qm_iids.vf_cids, qm_iids.tids, p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs); @@ -1055,11 +1055,10 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, u32 size; size = min_t(u32, sz_left, p_blk->real_size_in_page); - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - size, &p_phys, GFP_KERNEL); + p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, size, + &p_phys, GFP_KERNEL); if (!p_virt) return -ENOMEM; - memset(p_virt, 0, size); ilt_shadow[line].p_phys = p_phys; ilt_shadow[line].p_virt = p_virt; @@ -1496,20 +1495,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn) } } -void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool is_pf_loading) { - struct qed_qm_pf_rt_init_params params; struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct qed_qm_pf_rt_init_params params; + struct qed_mcp_link_state *p_link; struct qed_qm_iids iids; memset(&iids, 0, sizeof(iids)); qed_cxt_qm_iids(p_hwfn, &iids); + p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; + memset(¶ms, 0, sizeof(params)); params.port_id = p_hwfn->port_id; params.pf_id = p_hwfn->rel_pf_id; params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; - params.is_first_pf = p_hwfn->first_on_engine; + params.is_pf_loading = is_pf_loading; params.num_pf_cids = iids.cids; params.num_vf_cids = iids.vf_cids; params.num_tids = iids.tids; @@ -1520,6 +1523,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; params.pf_rl = qm_info->pf_rl; + params.link_speed = p_link->speed; params.pq_params = qm_info->qm_pq_params; params.vport_params = qm_info->qm_vport_params; @@ -1883,7 +1887,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - qed_qm_init_pf(p_hwfn, p_ptt); + qed_qm_init_pf(p_hwfn, p_ptt, true); qed_cm_init_pf(p_hwfn); qed_dq_init_pf(p_hwfn); qed_cdu_init_pf(p_hwfn); @@ -2303,14 +2307,13 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, goto out0; } - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_blk->real_size_in_page, - &p_phys, GFP_KERNEL); + p_virt = dma_zalloc_coherent(&p_hwfn->cdev->pdev->dev, + p_blk->real_size_in_page, &p_phys, + GFP_KERNEL); if (!p_virt) { rc = -ENOMEM; goto out1; } - memset(p_virt, 0, p_blk->real_size_in_page); /* configuration of refTagMask to 0xF is required for RoCE DIF MR only, * to compensate for a HW bug, but it is configured even if DIF is not @@ -2326,7 +2329,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, for (elem_i = 0; elem_i < elems_per_p; elem_i++) { elem = (union type1_task_context *)elem_start; SET_FIELD(elem->roce_ctx.tdif_context.flags1, - TDIF_TASK_CONTEXT_REFTAGMASK, 0xf); + TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 17836349a274..a4e95869889f 100644 --- 
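Both qed_cxt.c allocations above fold a dma_alloc_coherent() followed by memset(0) into a single dma_zalloc_coherent() call; behaviour is unchanged, the zeroing simply moves into the allocator. The shape of the conversion, with dev/size/p_phys standing in for the call sites above:

        /* before */
        p_virt = dma_alloc_coherent(dev, size, &p_phys, GFP_KERNEL);
        if (!p_virt)
                return -ENOMEM;
        memset(p_virt, 0, size);

        /* after */
        p_virt = dma_zalloc_coherent(dev, size, &p_phys, GFP_KERNEL);
        if (!p_virt)
                return -ENOMEM;

(Later kernels make dma_alloc_coherent() zero by default and drop the zalloc variant, but at this point dma_zalloc_coherent() is the idiomatic spelling.)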
a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); * * @param p_hwfn * @param p_ptt + * @param is_pf_loading */ -void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool is_pf_loading); /** * @brief Reconfigures QM pf on the fly diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index fe7c1f230028..449777f21237 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, struct pf_update_ramrod_data *p_dest) { struct protocol_dcb_data *p_dcb_data; - bool update_flag = false; - - p_dest->pf_id = p_src->pf_id; + u8 update_flag; update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; p_dest->update_fcoe_dcb_data_mode = update_flag; diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 03c3cf77aaff..f2633ec87a6a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -21,25 +21,26 @@ enum mem_groups { MEM_GROUP_DMAE_MEM, MEM_GROUP_CM_MEM, MEM_GROUP_QM_MEM, - MEM_GROUP_TM_MEM, + MEM_GROUP_DORQ_MEM, MEM_GROUP_BRB_RAM, MEM_GROUP_BRB_MEM, MEM_GROUP_PRS_MEM, - MEM_GROUP_SDM_MEM, MEM_GROUP_IOR, - MEM_GROUP_RAM, MEM_GROUP_BTB_RAM, - MEM_GROUP_RDIF_CTX, - MEM_GROUP_TDIF_CTX, - MEM_GROUP_CFC_MEM, MEM_GROUP_CONN_CFC_MEM, MEM_GROUP_TASK_CFC_MEM, MEM_GROUP_CAU_PI, MEM_GROUP_CAU_MEM, MEM_GROUP_PXP_ILT, + MEM_GROUP_TM_MEM, + MEM_GROUP_SDM_MEM, MEM_GROUP_PBUF, + MEM_GROUP_RAM, MEM_GROUP_MULD_MEM, MEM_GROUP_BTB_MEM, + MEM_GROUP_RDIF_CTX, + MEM_GROUP_TDIF_CTX, + MEM_GROUP_CFC_MEM, MEM_GROUP_IGU_MEM, MEM_GROUP_IGU_MSIX, MEM_GROUP_CAU_SB, @@ -54,25 +55,26 @@ static const char * const s_mem_group_names[] = { "DMAE_MEM", "CM_MEM", "QM_MEM", - "TM_MEM", + "DORQ_MEM", "BRB_RAM", "BRB_MEM", "PRS_MEM", - "SDM_MEM", "IOR", - "RAM", "BTB_RAM", - "RDIF_CTX", - "TDIF_CTX", - "CFC_MEM", "CONN_CFC_MEM", "TASK_CFC_MEM", "CAU_PI", "CAU_MEM", "PXP_ILT", + "TM_MEM", + "SDM_MEM", "PBUF", + "RAM", "MULD_MEM", "BTB_MEM", + "RDIF_CTX", + "TDIF_CTX", + "CFC_MEM", "IGU_MEM", "IGU_MSIX", "CAU_SB", @@ -92,11 +94,6 @@ static u32 cond7(const u32 *r, const u32 *imm) return ((r[0] >> imm[0]) & imm[1]) != imm[2]; } -static u32 cond14(const u32 *r, const u32 *imm) -{ - return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]); -} - static u32 cond6(const u32 *r, const u32 *imm) { return (r[0] & imm[0]) != imm[1]; @@ -174,7 +171,6 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { cond11, cond12, cond13, - cond14, }; /******************************* Data Types **********************************/ @@ -203,6 +199,8 @@ struct chip_defs { struct platform_defs { const char *name; u32 delay_factor; + u32 dmae_thresh; + u32 log_thresh; }; /* Storm constant definitions. 
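The enum mem_groups and s_mem_group_names hunks above are parallel tables that must be reordered in lockstep, and nothing in the C type system enforces that. A compile-time guard is a cheap way to catch at least a length mismatch; MEM_GROUPS_NUM is assumed here to be the enum's terminating count entry, which is not visible in these hunks:

#include <linux/build_bug.h>
#include <linux/kernel.h>

static void __maybe_unused check_mem_group_names(void)
{
        /* Catches additions or removals that touch only one table. */
        BUILD_BUG_ON(ARRAY_SIZE(s_mem_group_names) != MEM_GROUPS_NUM);
}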
@@ -234,7 +232,7 @@ struct storm_defs { /* Block constant definitions */ struct block_defs { const char *name; - bool has_dbg_bus[MAX_CHIP_IDS]; + bool exists[MAX_CHIP_IDS]; bool associated_to_storm; /* Valid only if associated_to_storm is true */ @@ -258,8 +256,8 @@ struct block_defs { /* Reset register definitions */ struct reset_reg_defs { u32 addr; - u32 unreset_val; bool exists[MAX_CHIP_IDS]; + u32 unreset_val[MAX_CHIP_IDS]; }; struct grc_param_defs { @@ -276,8 +274,8 @@ struct rss_mem_defs { const char *mem_name; const char *type_name; u32 addr; + u32 entry_width; u32 num_entries[MAX_CHIP_IDS]; - u32 entry_width[MAX_CHIP_IDS]; }; struct vfc_ram_defs { @@ -294,7 +292,9 @@ struct big_ram_defs { enum dbg_grc_params grc_param; u32 addr_reg_addr; u32 data_reg_addr; - u32 num_of_blocks[MAX_CHIP_IDS]; + u32 is_256b_reg_addr; + u32 is_256b_bit_offset[MAX_CHIP_IDS]; + u32 ram_size[MAX_CHIP_IDS]; /* In dwords */ }; struct phy_defs { @@ -358,20 +358,14 @@ struct phy_defs { (arr)[i] = qed_rd(dev, ptt, addr); \ } while (0) -#ifndef DWORDS_TO_BYTES #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD) -#endif -#ifndef BYTES_TO_DWORDS #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) -#endif -/* extra lines include a signature line + optional latency events line */ -#ifndef NUM_DBG_LINES +/* Extra lines include a signature line + optional latency events line */ #define NUM_EXTRA_DBG_LINES(block_desc) \ (1 + ((block_desc)->has_latency_events ? 1 : 0)) #define NUM_DBG_LINES(block_desc) \ ((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc)) -#endif #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2) #define RAM_LINES_TO_BYTES(lines) \ @@ -424,9 +418,6 @@ struct phy_defs { #define NUM_RSS_MEM_TYPES 5 #define NUM_BIG_RAM_TYPES 3 -#define BIG_RAM_BLOCK_SIZE_BYTES 128 -#define BIG_RAM_BLOCK_SIZE_DWORDS \ - BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES) #define NUM_PHY_TBUS_ADDRESSES 2048 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) @@ -441,23 +432,17 @@ struct phy_defs { #define FW_IMG_MAIN 1 -#ifndef REG_FIFO_ELEMENT_DWORDS #define REG_FIFO_ELEMENT_DWORDS 2 -#endif #define REG_FIFO_DEPTH_ELEMENTS 32 #define REG_FIFO_DEPTH_DWORDS \ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS) -#ifndef IGU_FIFO_ELEMENT_DWORDS #define IGU_FIFO_ELEMENT_DWORDS 4 -#endif #define IGU_FIFO_DEPTH_ELEMENTS 64 #define IGU_FIFO_DEPTH_DWORDS \ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS) -#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2 -#endif #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \ @@ -491,6 +476,11 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0}, {0, 0, 0}, + {0, 0, 0} } }, + { "reserved", + {{0, 0, 0}, + {0, 0, 0}, + {0, 0, 0}, {0, 0, 0} } } }; @@ -498,7 +488,8 @@ static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { static struct storm_defs s_storm_defs[] = { /* Tstorm */ {'T', BLOCK_TSEM, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCT}, true, TSEM_REG_FAST_MEMORY, TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2, TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -511,7 +502,8 @@ static struct storm_defs s_storm_defs[] = { /* Mstorm */ {'M', BLOCK_MSEM, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCM}, false, 
MSEM_REG_FAST_MEMORY, MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2, MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -524,7 +516,8 @@ static struct storm_defs s_storm_defs[] = { /* Ustorm */ {'U', BLOCK_USEM, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCU}, false, USEM_REG_FAST_MEMORY, USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2, USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2, @@ -537,7 +530,8 @@ static struct storm_defs s_storm_defs[] = { /* Xstorm */ {'X', BLOCK_XSEM, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCX}, false, XSEM_REG_FAST_MEMORY, XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2, XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -550,7 +544,8 @@ static struct storm_defs s_storm_defs[] = { /* Ystorm */ {'Y', BLOCK_YSEM, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCY}, false, YSEM_REG_FAST_MEMORY, YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2, YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -563,7 +558,8 @@ static struct storm_defs s_storm_defs[] = { /* Pstorm */ {'P', BLOCK_PSEM, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCS}, true, PSEM_REG_FAST_MEMORY, PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2, PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2, @@ -579,8 +575,8 @@ static struct storm_defs s_storm_defs[] = { static struct block_defs block_grc_defs = { "grc", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE, GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID, GRC_REG_DBG_FORCE_FRAME, @@ -588,30 +584,30 @@ static struct block_defs block_grc_defs = { }; static struct block_defs block_miscs_defs = { - "miscs", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "miscs", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_misc_defs = { - "misc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "misc", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_dbu_defs = { - "dbu", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "dbu", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_pglue_b_defs = { "pglue_b", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH}, PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE, PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID, PGLUE_B_REG_DBG_FORCE_FRAME, @@ -620,25 +616,26 @@ static struct block_defs block_pglue_b_defs = { static struct block_defs block_cnig_defs = { "cnig", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - CNIG_REG_DBG_SELECT_K2, 
CNIG_REG_DBG_DWORD_ENABLE_K2, - CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2, - CNIG_REG_DBG_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCW}, + CNIG_REG_DBG_SELECT_K2_E5, CNIG_REG_DBG_DWORD_ENABLE_K2_E5, + CNIG_REG_DBG_SHIFT_K2_E5, CNIG_REG_DBG_FORCE_VALID_K2_E5, + CNIG_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 0 }; static struct block_defs block_cpmu_defs = { - "cpmu", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "cpmu", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 8 }; static struct block_defs block_ncsi_defs = { "ncsi", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE, NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID, NCSI_REG_DBG_FORCE_FRAME, @@ -646,16 +643,16 @@ static struct block_defs block_ncsi_defs = { }; static struct block_defs block_opte_defs = { - "opte", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "opte", {true, true, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 4 }; static struct block_defs block_bmb_defs = { "bmb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB, DBG_BUS_CLIENT_RBCB}, BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE, BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID, BMB_REG_DBG_FORCE_FRAME, @@ -664,27 +661,28 @@ static struct block_defs block_bmb_defs = { static struct block_defs block_pcie_defs = { "pcie", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PCIE_REG_DBG_COMMON_SELECT_K2, - PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2, - PCIE_REG_DBG_COMMON_SHIFT_K2, - PCIE_REG_DBG_COMMON_FORCE_VALID_K2, - PCIE_REG_DBG_COMMON_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCH}, + PCIE_REG_DBG_COMMON_SELECT_K2_E5, + PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5, + PCIE_REG_DBG_COMMON_SHIFT_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_mcp_defs = { - "mcp", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "mcp", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_mcp2_defs = { "mcp2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE, MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID, MCP2_REG_DBG_FORCE_FRAME, @@ -693,8 +691,8 @@ static struct block_defs block_mcp2_defs = { static struct block_defs block_pswhst_defs = { "pswhst", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE, PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID, PSWHST_REG_DBG_FORCE_FRAME, @@ -703,8 +701,8 @@ static struct 
block_defs block_pswhst_defs = { static struct block_defs block_pswhst2_defs = { "pswhst2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE, PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID, PSWHST2_REG_DBG_FORCE_FRAME, @@ -713,8 +711,8 @@ static struct block_defs block_pswhst2_defs = { static struct block_defs block_pswrd_defs = { "pswrd", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE, PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID, PSWRD_REG_DBG_FORCE_FRAME, @@ -723,8 +721,8 @@ static struct block_defs block_pswrd_defs = { static struct block_defs block_pswrd2_defs = { "pswrd2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE, PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID, PSWRD2_REG_DBG_FORCE_FRAME, @@ -733,8 +731,8 @@ static struct block_defs block_pswrd2_defs = { static struct block_defs block_pswwr_defs = { "pswwr", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE, PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID, PSWWR_REG_DBG_FORCE_FRAME, @@ -742,16 +740,16 @@ static struct block_defs block_pswwr_defs = { }; static struct block_defs block_pswwr2_defs = { - "pswwr2", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "pswwr2", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISC_PL_HV, 3 }; static struct block_defs block_pswrq_defs = { "pswrq", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE, PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID, PSWRQ_REG_DBG_FORCE_FRAME, @@ -760,8 +758,8 @@ static struct block_defs block_pswrq_defs = { static struct block_defs block_pswrq2_defs = { "pswrq2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE, PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID, PSWRQ2_REG_DBG_FORCE_FRAME, @@ -770,18 +768,19 @@ static struct block_defs block_pswrq2_defs = { static struct block_defs block_pglcs_defs = { "pglcs", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2, - PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2, - PGLCS_REG_DBG_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCH}, + PGLCS_REG_DBG_SELECT_K2_E5, PGLCS_REG_DBG_DWORD_ENABLE_K2_E5, + PGLCS_REG_DBG_SHIFT_K2_E5, PGLCS_REG_DBG_FORCE_VALID_K2_E5, + PGLCS_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 2 }; static struct block_defs block_ptu_defs = { "ptu", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, 
DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE, PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID, PTU_REG_DBG_FORCE_FRAME, @@ -790,8 +789,8 @@ static struct block_defs block_ptu_defs = { static struct block_defs block_dmae_defs = { "dmae", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE, DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID, DMAE_REG_DBG_FORCE_FRAME, @@ -800,8 +799,8 @@ static struct block_defs block_dmae_defs = { static struct block_defs block_tcm_defs = { "tcm", - {true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE, TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID, TCM_REG_DBG_FORCE_FRAME, @@ -810,8 +809,8 @@ static struct block_defs block_tcm_defs = { static struct block_defs block_mcm_defs = { "mcm", - {true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE, MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID, MCM_REG_DBG_FORCE_FRAME, @@ -820,8 +819,8 @@ static struct block_defs block_mcm_defs = { static struct block_defs block_ucm_defs = { "ucm", - {true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE, UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID, UCM_REG_DBG_FORCE_FRAME, @@ -830,8 +829,8 @@ static struct block_defs block_ucm_defs = { static struct block_defs block_xcm_defs = { "xcm", - {true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, true, DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE, XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID, XCM_REG_DBG_FORCE_FRAME, @@ -840,8 +839,8 @@ static struct block_defs block_xcm_defs = { static struct block_defs block_ycm_defs = { "ycm", - {true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE, YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID, YCM_REG_DBG_FORCE_FRAME, @@ -850,8 +849,8 @@ static struct block_defs block_ycm_defs = { static struct block_defs block_pcm_defs = { "pcm", - {true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE, PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID, PCM_REG_DBG_FORCE_FRAME, @@ -860,8 +859,8 @@ static struct block_defs block_pcm_defs = { static struct block_defs block_qm_defs = { "qm", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ, DBG_BUS_CLIENT_RBCQ}, QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE, QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID, 
QM_REG_DBG_FORCE_FRAME, @@ -870,8 +869,8 @@ static struct block_defs block_qm_defs = { static struct block_defs block_tm_defs = { "tm", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE, TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID, TM_REG_DBG_FORCE_FRAME, @@ -880,8 +879,8 @@ static struct block_defs block_tm_defs = { static struct block_defs block_dorq_defs = { "dorq", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE, DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID, DORQ_REG_DBG_FORCE_FRAME, @@ -890,8 +889,8 @@ static struct block_defs block_dorq_defs = { static struct block_defs block_brb_defs = { "brb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE, BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID, BRB_REG_DBG_FORCE_FRAME, @@ -900,8 +899,8 @@ static struct block_defs block_brb_defs = { static struct block_defs block_src_defs = { "src", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE, SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID, SRC_REG_DBG_FORCE_FRAME, @@ -910,8 +909,8 @@ static struct block_defs block_src_defs = { static struct block_defs block_prs_defs = { "prs", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR}, PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE, PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID, PRS_REG_DBG_FORCE_FRAME, @@ -920,8 +919,8 @@ static struct block_defs block_prs_defs = { static struct block_defs block_tsdm_defs = { "tsdm", - {true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE, TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID, TSDM_REG_DBG_FORCE_FRAME, @@ -930,8 +929,8 @@ static struct block_defs block_tsdm_defs = { static struct block_defs block_msdm_defs = { "msdm", - {true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE, MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID, MSDM_REG_DBG_FORCE_FRAME, @@ -940,8 +939,8 @@ static struct block_defs block_msdm_defs = { static struct block_defs block_usdm_defs = { "usdm", - {true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE, USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID, USDM_REG_DBG_FORCE_FRAME, @@ -950,8 +949,8 @@ static struct block_defs block_usdm_defs = { static struct block_defs block_xsdm_defs = { "xsdm", - {true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, true, 
DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE, XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID, XSDM_REG_DBG_FORCE_FRAME, @@ -960,8 +959,8 @@ static struct block_defs block_xsdm_defs = { static struct block_defs block_ysdm_defs = { "ysdm", - {true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE, YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID, YSDM_REG_DBG_FORCE_FRAME, @@ -970,8 +969,8 @@ static struct block_defs block_ysdm_defs = { static struct block_defs block_psdm_defs = { "psdm", - {true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE, PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID, PSDM_REG_DBG_FORCE_FRAME, @@ -980,8 +979,8 @@ static struct block_defs block_psdm_defs = { static struct block_defs block_tsem_defs = { "tsem", - {true, true}, true, DBG_TSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, true, DBG_TSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE, TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID, TSEM_REG_DBG_FORCE_FRAME, @@ -990,8 +989,8 @@ static struct block_defs block_tsem_defs = { static struct block_defs block_msem_defs = { "msem", - {true, true}, true, DBG_MSTORM_ID, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, true, DBG_MSTORM_ID, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE, MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID, MSEM_REG_DBG_FORCE_FRAME, @@ -1000,8 +999,8 @@ static struct block_defs block_msem_defs = { static struct block_defs block_usem_defs = { "usem", - {true, true}, true, DBG_USTORM_ID, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, true, DBG_USTORM_ID, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE, USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID, USEM_REG_DBG_FORCE_FRAME, @@ -1010,8 +1009,8 @@ static struct block_defs block_usem_defs = { static struct block_defs block_xsem_defs = { "xsem", - {true, true}, true, DBG_XSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, true, DBG_XSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE, XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID, XSEM_REG_DBG_FORCE_FRAME, @@ -1020,8 +1019,8 @@ static struct block_defs block_xsem_defs = { static struct block_defs block_ysem_defs = { "ysem", - {true, true}, true, DBG_YSTORM_ID, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + {true, true, true}, true, DBG_YSTORM_ID, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY, DBG_BUS_CLIENT_RBCY}, YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE, YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID, YSEM_REG_DBG_FORCE_FRAME, @@ -1030,8 +1029,8 @@ static struct block_defs block_ysem_defs = { static struct block_defs block_psem_defs = { "psem", - {true, true}, true, DBG_PSTORM_ID, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, true, DBG_PSTORM_ID, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, 
PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE, PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID, PSEM_REG_DBG_FORCE_FRAME, @@ -1040,8 +1039,8 @@ static struct block_defs block_psem_defs = { static struct block_defs block_rss_defs = { "rss", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE, RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID, RSS_REG_DBG_FORCE_FRAME, @@ -1050,8 +1049,8 @@ static struct block_defs block_rss_defs = { static struct block_defs block_tmld_defs = { "tmld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE, TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID, TMLD_REG_DBG_FORCE_FRAME, @@ -1060,8 +1059,8 @@ static struct block_defs block_tmld_defs = { static struct block_defs block_muld_defs = { "muld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE, MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID, MULD_REG_DBG_FORCE_FRAME, @@ -1070,8 +1069,9 @@ static struct block_defs block_muld_defs = { static struct block_defs block_yuld_defs = { "yuld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + {true, true, false}, false, 0, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, + MAX_DBG_BUS_CLIENTS}, YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2, YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2, YULD_REG_DBG_FORCE_FRAME_BB_K2, @@ -1081,18 +1081,40 @@ static struct block_defs block_yuld_defs = { static struct block_defs block_xyld_defs = { "xyld", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE, XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID, XYLD_REG_DBG_FORCE_FRAME, true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12 }; +static struct block_defs block_ptld_defs = { + "ptld", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCT}, + PTLD_REG_DBG_SELECT_E5, PTLD_REG_DBG_DWORD_ENABLE_E5, + PTLD_REG_DBG_SHIFT_E5, PTLD_REG_DBG_FORCE_VALID_E5, + PTLD_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, + 28 +}; + +static struct block_defs block_ypld_defs = { + "ypld", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCS}, + YPLD_REG_DBG_SELECT_E5, YPLD_REG_DBG_DWORD_ENABLE_E5, + YPLD_REG_DBG_SHIFT_E5, YPLD_REG_DBG_FORCE_VALID_E5, + YPLD_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, + 27 +}; + static struct block_defs block_prm_defs = { "prm", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE, PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID, PRM_REG_DBG_FORCE_FRAME, @@ -1101,8 +1123,8 @@ static struct block_defs block_prm_defs = { static struct block_defs block_pbf_pb1_defs = { "pbf_pb1", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + 
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE, PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID, PBF_PB1_REG_DBG_FORCE_FRAME, @@ -1112,8 +1134,8 @@ static struct block_defs block_pbf_pb1_defs = { static struct block_defs block_pbf_pb2_defs = { "pbf_pb2", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE, PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID, PBF_PB2_REG_DBG_FORCE_FRAME, @@ -1123,8 +1145,8 @@ static struct block_defs block_pbf_pb2_defs = { static struct block_defs block_rpb_defs = { "rpb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE, RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID, RPB_REG_DBG_FORCE_FRAME, @@ -1133,8 +1155,8 @@ static struct block_defs block_rpb_defs = { static struct block_defs block_btb_defs = { "btb", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE, BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID, BTB_REG_DBG_FORCE_FRAME, @@ -1143,8 +1165,8 @@ static struct block_defs block_btb_defs = { static struct block_defs block_pbf_defs = { "pbf", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV, DBG_BUS_CLIENT_RBCV}, PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE, PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID, PBF_REG_DBG_FORCE_FRAME, @@ -1153,8 +1175,8 @@ static struct block_defs block_pbf_defs = { static struct block_defs block_rdif_defs = { "rdif", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM, DBG_BUS_CLIENT_RBCM}, RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE, RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID, RDIF_REG_DBG_FORCE_FRAME, @@ -1163,8 +1185,8 @@ static struct block_defs block_rdif_defs = { static struct block_defs block_tdif_defs = { "tdif", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE, TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID, TDIF_REG_DBG_FORCE_FRAME, @@ -1173,8 +1195,8 @@ static struct block_defs block_tdif_defs = { static struct block_defs block_cdu_defs = { "cdu", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE, CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID, CDU_REG_DBG_FORCE_FRAME, @@ -1183,8 +1205,8 @@ static struct block_defs block_cdu_defs = { static struct block_defs block_ccfc_defs = { "ccfc", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE, CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID, CCFC_REG_DBG_FORCE_FRAME, @@ -1193,8 +1215,8 @@ static struct block_defs block_ccfc_defs = { 
static struct block_defs block_tcfc_defs = { "tcfc", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF}, TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE, TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID, TCFC_REG_DBG_FORCE_FRAME, @@ -1203,8 +1225,8 @@ static struct block_defs block_tcfc_defs = { static struct block_defs block_igu_defs = { "igu", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE, IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID, IGU_REG_DBG_FORCE_FRAME, @@ -1213,42 +1235,79 @@ static struct block_defs block_igu_defs = { static struct block_defs block_cau_defs = { "cau", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP}, CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE, CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID, CAU_REG_DBG_FORCE_FRAME, true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19 }; +static struct block_defs block_rgfs_defs = { + "rgfs", {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 29 +}; + +static struct block_defs block_rgsrc_defs = { + "rgsrc", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, + RGSRC_REG_DBG_SELECT_E5, RGSRC_REG_DBG_DWORD_ENABLE_E5, + RGSRC_REG_DBG_SHIFT_E5, RGSRC_REG_DBG_FORCE_VALID_E5, + RGSRC_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, + 30 +}; + +static struct block_defs block_tgfs_defs = { + "tgfs", {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + 0, 0, 0, 0, 0, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 30 +}; + +static struct block_defs block_tgsrc_defs = { + "tgsrc", + {false, false, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCV}, + TGSRC_REG_DBG_SELECT_E5, TGSRC_REG_DBG_DWORD_ENABLE_E5, + TGSRC_REG_DBG_SHIFT_E5, TGSRC_REG_DBG_FORCE_VALID_E5, + TGSRC_REG_DBG_FORCE_FRAME_E5, + true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, + 31 +}; + static struct block_defs block_umac_defs = { "umac", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2, - UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2, - UMAC_REG_DBG_FORCE_FRAME_K2, + {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_RBCZ}, + UMAC_REG_DBG_SELECT_K2_E5, UMAC_REG_DBG_DWORD_ENABLE_K2_E5, + UMAC_REG_DBG_SHIFT_K2_E5, UMAC_REG_DBG_FORCE_VALID_K2_E5, + UMAC_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 6 }; static struct block_defs block_xmac_defs = { - "xmac", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "xmac", {true, false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_dbg_defs = { - "dbg", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "dbg", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3 }; 
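Every block_defs initializer in these hunks grows from two chip columns to three: the exists[], debug-bus-client, and register fields are all indexed by chip ID, and the _K2 register suffixes become _K2_E5 wherever the K2 address also serves the new E5 chip. Consuming such a table reduces to an array lookup; the chip_ids enumerators and the dbg_client_id field name below are assumptions, since only exists[MAX_CHIP_IDS] is spelled out in these hunks:

/* Assumed enumerators matching the three columns (BB, K2, E5). */
enum chip_ids { CHIP_BB, CHIP_K2, CHIP_E5, MAX_CHIP_IDS };

static bool block_exists(const struct block_defs *blk, enum chip_ids chip)
{
        return blk->exists[chip];
}

static u8 block_dbg_client(const struct block_defs *blk, enum chip_ids chip)
{
        /* dbg_client_id is an assumed name for the client column. */
        return blk->dbg_client_id[chip];
}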
static struct block_defs block_nig_defs = { "nig", - {true, true}, false, 0, - {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, + {true, true, true}, false, 0, + {DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN}, NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE, NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID, NIG_REG_DBG_FORCE_FRAME, @@ -1257,139 +1316,106 @@ static struct block_defs block_nig_defs = { static struct block_defs block_wol_defs = { "wol", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2, - WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2, - WOL_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + WOL_REG_DBG_SELECT_K2_E5, WOL_REG_DBG_DWORD_ENABLE_K2_E5, + WOL_REG_DBG_SHIFT_K2_E5, WOL_REG_DBG_FORCE_VALID_K2_E5, + WOL_REG_DBG_FORCE_FRAME_K2_E5, true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7 }; static struct block_defs block_bmbn_defs = { "bmbn", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB}, - BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2, - BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2, - BMBN_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCB}, + BMBN_REG_DBG_SELECT_K2_E5, BMBN_REG_DBG_DWORD_ENABLE_K2_E5, + BMBN_REG_DBG_SHIFT_K2_E5, BMBN_REG_DBG_FORCE_VALID_K2_E5, + BMBN_REG_DBG_FORCE_FRAME_K2_E5, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_ipc_defs = { - "ipc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "ipc", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_UA, 8 }; static struct block_defs block_nwm_defs = { "nwm", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2, - NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2, - NWM_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW}, + NWM_REG_DBG_SELECT_K2_E5, NWM_REG_DBG_DWORD_ENABLE_K2_E5, + NWM_REG_DBG_SHIFT_K2_E5, NWM_REG_DBG_FORCE_VALID_K2_E5, + NWM_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0 }; static struct block_defs block_nws_defs = { "nws", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW}, - NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2, - NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2, - NWS_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW, DBG_BUS_CLIENT_RBCW}, + NWS_REG_DBG_SELECT_K2_E5, NWS_REG_DBG_DWORD_ENABLE_K2_E5, + NWS_REG_DBG_SHIFT_K2_E5, NWS_REG_DBG_FORCE_VALID_K2_E5, + NWS_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 12 }; static struct block_defs block_ms_defs = { "ms", - {false, true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ}, - MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2, - MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2, - MS_REG_DBG_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ}, + MS_REG_DBG_SELECT_K2_E5, MS_REG_DBG_DWORD_ENABLE_K2_E5, + MS_REG_DBG_SHIFT_K2_E5, MS_REG_DBG_FORCE_VALID_K2_E5, + MS_REG_DBG_FORCE_FRAME_K2_E5, true, false, DBG_RESET_REG_MISCS_PL_HV, 13 }; static struct block_defs block_phy_pcie_defs = { "phy_pcie", - {false, 
true}, false, 0, - {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH}, - PCIE_REG_DBG_COMMON_SELECT_K2, - PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2, - PCIE_REG_DBG_COMMON_SHIFT_K2, - PCIE_REG_DBG_COMMON_FORCE_VALID_K2, - PCIE_REG_DBG_COMMON_FORCE_FRAME_K2, + {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCH}, + PCIE_REG_DBG_COMMON_SELECT_K2_E5, + PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5, + PCIE_REG_DBG_COMMON_SHIFT_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5, + PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_led_defs = { - "led", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "led", {false, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_HV, 14 }; static struct block_defs block_avs_wrap_defs = { - "avs_wrap", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "avs_wrap", {false, true, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, true, false, DBG_RESET_REG_MISCS_PL_UA, 11 }; -static struct block_defs block_rgfs_defs = { - "rgfs", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_rgsrc_defs = { - "rgsrc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_tgfs_defs = { - "tgfs", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_tgsrc_defs = { - "tgsrc", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_ptld_defs = { - "ptld", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, - 0, 0, 0, 0, 0, - false, false, MAX_DBG_RESET_REGS, 0 -}; - -static struct block_defs block_ypld_defs = { - "ypld", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, +static struct block_defs block_pxpreqbus_defs = { + "pxpreqbus", {false, false, false}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_misc_aeu_defs = { - "misc_aeu", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "misc_aeu", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; static struct block_defs block_bar0_map_defs = { - "bar0_map", {false, false}, false, 0, - {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, + "bar0_map", {true, true, true}, false, 0, + {MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS}, 0, 0, 0, 0, 0, false, false, MAX_DBG_RESET_REGS, 0 }; @@ -1480,164 +1506,160 @@ static struct block_defs *s_block_defs[MAX_BLOCK_ID] = { &block_phy_pcie_defs, &block_led_defs, &block_avs_wrap_defs, + &block_pxpreqbus_defs, &block_misc_aeu_defs, &block_bar0_map_defs, }; static struct platform_defs s_platform_defs[] = { - {"asic", 1}, - {"reserved", 0}, - {"reserved2", 0}, - {"reserved3", 0} + {"asic", 1, 256, 32768}, + {"reserved", 0, 0, 0}, + {"reserved2", 0, 0, 0}, + {"reserved3", 0, 0, 0} }; static struct grc_param_defs 
s_grc_param_defs[] = { /* DBG_GRC_PARAM_DUMP_TSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_MSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_USTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_XSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_YSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_PSTORM */ - {{1, 1}, 0, 1, false, 1, 1}, + {{1, 1, 1}, 0, 1, false, 1, 1}, /* DBG_GRC_PARAM_DUMP_REGS */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RAM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PBUF */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IOR */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_VFC */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CM_CTX */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_ILT */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_RSS */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CAU */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_QM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MCP */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_RESERVED */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_CFC */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_IGU */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BRB */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BTB */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_BMB */ - {{0, 0}, 0, 1, false, 0, 1}, + {{0, 0, 0}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_NIG */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_MULD */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PRS */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DMAE */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_TM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_SDM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_DIF */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_STATIC */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_UNSTALL */ - {{0, 0}, 0, 1, false, 0, 0}, + {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NUM_LCIDS */ - {{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, + {{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS, MAX_LCIDS}, /* DBG_GRC_PARAM_NUM_LTIDS */ - {{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, + {{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS, MAX_LTIDS}, /* DBG_GRC_PARAM_EXCLUDE_ALL */ - {{0, 0}, 0, 1, true, 0, 0}, + {{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_CRASH */ - {{0, 0}, 0, 1, true, 0, 0}, + 
{{0, 0, 0}, 0, 1, true, 0, 0}, /* DBG_GRC_PARAM_PARITY_SAFE */ - {{0, 0}, 0, 1, false, 1, 0}, + {{0, 0, 0}, 0, 1, false, 1, 0}, /* DBG_GRC_PARAM_DUMP_CM */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_DUMP_PHY */ - {{1, 1}, 0, 1, false, 0, 1}, + {{1, 1, 1}, 0, 1, false, 0, 1}, /* DBG_GRC_PARAM_NO_MCP */ - {{0, 0}, 0, 1, false, 0, 0}, + {{0, 0, 0}, 0, 1, false, 0, 0}, /* DBG_GRC_PARAM_NO_FW_VER */ - {{0, 0}, 0, 1, false, 0, 0} + {{0, 0, 0}, 0, 1, false, 0, 0} }; static struct rss_mem_defs s_rss_mem_defs[] = { - { "rss_mem_cid", "rss_cid", 0, - {256, 320}, - {32, 32} }, + { "rss_mem_cid", "rss_cid", 0, 32, + {256, 320, 512} }, - { "rss_mem_key_msb", "rss_key", 1024, - {128, 208}, - {256, 256} }, + { "rss_mem_key_msb", "rss_key", 1024, 256, + {128, 208, 257} }, - { "rss_mem_key_lsb", "rss_key", 2048, - {128, 208}, - {64, 64} }, + { "rss_mem_key_lsb", "rss_key", 2048, 64, + {128, 208, 257} }, - { "rss_mem_info", "rss_info", 3072, - {128, 208}, - {16, 16} }, + { "rss_mem_info", "rss_info", 3072, 16, + {128, 208, 256} }, - { "rss_mem_ind", "rss_ind", 4096, - {16384, 26624}, - {16, 16} } + { "rss_mem_ind", "rss_ind", 4096, 16, + {16384, 26624, 32768} } }; static struct vfc_ram_defs s_vfc_ram_defs[] = { @@ -1650,72 +1672,75 @@ static struct vfc_ram_defs s_vfc_ram_defs[] = { static struct big_ram_defs s_big_ram_defs[] = { { "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, - {4800, 5632} }, + MISC_REG_BLOCK_256B_EN, {0, 0, 0}, + {153600, 180224, 282624} }, { "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, - {2880, 3680} }, + MISC_REG_BLOCK_256B_EN, {0, 1, 1}, + {92160, 117760, 168960} }, { "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, - {1152, 1152} } + MISCS_REG_BLOCK_256B_EN, {0, 0, 0}, + {36864, 36864, 36864} } }; static struct reset_reg_defs s_reset_regs_defs[] = { /* DBG_RESET_REG_MISCS_PL_UA */ - { MISCS_REG_RESET_PL_UA, 0x0, - {true, true} }, + { MISCS_REG_RESET_PL_UA, + {true, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISCS_PL_HV */ - { MISCS_REG_RESET_PL_HV, 0x0, - {true, true} }, + { MISCS_REG_RESET_PL_HV, + {true, true, true}, {0x0, 0x400, 0x600} }, /* DBG_RESET_REG_MISCS_PL_HV_2 */ - { MISCS_REG_RESET_PL_HV_2_K2, 0x0, - {false, true} }, + { MISCS_REG_RESET_PL_HV_2_K2_E5, + {false, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISC_PL_UA */ - { MISC_REG_RESET_PL_UA, 0x0, - {true, true} }, + { MISC_REG_RESET_PL_UA, + {true, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISC_PL_HV */ - { MISC_REG_RESET_PL_HV, 0x0, - {true, true} }, + { MISC_REG_RESET_PL_HV, + {true, true, true}, {0x0, 0x0, 0x0} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */ - { MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040, - {true, true} }, + { MISC_REG_RESET_PL_PDA_VMAIN_1, + {true, true, true}, {0x4404040, 0x4404040, 0x404040} }, /* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */ - { MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007, - {true, true} }, + { MISC_REG_RESET_PL_PDA_VMAIN_2, + {true, true, true}, {0x7, 0x7c00007, 0x5c08007} }, /* DBG_RESET_REG_MISC_PL_PDA_VAUX */ - { MISC_REG_RESET_PL_PDA_VAUX, 0x2, - {true, true} }, + { MISC_REG_RESET_PL_PDA_VAUX, + {true, true, true}, {0x2, 0x2, 0x2} }, }; static struct phy_defs s_phy_defs[] = { {"nw_phy", NWS_REG_NWS_CMU_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, - PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, 
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, - {"sgmii_phy", MS_REG_MS_CMU_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, - PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, - {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, - {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, - PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5}, + {"sgmii_phy", MS_REG_MS_CMU_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, + {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, + {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5}, }; /**************************** Private Functions ******************************/ @@ -1774,7 +1799,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn, /* Initializes the GRC parameters */ qed_dbg_grc_init_params(p_hwfn); - dev_data->initialized = true; + dev_data->use_dmae = true; + dev_data->num_regs_read = 0; + dev_data->initialized = 1; return DBG_STATUS_OK; } @@ -1807,7 +1834,7 @@ static void qed_read_fw_info(struct qed_hwfn *p_hwfn, * The address is located in the last line of the Storm RAM. */ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + - DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE_BB_K2) - sizeof(fw_info_location); dest = (u32 *)&fw_info_location; @@ -2071,8 +2098,7 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, /* Writes the "last" section (including CRC) to the specified buffer at the * given offset. Returns the dumped size in dwords. 
*/ -static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn, - u32 *dump_buf, u32 offset, bool dump) +static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump) { u32 start_offset = offset; @@ -2235,7 +2261,8 @@ static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, case MEM_GROUP_CFC_MEM: case MEM_GROUP_CONN_CFC_MEM: case MEM_GROUP_TASK_CFC_MEM: - return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC); + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || + qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX); case MEM_GROUP_IGU_MEM: case MEM_GROUP_IGU_MSIX: return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); @@ -2293,7 +2320,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { struct block_defs *block = s_block_defs[block_id]; - if (block->has_reset_bit && block->unreset) + if (block->exists[dev_data->chip_id] && block->has_reset_bit && + block->unreset) reg_val[block->reset_reg] |= BIT(block->reset_bit_offset); } @@ -2303,7 +2331,8 @@ static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, if (!s_reset_regs_defs[i].exists[dev_data->chip_id]) continue; - reg_val[i] |= s_reset_regs_defs[i].unreset_val; + reg_val[i] |= + s_reset_regs_defs[i].unreset_val[dev_data->chip_id]; if (reg_val[i]) qed_wr(p_hwfn, @@ -2413,6 +2442,18 @@ static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, return offset; } +/* Reads the specified registers into the specified buffer. + * The addr and len arguments are specified in dwords. + */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i)); +} + /* Dumps the GRC registers in the specified address range. * Returns the dumped size in dwords. * The addr and len arguments are specified in dwords. @@ -2422,15 +2463,39 @@ static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 addr, u32 len, bool wide_bus) { - u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i; + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; if (!dump) return len; - for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++) - *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr); + /* Print log if needed */ + dev_data->num_regs_read += len; + if (dev_data->num_regs_read >= + s_platform_defs[dev_data->platform_id].log_thresh) { + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Dumping %d registers...\n", + dev_data->num_regs_read); + dev_data->num_regs_read = 0; + } - return offset; + /* Try reading using DMAE */ + if (dev_data->use_dmae && + (len >= s_platform_defs[dev_data->platform_id].dmae_thresh || + wide_bus)) { + if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr), + (u64)(uintptr_t)(dump_buf), len, 0)) + return len; + dev_data->use_dmae = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Failed reading from chip using DMAE, using GRC instead\n"); + } + + /* Read registers */ + qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); + + return len; } /* Dumps GRC registers sequence header. Returns the dumped size in dwords. 
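
The reworked qed_grc_dump_addr_range() above prefers a single DMAE transaction for long or wide-bus ranges and permanently drops back to dword-by-dword GRC reads once a DMAE transfer fails. A minimal sketch of that pattern, using the helpers from the hunk above (read_range_sketch() and DMAE_THRESH are illustrative stand-ins, not part of the patch):

/* Sketch of the DMAE-first read strategy from qed_grc_dump_addr_range().
 * Helper names (qed_dmae_grc2host, qed_read_regs, DWORDS_TO_BYTES) are the
 * ones used in the hunk above; DMAE_THRESH stands in for
 * s_platform_defs[platform_id].dmae_thresh and is illustrative only.
 */
#define DMAE_THRESH 256	/* illustrative; real value comes from s_platform_defs */

static u32 read_range_sketch(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			     u32 *buf, u32 addr, u32 len, bool wide_bus,
			     bool *use_dmae)
{
	/* One DMAE transaction beats 'len' slow GRC reads for long ranges,
	 * and wide-bus memories should be read via DMAE anyway.
	 */
	if (*use_dmae && (len >= DMAE_THRESH || wide_bus)) {
		if (!qed_dmae_grc2host(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr),
				       (u64)(uintptr_t)buf, len, 0))
			return len;	/* DMAE read succeeded */

		/* Latch DMAE off so a failing engine isn't retried on
		 * every remaining range of this dump.
		 */
		*use_dmae = false;
	}

	/* Fallback: plain GRC register reads, one dword at a time */
	qed_read_regs(p_hwfn, p_ptt, buf, addr, len);

	return len;
}

Latching use_dmae off after the first failure is the key design choice: it avoids paying a failed-DMAE timeout on each of the hundreds of address ranges a full GRC dump walks, which is also why the reg_fifo/igu_fifo/protection-override dumps below are switched from direct qed_dmae_grc2host() calls to this same qed_grc_dump_addr_range() path.
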
@@ -2630,9 +2695,6 @@ static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, chip = &s_chip_defs[dev_data->chip_id]; chip_platform = &chip->per_platform[dev_data->platform_id]; - if (dump) - DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n"); - while (input_offset < s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) { const struct dbg_dump_split_hdr *split_hdr; @@ -2966,22 +3028,12 @@ static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "name", buf); - if (dump) - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "Dumping %d registers from %s...\n", - len, buf); } else { /* Dump address */ u32 addr_in_bytes = DWORDS_TO_BYTES(addr); offset += qed_dump_num_param(dump_buf + offset, dump, "addr", addr_in_bytes); - if (dump && len > 64) - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, - "Dumping %d registers from address 0x%x...\n", - len, addr_in_bytes); } /* Dump len */ @@ -3530,17 +3582,16 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, u8 rss_mem_id; for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) { - u32 rss_addr, num_entries, entry_width, total_dwords, i; + u32 rss_addr, num_entries, total_dwords; struct rss_mem_defs *rss_defs; - u32 addr, size; + u32 addr, num_dwords_to_read; bool packed; rss_defs = &s_rss_mem_defs[rss_mem_id]; rss_addr = rss_defs->addr; num_entries = rss_defs->num_entries[dev_data->chip_id]; - entry_width = rss_defs->entry_width[dev_data->chip_id]; - total_dwords = (num_entries * entry_width) / 32; - packed = (entry_width == 16); + total_dwords = (num_entries * rss_defs->entry_width) / 32; + packed = (rss_defs->entry_width == 16); offset += qed_grc_dump_mem_hdr(p_hwfn, dump_buf + offset, @@ -3548,7 +3599,7 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, rss_defs->mem_name, 0, total_dwords, - entry_width, + rss_defs->entry_width, packed, rss_defs->type_name, false, 0); @@ -3559,16 +3610,20 @@ static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, } addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); - size = RSS_REG_RSS_RAM_DATA_SIZE; - for (i = 0; i < total_dwords; i += size, rss_addr++) { + while (total_dwords) { + num_dwords_to_read = min_t(u32, + RSS_REG_RSS_RAM_DATA_SIZE, + total_dwords); qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, dump, addr, - size, + num_dwords_to_read, false); + total_dwords -= num_dwords_to_read; + rss_addr++; } } @@ -3581,14 +3636,18 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u8 big_ram_id) { struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; - u32 total_blocks, ram_size, offset = 0, i; + u32 block_size, ram_size, offset = 0, reg_val, i; char mem_name[12] = "???_BIG_RAM"; char type_name[8] = "???_RAM"; struct big_ram_defs *big_ram; big_ram = &s_big_ram_defs[big_ram_id]; - total_blocks = big_ram->num_of_blocks[dev_data->chip_id]; - ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS; + ram_size = big_ram->ram_size[dev_data->chip_id]; + + reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr); + block_size = reg_val & + BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 
256 + : 128; strncpy(type_name, big_ram->instance_name, strlen(big_ram->instance_name)); @@ -3602,7 +3661,7 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, mem_name, 0, ram_size, - BIG_RAM_BLOCK_SIZE_BYTES * 8, + block_size * 8, false, type_name, false, 0); /* Read and dump Big RAM data */ @@ -3610,12 +3669,13 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, return offset + ram_size; /* Dump Big RAM */ - for (i = 0; i < total_blocks / 2; i++) { + for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); + i++) { u32 addr, len; qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i); addr = BYTES_TO_DWORDS(big_ram->data_reg_addr); - len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS; + len = BRB_REG_BIG_RAM_DATA_SIZE; offset += qed_grc_dump_addr_range(p_hwfn, p_ptt, dump_buf + offset, @@ -3649,7 +3709,7 @@ static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, dump, NULL, BYTES_TO_DWORDS(MCP_REG_SCRATCH), - MCP_REG_SCRATCH_SIZE, + MCP_REG_SCRATCH_SIZE_BB_K2, false, 0, false, "MCP", false, 0); /* Dump MCP cpu_reg_file */ @@ -3710,7 +3770,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, phy_defs->tbus_data_lo_addr; data_hi_addr = phy_defs->base_addr + phy_defs->tbus_data_hi_addr; - bytes_buf = (u8 *)(dump_buf + offset); if (snprintf(mem_name, sizeof(mem_name), "tbus_%s", phy_defs->phy_name) < 0) @@ -3730,6 +3789,7 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, continue; } + bytes_buf = (u8 *)(dump_buf + offset); for (tbus_hi_offset = 0; tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); tbus_hi_offset++) { @@ -3778,19 +3838,17 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; u32 block_id, line_id, offset = 0; - /* Skip static debug if a debug bus recording is in progress */ - if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) + /* Don't dump static debug if a debug bus recording is in progress */ + if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) return 0; if (dump) { - DP_VERBOSE(p_hwfn, - QED_MSG_DEBUG, "Dumping static debug data...\n"); - /* Disable all blocks debug output */ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { struct block_defs *block = s_block_defs[block_id]; - if (block->has_dbg_bus[dev_data->chip_id]) + if (block->dbg_client_id[dev_data->chip_id] != + MAX_DBG_BUS_CLIENTS) qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0); } @@ -3811,12 +3869,12 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, u32 block_dwords, addr, len; u8 dbg_client_id; - if (!block->has_dbg_bus[dev_data->chip_id]) + if (block->dbg_client_id[dev_data->chip_id] == + MAX_DBG_BUS_CLIENTS) continue; - block_desc = - get_dbg_bus_block_desc(p_hwfn, - (enum block_id)block_id); + block_desc = get_dbg_bus_block_desc(p_hwfn, + (enum block_id)block_id); block_dwords = NUM_DBG_LINES(block_desc) * STATIC_DEBUG_LINE_DWORDS; @@ -4044,7 +4102,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, dump_buf + offset, dump); /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); if (dump) { /* Unstall storms */ @@ -4253,30 +4311,33 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, if (!check_rule && dump) continue; + if (!dump) { + u32 entry_dump_size = + qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + false, + rule->rule_id, + rule, + 0, + NULL); + + offset += num_reg_entries * entry_dump_size; + (*num_failing_rules) += num_reg_entries; + continue; 
+ } + /* Go over all register entries (number of entries is the same * for all condition registers). */ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { u32 next_reg_offset = 0; - if (!dump) { - offset += qed_idle_chk_dump_failure(p_hwfn, - p_ptt, - dump_buf + offset, - false, - rule->rule_id, - rule, - entry_id, - NULL); - (*num_failing_rules)++; - break; - } - /* Read current entry of all condition registers */ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { const struct dbg_idle_chk_cond_reg *reg = - &cond_regs[reg_id]; + &cond_regs[reg_id]; u32 padded_entry_size, addr; bool wide_bus; @@ -4291,9 +4352,9 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, if (reg->num_entries > 1 || reg->start_entry > 0) { padded_entry_size = - reg->entry_size > 1 ? - roundup_pow_of_two(reg->entry_size) - : 1; + reg->entry_size > 1 ? + roundup_pow_of_two(reg->entry_size) : + 1; addr += (reg->start_entry + entry_id) * padded_entry_size; } @@ -4329,7 +4390,6 @@ qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, entry_id, cond_reg_values); (*num_failing_rules)++; - break; } } } @@ -4402,7 +4462,7 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, dump, "num_rules", num_failing_rules); /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); return offset; } @@ -4474,7 +4534,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, (nvram_offset_bytes + read_offset) | (bytes_to_copy << - DRV_MB_PARAM_NVM_LEN_SHIFT), + DRV_MB_PARAM_NVM_LEN_OFFSET), &ret_mcp_resp, &ret_mcp_param, &ret_read_size, (u32 *)((u8 *)ret_buf + read_offset))) @@ -4701,7 +4761,7 @@ static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, offset += trace_meta_size_dwords; /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4717,7 +4777,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 dwords_read, size_param_offset, offset = 0; + u32 dwords_read, size_param_offset, offset = 0, addr, len; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4753,14 +4813,18 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, * buffer size since more entries could be added to the buffer as we are * emptying it. 
*/ + addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO); + len = REG_FIFO_ELEMENT_DWORDS; for (dwords_read = 0; fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; - dwords_read += REG_FIFO_ELEMENT_DWORDS, offset += - REG_FIFO_ELEMENT_DWORDS) { - if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO, - (u64)(uintptr_t)(&dump_buf[offset]), - REG_FIFO_ELEMENT_DWORDS, 0)) - return DBG_STATUS_DMAE_FAILED; + dwords_read += REG_FIFO_ELEMENT_DWORDS) { + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + len, + true); fifo_has_data = qed_rd(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA) > 0; } @@ -4769,7 +4833,7 @@ static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, dwords_read); out: /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4782,7 +4846,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, bool dump, u32 *num_dumped_dwords) { - u32 dwords_read, size_param_offset, offset = 0; + u32 dwords_read, size_param_offset, offset = 0, addr, len; bool fifo_has_data; *num_dumped_dwords = 0; @@ -4818,16 +4882,19 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, * buffer size since more entries could be added to the buffer as we are * emptying it. */ + addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY); + len = IGU_FIFO_ELEMENT_DWORDS; for (dwords_read = 0; fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; - dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset += - IGU_FIFO_ELEMENT_DWORDS) { - if (qed_dmae_grc2host(p_hwfn, p_ptt, - IGU_REG_ERROR_HANDLING_MEMORY, - (u64)(uintptr_t)(&dump_buf[offset]), - IGU_FIFO_ELEMENT_DWORDS, 0)) - return DBG_STATUS_DMAE_FAILED; - fifo_has_data = qed_rd(p_hwfn, p_ptt, + dwords_read += IGU_FIFO_ELEMENT_DWORDS) { + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + len, + true); + fifo_has_data = qed_rd(p_hwfn, p_ptt, IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; } @@ -4835,7 +4902,7 @@ static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, dwords_read); out: /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4849,7 +4916,7 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, bool dump, u32 *num_dumped_dwords) { - u32 size_param_offset, override_window_dwords, offset = 0; + u32 size_param_offset, override_window_dwords, offset = 0, addr; *num_dumped_dwords = 0; @@ -4875,20 +4942,21 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, /* Add override window info to buffer */ override_window_dwords = - qed_rd(p_hwfn, p_ptt, - GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * - PROTECTION_OVERRIDE_ELEMENT_DWORDS; - if (qed_dmae_grc2host(p_hwfn, p_ptt, - GRC_REG_PROTECTION_OVERRIDE_WINDOW, - (u64)(uintptr_t)(dump_buf + offset), - override_window_dwords, 0)) - return DBG_STATUS_DMAE_FAILED; - offset += override_window_dwords; + qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * + PROTECTION_OVERRIDE_ELEMENT_DWORDS; + addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + override_window_dwords, + true); qed_dump_num_param(dump_buf + size_param_offset, dump, "size", override_window_dwords); out: /* Dump last section */ - 
offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); *num_dumped_dwords = offset; @@ -4952,9 +5020,9 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); - last_list_idx = (next_list_idx > 0 - ? next_list_idx - : asserts->list_num_elements) - 1; + last_list_idx = (next_list_idx > 0 ? + next_list_idx : + asserts->list_num_elements) - 1; addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + asserts->list_dword_offset + last_list_idx * asserts->list_element_dword_size; @@ -4967,7 +5035,7 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, } /* Dump last section */ - offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump); + offset += qed_dump_last_section(dump_buf, offset, dump); return offset; } @@ -5596,10 +5664,6 @@ struct igu_fifo_addr_data { #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4 -/********************************* Macros ************************************/ - -#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) - /***************************** Constant Arrays *******************************/ struct user_dbg_array { @@ -5698,6 +5762,7 @@ static struct block_info s_block_info_arr[] = { {"phy_pcie", BLOCK_PHY_PCIE}, {"led", BLOCK_LED}, {"avs_wrap", BLOCK_AVS_WRAP}, + {"pxpreqbus", BLOCK_PXPREQBUS}, {"misc_aeu", BLOCK_MISC_AEU}, {"bar0_map", BLOCK_BAR0_MAP} }; @@ -5830,8 +5895,8 @@ static const char * const s_status_str[] = { /* DBG_STATUS_MCP_COULD_NOT_RESUME */ "Failed to resume MCP after halt", - /* DBG_STATUS_DMAE_FAILED */ - "DMAE transaction failed", + /* DBG_STATUS_RESERVED2 */ + "Reserved debug status - shouldn't be returned", /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */ "Failed to empty SEMI sync FIFO", @@ -6109,6 +6174,7 @@ static u32 qed_read_param(u32 *dump_buf, if (*(char_buf + offset++)) { /* String param */ *param_str_val = char_buf + offset; + *param_num_val = 0; offset += strlen(*param_str_val) + 1; if (offset & 0x3) offset += (4 - (offset & 0x3)); @@ -6177,8 +6243,7 @@ static u32 qed_print_section_params(u32 *dump_buf, /* Parses the idle check rules and returns the number of characters printed. * In case of parsing error, returns 0. */ -static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, - u32 *dump_buf, +static u32 qed_parse_idle_chk_dump_rules(u32 *dump_buf, u32 *dump_buf_end, u32 num_rules, bool print_fw_idle_chk, @@ -6322,8 +6387,7 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, * parsed_results_bytes. * The parsing status is returned. */ -static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, +static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, u32 num_dumped_dwords, char *results_buf, u32 *parsed_results_bytes, @@ -6375,13 +6439,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset), "FW_IDLE_CHECK:\n"); rules_print_size = - qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, - dump_buf_end, num_rules, + qed_parse_idle_chk_dump_rules(dump_buf, + dump_buf_end, + num_rules, true, results_buf ? 
results_buf + - results_offset : NULL, - num_errors, num_warnings); + results_offset : + NULL, + num_errors, + num_warnings); results_offset += rules_print_size; if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; @@ -6392,13 +6459,16 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, results_offset), "\nLSI_IDLE_CHECK:\n"); rules_print_size = - qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf, - dump_buf_end, num_rules, + qed_parse_idle_chk_dump_rules(dump_buf, + dump_buf_end, + num_rules, false, results_buf ? results_buf + - results_offset : NULL, - num_errors, num_warnings); + results_offset : + NULL, + num_errors, + num_warnings); results_offset += rules_print_size; if (!rules_print_size) return DBG_STATUS_IDLE_CHK_PARSE_FAILED; @@ -6537,7 +6607,6 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, */ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, - u32 num_dumped_dwords, char *results_buf, u32 *parsed_results_bytes) { @@ -6725,9 +6794,7 @@ free_mem: * parsed_results_bytes. * The parsing status is returned. */ -static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -6834,8 +6901,7 @@ static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn, static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element *element, char *results_buf, - u32 *results_offset, - u32 *parsed_results_bytes) + u32 *results_offset) { const struct igu_fifo_addr_data *found_addr = NULL; u8 source, err_type, i, is_cleanup; @@ -6933,9 +6999,9 @@ static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ", prod_cons, update_flag ? "update" : "nop", - en_dis_int_for_sb - ? (en_dis_int_for_sb == 1 ? "disable" : "nop") - : "enable", + en_dis_int_for_sb ? + (en_dis_int_for_sb == 1 ? "disable" : "nop") : + "enable", segment ? "attn" : "regular", timer_mask); } @@ -6969,9 +7035,7 @@ out: * parsed_results_bytes. * The parsing status is returned. */ -static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -7011,8 +7075,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, for (i = 0; i < num_elements; i++) { status = qed_parse_igu_fifo_element(&elements[i], results_buf, - &results_offset, - parsed_results_bytes); + &results_offset); if (status != DBG_STATUS_OK) return status; } @@ -7028,9 +7091,7 @@ static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn, } static enum dbg_status -qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +qed_parse_protection_override_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -7105,9 +7166,7 @@ qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn, * parsed_results_bytes. * The parsing status is returned. 
*/ -static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn, - u32 *dump_buf, - u32 num_dumped_dwords, +static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf, char *results_buf, u32 *parsed_results_bytes) { @@ -7209,8 +7268,7 @@ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, { u32 num_errors, num_warnings; - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, + return qed_parse_idle_chk_dump(dump_buf, num_dumped_dwords, NULL, results_buf_size, @@ -7221,12 +7279,12 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, char *results_buf, - u32 *num_errors, u32 *num_warnings) + u32 *num_errors, + u32 *num_warnings) { u32 parsed_buf_size; - return qed_parse_idle_chk_dump(p_hwfn, - dump_buf, + return qed_parse_idle_chk_dump(dump_buf, num_dumped_dwords, results_buf, &parsed_buf_size, @@ -7245,9 +7303,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size) { return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, @@ -7259,7 +7315,6 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, - num_dumped_dwords, results_buf, &parsed_buf_size); } @@ -7268,10 +7323,7 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, @@ -7281,10 +7333,7 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_reg_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, @@ -7292,10 +7341,7 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, @@ -7305,10 +7351,7 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_igu_fifo_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - results_buf, &parsed_buf_size); + return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size); } enum dbg_status @@ -7317,9 +7360,7 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, + return qed_parse_protection_override_dump(dump_buf, NULL, results_buf_size); } @@ -7330,9 +7371,7 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_protection_override_dump(p_hwfn, - dump_buf, - num_dumped_dwords, + return qed_parse_protection_override_dump(dump_buf, results_buf, &parsed_buf_size); } @@ -7342,10 +7381,7 @@ enum dbg_status 
qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 num_dumped_dwords, u32 *results_buf_size) { - return qed_parse_fw_asserts_dump(p_hwfn, - dump_buf, - num_dumped_dwords, - NULL, results_buf_size); + return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size); } enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, @@ -7355,9 +7391,7 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, { u32 parsed_buf_size; - return qed_parse_fw_asserts_dump(p_hwfn, - dump_buf, - num_dumped_dwords, + return qed_parse_fw_asserts_dump(dump_buf, results_buf, &parsed_buf_size); } @@ -7386,30 +7420,30 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, /* Go over registers with a non-zero attention status */ for (i = 0; i < num_regs; i++) { + struct dbg_attn_bit_mapping *bit_mapping; struct dbg_attn_reg_result *reg_result; - struct dbg_attn_bit_mapping *mapping; u8 num_reg_attn, bit_idx = 0; reg_result = &results->reg_results[i]; num_reg_attn = GET_FIELD(reg_result->data, DBG_ATTN_REG_RESULT_NUM_REG_ATTN); block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES]; - mapping = &((struct dbg_attn_bit_mapping *) - block_attn->ptr)[reg_result->block_attn_offset]; + bit_mapping = &((struct dbg_attn_bit_mapping *) + block_attn->ptr)[reg_result->block_attn_offset]; pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS]; /* Go over attention status bits */ for (j = 0; j < num_reg_attn; j++) { - u16 attn_idx_val = GET_FIELD(mapping[j].data, + u16 attn_idx_val = GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_VAL); const char *attn_name, *attn_type_str, *masked_str; - u32 name_offset, sts_addr; + u32 attn_name_offset, sts_addr; /* Check if bit mask should be advanced (due to unused * bits). */ - if (GET_FIELD(mapping[j].data, + if (GET_FIELD(bit_mapping[j].data, DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) { bit_idx += (u8)attn_idx_val; continue; @@ -7422,9 +7456,10 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, } /* Find attention name */ - name_offset = block_attn_name_offsets[attn_idx_val]; + attn_name_offset = + block_attn_name_offsets[attn_idx_val]; attn_name = &((const char *) - pstrings->ptr)[name_offset]; + pstrings->ptr)[attn_name_offset]; attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ? "Interrupt" : "Parity"; masked_str = reg_result->mask_val & BIT(bit_idx) ? diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 58a689fb04db..553a6d17260e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn) /* This function reconfigures the QM pf on the fly. * For this purpose we: * 1. reconfigure the QM database - * 2. set new values to runtime arrat + * 2. set new values to runtime array * 3. send an sdm_qm_cmd through the rbc interface to stop the QM * 4. activate init tool in QM_PF stage * 5. 
send an sdm_qm_cmd through rbc interface to release the QM @@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_init_clear_rt_data(p_hwfn); /* prepare QM portion of runtime array */ - qed_qm_init_pf(p_hwfn, p_ptt); + qed_qm_init_pf(p_hwfn, p_ptt, false); /* activate init tool on runtime array */ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, @@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); } - /* Protocl Configuration */ + /* Protocol Configuration */ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0); STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, @@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, if (rc) return rc; + /* Sanity check before the PF init sequence that uses DMAE */ + rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); + if (rc) + return rc; + /* PF Init sequence */ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); if (rc) @@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* No need for a case for QED_CMDQS_CQS since * CNQ/CMDQS are the same resource. */ - resc_max_val = NUM_OF_CMDQS_CQS; + resc_max_val = NUM_OF_GLOBAL_QUEUES; break; case QED_RDMA_STATS_QUEUE: resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 @@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ - *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs; + *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; break; case QED_RDMA_STATS_QUEUE: *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 : diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index df195c02b711..2dc9b312a795 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, struct qed_fcoe_pf_params *fcoe_pf_params = NULL; struct fcoe_init_ramrod_params *p_ramrod = NULL; struct fcoe_init_func_ramrod_data *p_data; - struct fcoe_conn_context *p_cxt = NULL; + struct e4_fcoe_conn_context *p_cxt = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_cxt_info cxt_info; @@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, } p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->tstorm_ag_context.flags3, - TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); fcoe_pf_params->dummy_icid = (u16)dummy_cid; @@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) void qed_fcoe_setup(struct qed_hwfn *p_hwfn) { - struct fcoe_task_context *p_task_ctx = NULL; + struct e4_fcoe_task_context *p_task_ctx = NULL; int rc; u32 i; @@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) if (rc) continue; - memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context)); SET_FIELD(p_task_ctx->timer_context.logical_client_0, TIMERS_CONTEXT_VALIDLC0, 1); SET_FIELD(p_task_ctx->timer_context.logical_client_1, TIMERS_CONTEXT_VALIDLC1, 1); SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, - TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 3427fe7049b5..de873d770575 100644 
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -54,7 +54,7 @@ struct qed_hwfn; struct qed_ptt; -/* opcodes for the event ring */ +/* Opcodes for the event ring */ enum common_event_opcode { COMMON_EVENT_PF_START, COMMON_EVENT_PF_STOP, @@ -82,487 +82,7 @@ enum common_ramrod_cmd_id { MAX_COMMON_RAMROD_CMD_ID }; -/* The core storm context for the Ystorm */ -struct ystorm_core_conn_st_ctx { - __le32 reserved[4]; -}; - -/* The core storm context for the Pstorm */ -struct pstorm_core_conn_st_ctx { - __le32 reserved[4]; -}; - -/* Core Slowpath Connection storm context of Xstorm */ -struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo; - __le32 spq_base_hi; - struct regpair consolid_base_addr; - __le16 spq_cons; - __le16 consolid_cons; - __le32 reserved0[55]; -}; - -struct xstorm_core_conn_ag_ctx { - u8 reserved0; - u8 core_state; - u8 flags0; -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 - u8 flags1; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 - u8 flags2; -#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 - u8 flags3; -#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 - u8 flags4; -#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 -#define 
XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 - u8 flags5; -#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 - u8 flags6; -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 - u8 flags7; -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 - u8 flags8; -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 - u8 flags9; -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 - u8 flags10; -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define 
XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 - u8 flags11; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 - u8 flags12; -#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 - u8 flags13; -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 - u8 flags14; -#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 -#define 
XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 - u8 byte2; - __le16 physical_q0; - __le16 consolid_prod; - __le16 reserved16; - __le16 tx_bd_cons; - __le16 tx_bd_or_spq_prod; - __le16 word5; - __le16 conn_dpi; - u8 byte3; - u8 byte4; - u8 byte5; - u8 byte6; - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le32 reg4; - __le32 reg5; - __le32 reg6; - __le16 word7; - __le16 word8; - __le16 word9; - __le16 word10; - __le32 reg7; - __le32 reg8; - __le32 reg9; - u8 byte7; - u8 byte8; - u8 byte9; - u8 byte10; - u8 byte11; - u8 byte12; - u8 byte13; - u8 byte14; - u8 byte15; - u8 e5_reserved; - __le16 word11; - __le32 reg10; - __le32 reg11; - __le32 reg12; - __le32 reg13; - __le32 reg14; - __le32 reg15; - __le32 reg16; - __le32 reg17; - __le32 reg18; - __le32 reg19; - __le16 word12; - __le16 word13; - __le16 word14; - __le16 word15; -}; - -struct tstorm_core_conn_ag_ctx { - u8 byte0; - u8 byte1; - u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 - u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 - u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define 
TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le32 reg4; - __le32 reg5; - __le32 reg6; - __le32 reg7; - __le32 reg8; - u8 byte2; - u8 byte3; - __le16 word0; - u8 byte4; - u8 byte5; - __le16 word1; - __le16 word2; - __le16 word3; - __le32 reg9; - __le32 reg10; -}; - -struct ustorm_core_conn_ag_ctx { - u8 reserved; - u8 byte1; - u8 flags0; -#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 - u8 flags2; -#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags3; -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define 
USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2; - u8 byte3; - __le16 word0; - __le16 word1; - __le32 rx_producers; - __le32 reg1; - __le32 reg2; - __le32 reg3; - __le16 word2; - __le16 word3; -}; - -/* The core storm context for the Mstorm */ -struct mstorm_core_conn_st_ctx { - __le32 reserved[24]; -}; - -/* The core storm context for the Ustorm */ -struct ustorm_core_conn_st_ctx { - __le32 reserved[4]; -}; - -/* core connection context */ -struct core_conn_context { - struct ystorm_core_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2]; - struct pstorm_core_conn_st_ctx pstorm_st_context; - struct regpair pstorm_st_padding[2]; - struct xstorm_core_conn_st_ctx xstorm_st_context; - struct xstorm_core_conn_ag_ctx xstorm_ag_context; - struct tstorm_core_conn_ag_ctx tstorm_ag_context; - struct ustorm_core_conn_ag_ctx ustorm_ag_context; - struct mstorm_core_conn_st_ctx mstorm_st_context; - struct ustorm_core_conn_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; -}; - +/* How ll2 should deal with packet upon errors */ enum core_error_handle { LL2_DROP_PACKET, LL2_DO_NOTHING, @@ -570,21 +90,25 @@ enum core_error_handle { MAX_CORE_ERROR_HANDLE }; +/* Opcodes for the event ring */ enum core_event_opcode { CORE_EVENT_TX_QUEUE_START, CORE_EVENT_TX_QUEUE_STOP, CORE_EVENT_RX_QUEUE_START, CORE_EVENT_RX_QUEUE_STOP, CORE_EVENT_RX_QUEUE_FLUSH, + CORE_EVENT_TX_QUEUE_UPDATE, MAX_CORE_EVENT_OPCODE }; +/* The L4 pseudo checksum mode for Core */ enum core_l4_pseudo_checksum_mode { CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH, CORE_L4_PSEUDO_CSUM_ZERO_LENGTH, MAX_CORE_L4_PSEUDO_CHECKSUM_MODE }; +/* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_port_stats { struct regpair gsi_invalid_hdr; struct regpair gsi_invalid_pkt_length; @@ -592,6 +116,7 @@ struct core_ll2_port_stats { struct regpair gsi_crcchksm_error; }; +/* Ethernet TX Per Queue Stats */ struct core_ll2_pstorm_per_queue_stat { struct regpair sent_ucast_bytes; struct regpair sent_mcast_bytes; @@ -601,6 +126,7 @@ struct core_ll2_pstorm_per_queue_stat { struct regpair sent_bcast_pkts; }; +/* Light-L2 RX Producers in Tstorm RAM */ struct core_ll2_rx_prod { __le16 bd_prod; __le16 cqe_prod; @@ -621,6 +147,7 @@ struct core_ll2_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +/* Core Ramrod Command IDs (light L2) */ enum core_ramrod_cmd_id { CORE_RAMROD_UNUSED, CORE_RAMROD_RX_QUEUE_START, @@ -628,53 +155,64 @@ enum core_ramrod_cmd_id { CORE_RAMROD_RX_QUEUE_STOP, CORE_RAMROD_TX_QUEUE_STOP, CORE_RAMROD_RX_QUEUE_FLUSH, + CORE_RAMROD_TX_QUEUE_UPDATE, MAX_CORE_RAMROD_CMD_ID }; +/* Core RX CQE Type for Light L2 */ enum core_roce_flavor_type { CORE_ROCE, CORE_RROCE, MAX_CORE_ROCE_FLAVOR_TYPE }; +/* Specifies how ll2 should deal with packets errors: packet_too_big and + * no_buff. 
+ */ struct core_rx_action_on_error { u8 error_type; #define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3 -#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 -#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 -#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 -#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF -#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 +#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF +#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 }; +/* Core RX BD for Light L2 */ struct core_rx_bd { struct regpair addr; __le16 reserved[4]; }; +/* Core RX CM offload BD for Light L2 */ struct core_rx_bd_with_buff_len { struct regpair addr; __le16 buff_length; __le16 reserved[3]; }; +/* Core RX CM offload BD for Light L2 */ union core_rx_bd_union { struct core_rx_bd rx_bd; struct core_rx_bd_with_buff_len rx_bd_with_len; }; +/* Opaque Data for Light L2 RX CQE */ struct core_rx_cqe_opaque_data { __le32 data[2]; }; +/* Core RX CQE Type for Light L2 */ enum core_rx_cqe_type { - CORE_RX_CQE_ILLIGAL_TYPE, + CORE_RX_CQE_ILLEGAL_TYPE, CORE_RX_CQE_TYPE_REGULAR, CORE_RX_CQE_TYPE_GSI_OFFLOAD, CORE_RX_CQE_TYPE_SLOW_PATH, MAX_CORE_RX_CQE_TYPE }; +/* Core RX CQE for Light L2 */ struct core_rx_fast_path_cqe { u8 type; u8 placement_offset; @@ -687,6 +225,7 @@ struct core_rx_fast_path_cqe { __le32 reserved1[3]; }; +/* Core Rx CM offload CQE */ struct core_rx_gsi_offload_cqe { u8 type; u8 data_length_error; @@ -696,9 +235,11 @@ struct core_rx_gsi_offload_cqe { __le32 src_mac_addrhi; __le16 src_mac_addrlo; __le16 qp_id; - __le32 gid_dst[4]; + __le32 src_qp; + __le32 reserved[3]; }; +/* Core RX CQE for Light L2 */ struct core_rx_slow_path_cqe { u8 type; u8 ramrod_cmd_id; @@ -707,12 +248,14 @@ struct core_rx_slow_path_cqe { __le32 reserved1[5]; }; +/* Core RX CM offload BD for Light L2 */ union core_rx_cqe_union { struct core_rx_fast_path_cqe rx_cqe_fp; struct core_rx_gsi_offload_cqe rx_cqe_gsi; struct core_rx_slow_path_cqe rx_cqe_sp; }; +/* Ramrod data for rx queue start ramrod */ struct core_rx_start_ramrod_data { struct regpair bd_base; struct regpair cqe_pbl_addr; @@ -723,16 +266,18 @@ struct core_rx_start_ramrod_data { u8 complete_event_flg; u8 drop_ttl0_flg; __le16 num_of_pbl_pages; - u8 inner_vlan_removal_en; + u8 inner_vlan_stripping_en; + u8 report_outer_vlan; u8 queue_id; u8 main_func_queue; u8 mf_si_bcast_accept_all; u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[7]; + u8 reserved[6]; }; +/* Ramrod data for rx queue stop ramrod */ struct core_rx_stop_ramrod_data { u8 complete_cqe_flg; u8 complete_event_flg; @@ -741,46 +286,51 @@ struct core_rx_stop_ramrod_data { __le16 reserved2[2]; }; +/* Flags for Core TX BD */ struct core_tx_bd_data { __le16 as_bitfield; -#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 -#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 -#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 -#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 -#define CORE_TX_BD_DATA_START_BD_MASK 0x1 -#define CORE_TX_BD_DATA_START_BD_SHIFT 2 -#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 -#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3 -#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 -#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4 -#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 -#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5 -#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 -#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6 
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 +#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 +#define CORE_TX_BD_DATA_START_BD_MASK 0x1 +#define CORE_TX_BD_DATA_START_BD_SHIFT 2 +#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3 +#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4 +#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 +#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5 +#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6 #define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7 -#define CORE_TX_BD_DATA_NBDS_MASK 0xF -#define CORE_TX_BD_DATA_NBDS_SHIFT 8 -#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 -#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12 -#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 -#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13 -#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3 -#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14 -}; - +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7 +#define CORE_TX_BD_DATA_NBDS_MASK 0xF +#define CORE_TX_BD_DATA_NBDS_SHIFT 8 +#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 +#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12 +#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 +#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13 +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14 +#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1 +#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15 +}; + +/* Core TX BD for Light L2 */ struct core_tx_bd { struct regpair addr; __le16 nbytes; __le16 nw_vlan_or_lb_echo; struct core_tx_bd_data bd_data; __le16 bitfield1; -#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF -#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 -#define CORE_TX_BD_TX_DST_MASK 0x3 -#define CORE_TX_BD_TX_DST_SHIFT 14 +#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF +#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 +#define CORE_TX_BD_TX_DST_MASK 0x3 +#define CORE_TX_BD_TX_DST_SHIFT 14 }; +/* Light L2 TX Destination */ enum core_tx_dest { CORE_TX_DEST_NW, CORE_TX_DEST_LB, @@ -789,6 +339,7 @@ enum core_tx_dest { MAX_CORE_TX_DEST }; +/* Ramrod data for tx queue start ramrod */ struct core_tx_start_ramrod_data { struct regpair pbl_base_addr; __le16 mtu; @@ -803,10 +354,20 @@ struct core_tx_start_ramrod_data { u8 resrved[3]; }; +/* Ramrod data for tx queue stop ramrod */ struct core_tx_stop_ramrod_data { __le32 reserved0[2]; }; +/* Ramrod data for tx queue update ramrod */ +struct core_tx_update_ramrod_data { + u8 update_qm_pq_id_flg; + u8 reserved0; + __le16 qm_pq_id; + __le32 reserved1[1]; +}; + +/* Enum flag for what type of dcb data to update */ enum dcb_dscp_update_mode { DONT_UPDATE_DCB_DSCP, UPDATE_DCB, @@ -815,6 +376,487 @@ enum dcb_dscp_update_mode { MAX_DCB_DSCP_UPDATE_MODE }; +/* The core storm context for the Ystorm */ +struct ystorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The core storm context for the Pstorm */ +struct pstorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* Core Slowpath Connection storm context of Xstorm */ +struct xstorm_core_conn_st_ctx { + __le32 spq_base_lo; + __le32 spq_base_hi; + struct regpair consolid_base_addr; + __le16 spq_cons; + __le16 consolid_cons; + __le32 reserved0[55]; +}; + +struct e4_xstorm_core_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define 
E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define 
E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define 
E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 consolid_prod; + __le16 reserved16; + __le16 tx_bd_cons; + __le16 tx_bd_or_spq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + 
__le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; + __le32 reg18; + __le32 reg19; + __le16 word12; + __le16 word13; + __le16 word14; + __le16 word15; +}; + +struct e4_tstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; +}; + +struct e4_ustorm_core_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define 
E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 rx_producers; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +/* The core storm context for the Mstorm */ +struct mstorm_core_conn_st_ctx { + __le32 reserved[24]; +}; + +/* The core storm context for the Ustorm */ +struct ustorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* core connection context */ +struct e4_core_conn_context { + struct ystorm_core_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_core_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_core_conn_st_ctx xstorm_st_context; + struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context; + struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context; + struct mstorm_core_conn_st_ctx mstorm_st_context; + struct ustorm_core_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; +}; + struct eth_mstorm_per_pf_stat { struct regpair gre_discard_pkts; struct regpair vxlan_discard_pkts; @@ -896,6 +938,50 @@ struct eth_ustorm_per_queue_stat { struct regpair rcv_bcast_pkts; }; +/* Event Ring VF-PF Channel data */ +struct vf_pf_channel_eqe_data { + struct regpair msg_addr; +}; + +/* Event Ring malicious VF data */ +struct malicious_vf_eqe_data { + u8 vf_id; + u8 err_id; + __le16 reserved[3]; +}; + +/* Event Ring initial cleanup data */ +struct initial_cleanup_eqe_data { + u8 vf_id; + u8 reserved[7]; +}; + +/* Event Data Union */ +union event_ring_data { + u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; + struct iscsi_eqe_data iscsi_info; + struct iscsi_connect_done_results iscsi_conn_done_info; + union rdma_eqe_data rdma_data; + struct malicious_vf_eqe_data malicious_vf; + struct initial_cleanup_eqe_data vf_init_cleanup; +}; + +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + __le16 reserved0; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + /* Event Ring Next Page Address */ struct event_ring_next_addr { struct regpair addr; @@ -908,12 +994,21 @@ union event_ring_element { struct event_ring_next_addr next_addr; }; +/* Ports mode */ enum fw_flow_ctrl_mode { flow_ctrl_pause, flow_ctrl_pfc, MAX_FW_FLOW_CTRL_MODE }; +/* GFT profile type */ +enum gft_profile_type { + GFT_PROFILE_TYPE_4_TUPLE, + GFT_PROFILE_TYPE_L4_DST_PORT, + GFT_PROFILE_TYPE_IP_DST_PORT, + MAX_GFT_PROFILE_TYPE +}; + /* Major and Minor hsi Versions */ struct hsi_fp_ver_struct { u8 minor_ver_arr[2]; @@ -921,14 +1016,14 @@ struct hsi_fp_ver_struct { }; enum iwarp_ll2_tx_queues { - IWARP_LL2_IN_ORDER_TX_QUEUE = 1, + IWARP_LL2_IN_ORDER_TX_QUEUE = 1, IWARP_LL2_ALIGNED_TX_QUEUE, IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE, IWARP_LL2_ERROR, MAX_IWARP_LL2_TX_QUEUES }; -/* Mstorm non-triggering VF zone */ +/* Malicious VF error ID */ enum malicious_vf_error_id { MALICIOUS_VF_NO_ERROR, VF_PF_CHANNEL_NOT_READY, @@ -951,9 +1046,11 @@ enum malicious_vf_error_id { ETH_TUNN_IPV6_EXT_NBD_ERR, ETH_CONTROL_PACKET_VIOLATION, ETH_ANTI_SPOOFING_ERR, + ETH_PACKET_SIZE_TOO_LARGE, MAX_MALICIOUS_VF_ERROR_ID }; +/* Mstorm non-triggering VF zone */ struct mstorm_non_trigger_vf_zone { 
struct eth_mstorm_per_queue_stat eth_queue_stat; struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD]; @@ -962,7 +1059,21 @@ struct mstorm_non_trigger_vf_zone { /* Mstorm VF zone */ struct mstorm_vf_zone { struct mstorm_non_trigger_vf_zone non_trigger; +}; + +/* vlan header including TPID and TCI fields */ +struct vlan_header { + __le16 tpid; + __le16 tci; +}; +/* outer tag configurations */ +struct outer_tag_config_struct { + u8 enable_stag_pri_change; + u8 pri_map_valid; + u8 reserved[2]; + struct vlan_header outer_tag; + u8 inner_to_outer_pri_map[8]; }; /* personality per PF */ @@ -974,7 +1085,7 @@ enum personality_type { PERSONALITY_RDMA, PERSONALITY_CORE, PERSONALITY_ETH, - PERSONALITY_RESERVED4, + PERSONALITY_RESERVED, MAX_PERSONALITY_TYPE }; @@ -997,7 +1108,6 @@ struct pf_start_ramrod_data { struct regpair event_ring_pbl_addr; struct regpair consolid_q_pbl_addr; struct pf_start_tunnel_config tunnel_config; - __le32 reserved; __le16 event_ring_sb_id; u8 base_vf_id; u8 num_vfs; @@ -1011,21 +1121,22 @@ struct pf_start_ramrod_data { u8 mf_mode; u8 integ_phase; u8 allow_npar_tx_switching; - u8 inner_to_outer_pri_map[8]; - u8 pri_map_valid; - __le32 outer_tag; + u8 reserved0; struct hsi_fp_ver_struct hsi_fp_ver; + struct outer_tag_config_struct outer_tag_config; }; +/* Data for port update ramrod */ struct protocol_dcb_data { u8 dcb_enable_flag; - u8 reserved_a; + u8 dscp_enable_flag; u8 dcb_priority; u8 dcb_tc; - u8 reserved_b; + u8 dscp_val; u8 reserved0; }; +/* Update tunnel configuration */ struct pf_update_tunnel_config { u8 update_rx_pf_clss; u8 update_rx_def_ucast_clss; @@ -1042,8 +1153,8 @@ struct pf_update_tunnel_config { __le16 reserved; }; +/* Data for port update ramrod */ struct pf_update_ramrod_data { - u8 pf_id; u8 update_eth_dcb_data_mode; u8 update_fcoe_dcb_data_mode; u8 update_iscsi_dcb_data_mode; @@ -1051,6 +1162,7 @@ struct pf_update_ramrod_data { u8 update_rroce_dcb_data_mode; u8 update_iwarp_dcb_data_mode; u8 update_mf_vlan_flag; + u8 update_enable_stag_pri_change; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; struct protocol_dcb_data iscsi_dcb_data; @@ -1058,7 +1170,8 @@ struct pf_update_ramrod_data { struct protocol_dcb_data rroce_dcb_data; struct protocol_dcb_data iwarp_dcb_data; __le16 mf_vlan; - __le16 reserved; + u8 enable_stag_pri_change; + u8 reserved; struct pf_update_tunnel_config tunnel_config; }; @@ -1079,11 +1192,13 @@ enum protocol_version_array_key { MAX_PROTOCOL_VERSION_ARRAY_KEY }; +/* RDMA TX Stats */ struct rdma_sent_stats { struct regpair sent_bytes; struct regpair sent_pkts; }; +/* Pstorm non-triggering VF zone */ struct pstorm_non_trigger_vf_zone { struct eth_pstorm_per_queue_stat eth_queue_stat; struct rdma_sent_stats rdma_stats; @@ -1103,11 +1218,34 @@ struct ramrod_header { __le16 echo; }; +/* RDMA RX Stats */ struct rdma_rcv_stats { struct regpair rcv_bytes; struct regpair rcv_pkts; }; +/* Data for update QCN/DCQCN RL ramrod */ +struct rl_update_ramrod_data { + u8 qcn_update_param_flg; + u8 dcqcn_update_param_flg; + u8 rl_init_flg; + u8 rl_start_flg; + u8 rl_stop_flg; + u8 rl_id_first; + u8 rl_id_last; + u8 rl_dc_qcn_flg; + __le32 rl_bc_rate; + __le16 rl_max_rate; + __le16 rl_r_ai; + __le16 rl_r_hai; + __le16 dcqcn_g; + __le32 dcqcn_k_us; + __le32 dcqcn_timeuot_us; + __le32 qcn_timeuot_us; + __le32 reserved[2]; +}; + +/* Slowpath Element (SPQE) */ struct slow_path_element { struct ramrod_header hdr; struct regpair data_ptr; @@ -1130,11 +1268,12 @@ struct tstorm_per_port_stat { 
struct regpair roce_irregular_pkt; struct regpair iwarp_irregular_pkt; struct regpair eth_irregular_pkt; - struct regpair reserved1; + struct regpair toe_irregular_pkt; struct regpair preroce_irregular_pkt; struct regpair eth_gre_tunn_filter_discard; struct regpair eth_vxlan_tunn_filter_discard; struct regpair eth_geneve_tunn_filter_discard; + struct regpair eth_gft_drop_pkt; }; /* Tstorm VF zone */ @@ -1197,6 +1336,7 @@ struct vf_stop_ramrod_data { __le32 reserved2; }; +/* VF zone size mode */ enum vf_zone_size_mode { VF_ZONE_SIZE_MODE_DEFAULT, VF_ZONE_SIZE_MODE_DOUBLE, @@ -1204,6 +1344,7 @@ enum vf_zone_size_mode { MAX_VF_ZONE_SIZE_MODE }; +/* Attentions status block */ struct atten_status_block { __le32 atten_bits; __le32 atten_ack; @@ -1212,12 +1353,6 @@ struct atten_status_block { __le32 reserved1; }; -enum command_type_bit { - IGU_COMMAND_TYPE_NOP = 0, - IGU_COMMAND_TYPE_SET = 1, - MAX_COMMAND_TYPE_BIT -}; - /* DMAE command */ struct dmae_cmd { __le32 opcode; @@ -1327,74 +1462,74 @@ enum dmae_cmd_src_enum { MAX_DMAE_CMD_SRC_ENUM }; -struct mstorm_core_conn_ag_ctx { +struct e4_mstorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define 
E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct ystorm_core_conn_ag_ctx { +struct e4_ystorm_core_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -1545,22 +1680,22 @@ struct qm_rf_opportunistic_mask { }; /* QM hardware structure of QM map memory */ -struct qm_rf_pq_map { +struct qm_rf_pq_map_e4 { __le32 reg; -#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 -#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF -#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 -#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF -#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 -#define QM_RF_PQ_MAP_VOQ_MASK 0x1F -#define QM_RF_PQ_MAP_VOQ_SHIFT 18 -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 -#define 
QM_RF_PQ_MAP_RL_VALID_MASK 0x1 -#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 -#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F -#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26 }; /* Completion params for aggregated interrupt completion */ @@ -1643,8 +1778,8 @@ enum block_addr { GRCBASE_MULD = 0x4e0000, GRCBASE_YULD = 0x4c8000, GRCBASE_XYLD = 0x4c0000, - GRCBASE_PTLD = 0x590000, - GRCBASE_YPLD = 0x5b0000, + GRCBASE_PTLD = 0x5a0000, + GRCBASE_YPLD = 0x5c0000, GRCBASE_PRM = 0x230000, GRCBASE_PBF_PB1 = 0xda0000, GRCBASE_PBF_PB2 = 0xda4000, @@ -1675,6 +1810,7 @@ enum block_addr { GRCBASE_PHY_PCIE = 0x620000, GRCBASE_LED = 0x6b8000, GRCBASE_AVS_WRAP = 0x6b0000, + GRCBASE_PXPREQBUS = 0x56000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -1766,6 +1902,7 @@ enum block_id { BLOCK_PHY_PCIE, BLOCK_LED, BLOCK_AVS_WRAP, + BLOCK_PXPREQBUS, BLOCK_MISC_AEU, BLOCK_BAR0_MAP, MAX_BLOCK_ID @@ -1841,7 +1978,7 @@ struct dbg_attn_block_result { struct dbg_attn_reg_result reg_results[15]; }; -/* mode header */ +/* Mode header */ struct dbg_mode_hdr { __le16 data; #define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 @@ -1863,80 +2000,83 @@ struct dbg_attn_reg { __le32 mask_address; }; -/* attention types */ +/* Attention types */ enum dbg_attn_type { ATTN_TYPE_INTERRUPT, ATTN_TYPE_PARITY, MAX_DBG_ATTN_TYPE }; +/* Debug Bus block data */ struct dbg_bus_block { u8 num_of_lines; u8 has_latency_events; __le16 lines_offset; }; +/* Debug Bus block user data */ struct dbg_bus_block_user_data { u8 num_of_lines; u8 has_latency_events; __le16 names_offset; }; +/* Block Debug line data */ struct dbg_bus_line { u8 data; -#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF -#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 -#define DBG_BUS_LINE_IS_256B_MASK 0x1 -#define DBG_BUS_LINE_IS_256B_SHIFT 4 -#define DBG_BUS_LINE_RESERVED_MASK 0x7 -#define DBG_BUS_LINE_RESERVED_SHIFT 5 +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 u8 group_sizes; }; -/* condition header for registers dump */ +/* Condition header for registers dump */ struct dbg_dump_cond_hdr { struct dbg_mode_hdr mode; /* Mode header */ u8 block_id; /* block ID */ u8 data_size; /* size in dwords of the data following this header */ }; -/* memory data for registers dump */ +/* Memory data for registers dump */ struct dbg_dump_mem { __le32 dword0; -#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF -#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 -#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF -#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 +#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF +#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 +#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF +#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 __le32 dword1; -#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF -#define DBG_DUMP_MEM_LENGTH_SHIFT 0 -#define 
DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 -#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 -#define DBG_DUMP_MEM_RESERVED_MASK 0x7F -#define DBG_DUMP_MEM_RESERVED_SHIFT 25 +#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF +#define DBG_DUMP_MEM_LENGTH_SHIFT 0 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 }; -/* register data for registers dump */ +/* Register data for registers dump */ struct dbg_dump_reg { __le32 data; -#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ -#define DBG_DUMP_REG_ADDRESS_SHIFT 0 -#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 /* indicates register is wide-bus */ -#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 -#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */ -#define DBG_DUMP_REG_LENGTH_SHIFT 24 +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_DUMP_REG_ADDRESS_SHIFT 0 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF +#define DBG_DUMP_REG_LENGTH_SHIFT 24 }; -/* split header for registers dump */ +/* Split header for registers dump */ struct dbg_dump_split_hdr { __le32 hdr; -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF -#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF -#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 }; -/* condition header for idle check */ +/* Condition header for idle check */ struct dbg_idle_chk_cond_hdr { struct dbg_mode_hdr mode; /* Mode header */ __le16 data_size; /* size in dwords of the data following this header */ @@ -1945,12 +2085,12 @@ struct dbg_idle_chk_cond_hdr { /* Idle Check condition register */ struct dbg_idle_chk_cond_reg { __le32 data; -#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 __le16 num_entries; u8 entry_size; u8 start_entry; @@ -1959,12 +2099,12 @@ struct dbg_idle_chk_cond_reg { /* Idle Check info register */ struct dbg_idle_chk_info_reg { __le32 data; -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF -#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 -#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF -#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 __le16 size; /* register size in dwords */ struct dbg_mode_hdr mode; /* Mode header */ }; @@ -2016,13 +2156,13 @@ struct dbg_idle_chk_rule { /* Idle Check rule parsing data */ struct 
dbg_idle_chk_rule_parsing_data { __le32 data; -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF -#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 }; -/* idle check severity types */ +/* Idle check severity types */ enum dbg_idle_chk_severity_types { /* idle check failure should cause an error */ IDLE_CHK_SEVERITY_ERROR, @@ -2036,14 +2176,14 @@ enum dbg_idle_chk_severity_types { /* Debug Bus block data */ struct dbg_bus_block_data { __le16 data; -#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 -#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF -#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4 -#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8 -#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF -#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12 +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF +#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4 +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8 +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF +#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12 u8 line_num; u8 hw_id; }; @@ -2072,6 +2212,7 @@ enum dbg_bus_clients { MAX_DBG_BUS_CLIENTS }; +/* Debug Bus constraint operation types */ enum dbg_bus_constraint_ops { DBG_BUS_CONSTRAINT_OP_EQ, DBG_BUS_CONSTRAINT_OP_NE, @@ -2086,12 +2227,13 @@ enum dbg_bus_constraint_ops { MAX_DBG_BUS_CONSTRAINT_OPS }; +/* Debug Bus trigger state data */ struct dbg_bus_trigger_state_data { u8 data; -#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF -#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0 -#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF -#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4 +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0 +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF +#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4 }; /* Debug Bus memory address */ @@ -2165,6 +2307,7 @@ struct dbg_bus_data { struct dbg_bus_storm_data storms[6]; }; +/* Debug bus filter types */ enum dbg_bus_filter_types { DBG_BUS_FILTER_TYPE_OFF, DBG_BUS_FILTER_TYPE_PRE, @@ -2181,6 +2324,7 @@ enum dbg_bus_frame_modes { MAX_DBG_BUS_FRAME_MODES }; +/* Debug bus other engine mode */ enum dbg_bus_other_engine_modes { DBG_BUS_OTHER_ENGINE_MODE_NONE, DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, @@ -2190,12 +2334,14 @@ enum dbg_bus_other_engine_modes { MAX_DBG_BUS_OTHER_ENGINE_MODES }; +/* Debug bus post-trigger recording types */ enum dbg_bus_post_trigger_types { DBG_BUS_POST_TRIGGER_RECORD, DBG_BUS_POST_TRIGGER_DROP, MAX_DBG_BUS_POST_TRIGGER_TYPES }; +/* Debug bus pre-trigger recording types */ enum dbg_bus_pre_trigger_types { DBG_BUS_PRE_TRIGGER_START_FROM_ZERO, DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, @@ -2203,11 +2349,10 @@ enum dbg_bus_pre_trigger_types { MAX_DBG_BUS_PRE_TRIGGER_TYPES }; +/* Debug bus SEMI 
frame modes */ enum dbg_bus_semi_frame_modes { - DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = - 0, - DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = - 3, + DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0, + DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3, MAX_DBG_BUS_SEMI_FRAME_MODES }; @@ -2220,6 +2365,7 @@ enum dbg_bus_states { MAX_DBG_BUS_STATES }; +/* Debug Bus Storm modes */ enum dbg_bus_storm_modes { DBG_BUS_STORM_MODE_PRINTF, DBG_BUS_STORM_MODE_PRAM_ADDR, @@ -2352,7 +2498,7 @@ enum dbg_status { DBG_STATUS_MCP_TRACE_NO_META, DBG_STATUS_MCP_COULD_NOT_HALT, DBG_STATUS_MCP_COULD_NOT_RESUME, - DBG_STATUS_DMAE_FAILED, + DBG_STATUS_RESERVED2, DBG_STATUS_SEMI_FIFO_NOT_EMPTY, DBG_STATUS_IGU_FIFO_BAD_DATA, DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, @@ -2396,7 +2542,8 @@ struct dbg_tools_data { u8 chip_id; u8 platform_id; u8 initialized; - u8 reserved; + u8 use_dmae; + __le32 num_regs_read; }; /********************************/ @@ -2406,6 +2553,7 @@ struct dbg_tools_data { /* Number of VLAN priorities */ #define NUM_OF_VLAN_PRIORITIES 8 +/* BRB RAM init requirements */ struct init_brb_ram_req { __le32 guranteed_per_tc; __le32 headroom_per_tc; @@ -2414,17 +2562,20 @@ struct init_brb_ram_req { u8 num_active_tcs[MAX_NUM_PORTS]; }; +/* ETS per-TC init requirements */ struct init_ets_tc_req { u8 use_sp; u8 use_wfq; __le16 weight; }; +/* ETS init requirements */ struct init_ets_req { __le32 mtu; struct init_ets_tc_req tc_req[NUM_OF_TCS]; }; +/* NIG LB RL init requirements */ struct init_nig_lb_rl_req { __le16 lb_mac_rate; __le16 lb_rate; @@ -2432,15 +2583,18 @@ struct init_nig_lb_rl_req { __le16 tc_rate[NUM_OF_PHYS_TCS]; }; +/* NIG TC mapping for each priority */ struct init_nig_pri_tc_map_entry { u8 tc_id; u8 valid; }; +/* NIG priority to TC map init requirements */ struct init_nig_pri_tc_map_req { struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; }; +/* QM per-port init parameters */ struct init_qm_port_params { u8 active; u8 active_phys_tcs; @@ -2563,7 +2717,7 @@ struct bin_buffer_hdr { __le32 length; }; -/* binary init buffer types */ +/* Binary init buffer types */ enum bin_init_buffer_type { BIN_BUF_INIT_FW_VER_INFO, BIN_BUF_INIT_CMD, @@ -2793,6 +2947,7 @@ struct iro { }; /***************************** Public Functions *******************************/ + /** * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug * arrays. @@ -2802,6 +2957,18 @@ struct iro { enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr); /** + * @brief qed_read_regs - Reads registers into a buffer (using GRC). + * + * @param p_hwfn - HW device data + * @param p_ptt - Ptt window used for writing the registers. + * @param buf - Destination buffer. + * @param addr - Source GRC address in dwords. + * @param len - Number of registers to read. + */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); + +/** * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their * default value. * @@ -3119,6 +3286,7 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, #define MAX_NAME_LEN 16 /***************************** Public Functions *******************************/ + /** * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with * debug arrays. @@ -3172,6 +3340,18 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *num_warnings); /** + * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace + * meta data. + * + * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. 
due to + * no NVRAM access). + * + * @param data - pointer to MCP Trace meta data + * @param size - size of MCP Trace meta data in dwords + */ +void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); + +/** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size * for MCP Trace results (in bytes). * @@ -3607,6 +3787,9 @@ static const u32 dbg_bus_blocks[] = { 0x00000000, /* bar0_map, bb, 0 lines */ 0x00000000, /* bar0_map, k2, 0 lines */ 0x00000000, + 0x00000000, /* bar0_map, bb, 0 lines */ + 0x00000000, /* bar0_map, k2, 0 lines */ + 0x00000000, }; /* Win 2 */ @@ -3645,7 +3828,6 @@ static const u32 dbg_bus_blocks[] = { * Returns the required host memory size in 4KB units. * Must be called before all QM init HSI functions. * - * @param pf_id - physical function ID * @param num_pf_cids - number of connections used by this PF * @param num_vf_cids - number of connections used by VFs of this PF * @param num_tids - number of tasks used by this PF @@ -3654,8 +3836,7 @@ static const u32 dbg_bus_blocks[] = { * * @return The required host memory size in 4KB units. */ -u32 qed_qm_pf_mem_size(u8 pf_id, - u32 num_pf_cids, +u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs); @@ -3676,7 +3857,7 @@ struct qed_qm_pf_rt_init_params { u8 port_id; u8 pf_id; u8 max_phys_tcs_per_port; - bool is_first_pf; + bool is_pf_loading; u32 num_pf_cids; u32 num_vf_cids; u32 num_tids; @@ -3687,6 +3868,7 @@ struct qed_qm_pf_rt_init_params { u8 num_vports; u16 pf_wfq; u32 pf_rl; + u32 link_speed; struct init_qm_pq_params *pq_params; struct init_qm_vport_params *vport_params; }; @@ -3744,11 +3926,14 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, * @param p_ptt - ptt window used for writing the registers * @param vport_id - VPORT ID * @param vport_rl - rate limit in Mb/sec units + * @param link_speed - link speed in Mbps. * * @return 0 on success, -1 on error. */ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl); + struct qed_ptt *p_ptt, + u8 vport_id, u32 vport_rl, u32 link_speed); + /** * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM * * @param p_hwfn * @param p_ptt - ptt window used for writing the registers * @param is_release_cmd - true for release, false for stop. * @param start_pq - first PQ ID to stop * @param num_pqs - Number of PQs to stop, starting from start_pq. * - * @return bool, true if successful, false if timeout occured while waiting for QM command done. + * @return bool, true if successful, false if timeout occurred while waiting for + * QM command done. */ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3769,6 +3955,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, /** * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port * + * @param p_hwfn * @param p_ptt - ptt window used for writing the registers. * @param dest_port - vxlan destination udp port. */ @@ -3778,6 +3965,7 @@ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, /** * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW * + * @param p_hwfn * @param p_ptt - ptt window used for writing the registers. * @param vxlan_enable - vxlan enable flag. */ @@ -3787,6 +3975,7 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, /** * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW * + * @param p_hwfn * @param p_ptt - ptt window used for writing the registers. * @param eth_gre_enable - eth GRE enable flag. * @param ip_gre_enable - IP GRE enable flag.
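For context, a minimal caller sketch of the updated qed_init_vport_rl() prototype in the hunk above, which now takes the current link speed alongside the configured rate limit. Only the qed_init_vport_rl() signature and its 0/-1 return convention come from the patch itself; the wrapper name and the treat-zero-as-link-speed policy are assumptions invented for this illustration.

/* Hypothetical caller (illustration only, not part of this patch). It
 * passes the live link speed together with the requested limit; treating
 * a zero request as "cap at link speed" is an assumption of the sketch.
 */
static int example_update_vport_rate_limit(struct qed_hwfn *p_hwfn,
					   struct qed_ptt *p_ptt,
					   u8 vport_id,
					   u32 req_rl_mbps, u32 link_speed_mbps)
{
	u32 vport_rl = req_rl_mbps ? req_rl_mbps : link_speed_mbps;

	/* Returns 0 on success, -1 on error, per the doc comment above. */
	return qed_init_vport_rl(p_hwfn, p_ptt, vport_id, vport_rl,
				 link_speed_mbps);
}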
@@ -3798,6 +3987,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, /** * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port * + * @param p_hwfn * @param p_ptt - ptt window used for writing the registers. * @param dest_port - geneve destination udp port. */ @@ -3814,612 +4004,921 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_geneve_enable, bool ip_geneve_enable); -void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 pf_id); -void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 pf_id, bool tcp, bool udp, - bool ipv4, bool ipv6); - -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) -#define TSTORM_PORT_STAT_OFFSET(port_id) \ + +/** + * @brief qed_gft_disable - Disable GFT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. + * @param pf_id - pf on which to disable GFT. + */ +void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); + +/** + * @brief qed_gft_config - Enable and configure HW for GFT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. + * @param pf_id - pf on which to enable GFT. + * @param tcp - set profile tcp packets. + * @param udp - set profile udp packet. + * @param ipv4 - set profile ipv4 packet. + * @param ipv6 - set profile ipv6 packet. + * @param profile_type - selects which packet fields to match. Use enum gft_profile_type. + */ +void qed_gft_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 pf_id, + bool tcp, + bool udp, + bool ipv4, bool ipv6, enum gft_profile_type profile_type); + +/** + * @brief qed_enable_context_validation - Enable and configure context + * validation. + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers. + */ +void qed_enable_context_validation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief qed_calc_session_ctx_validation - Calculate validation byte for + * session context. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - context size. + * @param ctx_type - context type. + * @param cid - context cid. + */ +void qed_calc_session_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 cid); + +/** + * @brief qed_calc_task_ctx_validation - Calculate validation byte for task + * context. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - context size. + * @param ctx_type - context type. + * @param tid - context tid. + */ +void qed_calc_task_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 tid); + +/** + * @brief qed_memset_session_ctx - Memset session context to 0 while + * preserving validation bytes. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - size to initialize. + * @param ctx_type - context type. + */ +void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/** + * @brief qed_memset_task_ctx - Memset task context to 0 while preserving + * validation bytes. + * + * @param p_ctx_mem - pointer to context memory. + * @param ctx_size - size to initialize. + * @param ctx_type - context type. + */ +void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/* Ystorm flow control mode.
Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) + +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) \ (IRO[1].base + ((port_id) * IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) + +/* Tstorm ll2 port statistics */ #define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ (IRO[2].base + ((port_id) * IRO[2].m1)) #define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ + +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ (IRO[3].base + ((vf_id) * IRO[3].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) -#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ - (IRO[4].base + (pf_id) * IRO[4].m1) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) -#define USTORM_EQE_CONS_OFFSET(pf_id) \ +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) + +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ + (IRO[4].base + ((pf_id) * IRO[4].m1)) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) + +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_OFFSET(pf_id) \ (IRO[5].base + ((pf_id) * IRO[5].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[5].size) -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ +#define USTORM_EQE_CONS_SIZE (IRO[5].size) + +/* Ustorm eth queue zone */ +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ (IRO[6].base + ((queue_zone_id) * IRO[6].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) -#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) + +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) -#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) + +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) + +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) + +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) + +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) + +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) + +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size) + +/* Tstorm producers */ #define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ - (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) + (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) #define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size) + +/* Tstorm LightL2 queue statistics */ #define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) #define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) + +/* Ustorm LiteL2 queue statistics */ #define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ - (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1)) + (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1)) #define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) + +/* Pstorm 
LiteL2 queue statistics */ #define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ - (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17]. size) -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size) + +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[18].base + ((stat_counter_id) * IRO[18].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) -#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ +#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) + +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ (IRO[19].base + ((queue_id) * IRO[19].m1)) -#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) +#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) + +/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size + * mode. + */ #define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ - (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2)) + (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2)) #define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size) -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) -#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + +/* TPA aggregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size) + +/* Mstorm pf statistics */ +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ (IRO[22].base + ((pf_id) * IRO[22].m1)) -#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size) -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ +#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size) + +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[23].base + ((stat_counter_id) * IRO[23].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) -#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ +#define USTORM_QUEUE_STAT_SIZE (IRO[23].size) + +/* Ustorm pf statistics */ +#define USTORM_ETH_PF_STAT_OFFSET(pf_id)\ (IRO[24].base + ((pf_id) * IRO[24].m1)) -#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size) -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ +#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size) + +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ (IRO[25].base + ((stat_counter_id) * IRO[25].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) -#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ +#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size) + +/* Pstorm pf statistics */ +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ (IRO[26].base + ((pf_id) * IRO[26].m1)) -#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) -#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \ - (IRO[27].base + ((ethtype) * IRO[27].m1)) -#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) -#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ +#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size) + +/* Control frame's EthType configuration for TX control frame security */ +#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \ + (IRO[27].base + ((eth_type_id) * IRO[27].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size) + +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size) + +/* Tstorm Eth limit Rx rate */ +#define
ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ (IRO[29].base + ((pf_id) * IRO[29].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) -#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ +#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) + +/* Xstorm queue zone */ +#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ (IRO[30].base + ((queue_id) * IRO[30].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[31].base + ((rss_id) * IRO[31].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[32].base + ((rss_id) * IRO[32].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[33].base + ((pf_id) * IRO[33].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + +/* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + +/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, + * BDqueue-id. + */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + +/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + +/* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[37].base + ((pf_id) * IRO[37].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + +/* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + +/* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + +/* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + +/* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + +/* Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) -#define 
PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) -#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + +/* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + +/* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ (IRO[44].base + ((pf_id) * IRO[44].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + +/* Pstorm RDMA queue statistics */ +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) -static const struct iro iro_arr[49] = { +/* Tstorm RDMA queue statistics */ +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + +/* Xstorm iWARP rxmit stats */ +#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ + (IRO[47].base + ((pf_id) * IRO[47].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size) + +/* Tstorm RoCE Event Statistics */ +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ + (IRO[48].base + ((roce_pf_id) * IRO[48].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size) + +/* DCQCN Received Statistics */ +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ + (IRO[49].base + ((roce_pf_id) * IRO[49].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size) + +/* DCQCN Sent Statistics */ +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ + (IRO[50].base + ((roce_pf_id) * IRO[50].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size) + +static const struct iro iro_arr[51] = { {0x0, 0x0, 0x0, 0x0, 0x8}, - {0x4cb0, 0x80, 0x0, 0x0, 0x80}, - {0x6518, 0x20, 0x0, 0x0, 0x20}, + {0x4cb8, 0x88, 0x0, 0x0, 0x88}, + {0x6530, 0x20, 0x0, 0x0, 0x20}, {0xb00, 0x8, 0x0, 0x0, 0x4}, {0xa80, 0x8, 0x0, 0x0, 0x4}, {0x0, 0x8, 0x0, 0x0, 0x2}, {0x80, 0x8, 0x0, 0x0, 0x4}, {0x84, 0x8, 0x0, 0x0, 0x2}, + {0x4c48, 0x0, 0x0, 0x0, 0x78}, + {0x3e18, 0x0, 0x0, 0x0, 0x78}, + {0x2b58, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, - {0x3df0, 0x0, 0x0, 0x0, 0x78}, - {0x29b0, 0x0, 0x0, 0x0, 0x78}, - {0x4c38, 0x0, 0x0, 0x0, 0x78}, - {0x4990, 0x0, 0x0, 0x0, 0x78}, - {0x7f48, 0x0, 0x0, 0x0, 0x78}, + {0x4998, 0x0, 0x0, 0x0, 0x78}, + {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, - {0x61f8, 0x10, 0x0, 0x0, 0x10}, - {0xbd20, 0x30, 0x0, 0x0, 0x30}, - {0x95b8, 0x30, 0x0, 0x0, 0x30}, - {0x4b60, 0x80, 0x0, 0x0, 0x40}, + {0x6210, 0x10, 0x0, 0x0, 0x10}, + {0xb820, 0x30, 0x0, 0x0, 0x30}, + {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, - {0x53a0, 0x80, 0x4, 0x0, 0x4}, - {0xc7c8, 0x0, 0x0, 0x0, 0x4}, - {0x4ba0, 0x80, 0x0, 0x0, 0x20}, - {0x8150, 0x40, 0x0, 0x0, 0x30}, - {0xec70, 0x60, 0x0, 0x0, 0x60}, - {0x2b48, 0x80, 0x0, 0x0, 0x38}, - {0xf1b0, 0x78, 0x0, 0x0, 0x78}, + {0x53a8, 0x80, 0x4, 0x0, 0x4}, + {0xc7d0, 0x0, 0x0, 0x0, 0x4}, + {0x4ba8, 0x80, 0x0, 0x0, 0x20}, + {0x8158, 0x40, 0x0, 0x0, 0x30}, + {0xe770, 0x60, 0x0, 0x0, 0x60}, + {0x2cf0, 0x80, 0x0, 0x0, 0x38}, + {0xf2b8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 
0x4, 0x0, 0x0, 0x4}, - {0xaef8, 0x0, 0x0, 0x0, 0xf0}, - {0xafe8, 0x8, 0x0, 0x0, 0x8}, + {0xaf20, 0x0, 0x0, 0x0, 0xf0}, + {0xb010, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, {0x24f8, 0x8, 0x0, 0x0, 0x8}, {0x0, 0x8, 0x0, 0x0, 0x8}, - {0x200, 0x10, 0x8, 0x0, 0x8}, - {0xb78, 0x10, 0x8, 0x0, 0x2}, - {0xd9a8, 0x38, 0x0, 0x0, 0x24}, - {0x12988, 0x10, 0x0, 0x0, 0x8}, - {0x11fa0, 0x38, 0x0, 0x0, 0x18}, - {0xa580, 0x38, 0x0, 0x0, 0x10}, - {0x86f8, 0x30, 0x0, 0x0, 0x18}, - {0x101f8, 0x10, 0x0, 0x0, 0x10}, - {0xde28, 0x48, 0x0, 0x0, 0x38}, - {0x10660, 0x20, 0x0, 0x0, 0x20}, - {0x2b80, 0x80, 0x0, 0x0, 0x10}, - {0x5020, 0x10, 0x0, 0x0, 0x10}, - {0xc9b0, 0x30, 0x0, 0x0, 0x10}, - {0xeec0, 0x10, 0x0, 0x0, 0x10}, + {0x400, 0x18, 0x8, 0x0, 0x8}, + {0xb78, 0x18, 0x8, 0x0, 0x2}, + {0xd898, 0x50, 0x0, 0x0, 0x3c}, + {0x12908, 0x18, 0x0, 0x0, 0x10}, + {0x11aa8, 0x40, 0x0, 0x0, 0x18}, + {0xa588, 0x50, 0x0, 0x0, 0x20}, + {0x8700, 0x40, 0x0, 0x0, 0x28}, + {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0xde48, 0x48, 0x0, 0x0, 0x38}, + {0x10768, 0x20, 0x0, 0x0, 0x20}, + {0x2d28, 0x80, 0x0, 0x0, 0x10}, + {0x5048, 0x10, 0x0, 0x0, 0x10}, + {0xc9b8, 0x30, 0x0, 0x0, 0x10}, + {0xeee0, 0x10, 0x0, 0x0, 0x10}, + {0xa3a0, 0x10, 0x0, 0x0, 0x10}, + {0x13108, 0x8, 0x0, 0x0, 0x8}, }; /* Runtime array offsets */ -#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 -#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 -#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 -#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 -#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 -#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 -#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 -#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 -#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 -#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 -#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 -#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 -#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 -#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 -#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 -#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 -#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 -#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 -#define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 -#define 
PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 -#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 -#define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6667 -#define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6669 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 -#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 -#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702 -#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702 -#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703 -#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 -#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752 -#define 
QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805 -#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949 -#define 
QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29964 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29965 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29966 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29967 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29968 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29969 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29970 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29971 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29972 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29973 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29974 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29975 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29976 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29977 -#define QM_REG_PQTX2PF_18_RT_OFFSET 29978 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29979 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29980 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29981 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29982 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29983 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29984 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29985 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29986 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29987 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29988 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29989 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29990 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29991 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29992 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29993 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29994 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29995 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29996 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29997 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29998 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29999 -#define QM_REG_PQTX2PF_40_RT_OFFSET 30000 -#define QM_REG_PQTX2PF_41_RT_OFFSET 30001 -#define QM_REG_PQTX2PF_42_RT_OFFSET 30002 -#define QM_REG_PQTX2PF_43_RT_OFFSET 30003 -#define QM_REG_PQTX2PF_44_RT_OFFSET 30004 -#define QM_REG_PQTX2PF_45_RT_OFFSET 30005 -#define QM_REG_PQTX2PF_46_RT_OFFSET 30006 -#define QM_REG_PQTX2PF_47_RT_OFFSET 30007 -#define QM_REG_PQTX2PF_48_RT_OFFSET 30008 -#define QM_REG_PQTX2PF_49_RT_OFFSET 30009 -#define QM_REG_PQTX2PF_50_RT_OFFSET 30010 -#define QM_REG_PQTX2PF_51_RT_OFFSET 30011 -#define QM_REG_PQTX2PF_52_RT_OFFSET 30012 -#define QM_REG_PQTX2PF_53_RT_OFFSET 30013 -#define QM_REG_PQTX2PF_54_RT_OFFSET 30014 -#define QM_REG_PQTX2PF_55_RT_OFFSET 30015 -#define QM_REG_PQTX2PF_56_RT_OFFSET 30016 -#define QM_REG_PQTX2PF_57_RT_OFFSET 30017 -#define QM_REG_PQTX2PF_58_RT_OFFSET 30018 -#define QM_REG_PQTX2PF_59_RT_OFFSET 30019 -#define QM_REG_PQTX2PF_60_RT_OFFSET 30020 -#define QM_REG_PQTX2PF_61_RT_OFFSET 30021 -#define QM_REG_PQTX2PF_62_RT_OFFSET 30022 -#define QM_REG_PQTX2PF_63_RT_OFFSET 30023 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030 
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052 -#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308 -#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30564 -#define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30821 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30823 -#define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839 -#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30855 -#define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30871 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873 -#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889 -#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30905 -#define QM_REG_WFQPFCRD_RT_SIZE 256 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31161 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31162 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163 -#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31675 -#define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187 -#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32699 -#define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33211 -#define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 -#define QM_REG_VOQCRDLINE_RT_OFFSET 34043 -#define QM_REG_VOQCRDLINE_RT_SIZE 36 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 36 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135 -#define 
NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285 -#define 
PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308 - -#define RUNTIME_ARRAY_SIZE 34309 +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18 +#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19 +#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20 +#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21 +#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22 +#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23 +#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24 +#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25 +#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26 +#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27 +#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28 +#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32 +#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36 +#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45 
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2093 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6525 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 6527 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 6529 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561 +#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562 +#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563 +#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564 +#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987 +#define 
TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 
34079 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493 +#define QM_REG_PQTX2PF_0_RT_OFFSET 34494 +#define QM_REG_PQTX2PF_1_RT_OFFSET 34495 +#define QM_REG_PQTX2PF_2_RT_OFFSET 34496 +#define QM_REG_PQTX2PF_3_RT_OFFSET 34497 +#define QM_REG_PQTX2PF_4_RT_OFFSET 34498 +#define QM_REG_PQTX2PF_5_RT_OFFSET 34499 +#define QM_REG_PQTX2PF_6_RT_OFFSET 34500 +#define QM_REG_PQTX2PF_7_RT_OFFSET 34501 +#define QM_REG_PQTX2PF_8_RT_OFFSET 34502 +#define QM_REG_PQTX2PF_9_RT_OFFSET 34503 +#define QM_REG_PQTX2PF_10_RT_OFFSET 34504 +#define QM_REG_PQTX2PF_11_RT_OFFSET 34505 +#define QM_REG_PQTX2PF_12_RT_OFFSET 34506 +#define QM_REG_PQTX2PF_13_RT_OFFSET 34507 +#define QM_REG_PQTX2PF_14_RT_OFFSET 34508 +#define QM_REG_PQTX2PF_15_RT_OFFSET 34509 +#define QM_REG_PQTX2PF_16_RT_OFFSET 34510 +#define QM_REG_PQTX2PF_17_RT_OFFSET 34511 +#define QM_REG_PQTX2PF_18_RT_OFFSET 34512 +#define QM_REG_PQTX2PF_19_RT_OFFSET 34513 +#define QM_REG_PQTX2PF_20_RT_OFFSET 34514 +#define QM_REG_PQTX2PF_21_RT_OFFSET 34515 +#define QM_REG_PQTX2PF_22_RT_OFFSET 34516 +#define QM_REG_PQTX2PF_23_RT_OFFSET 34517 +#define QM_REG_PQTX2PF_24_RT_OFFSET 34518 +#define QM_REG_PQTX2PF_25_RT_OFFSET 34519 +#define QM_REG_PQTX2PF_26_RT_OFFSET 34520 +#define QM_REG_PQTX2PF_27_RT_OFFSET 34521 +#define QM_REG_PQTX2PF_28_RT_OFFSET 34522 +#define QM_REG_PQTX2PF_29_RT_OFFSET 34523 +#define QM_REG_PQTX2PF_30_RT_OFFSET 34524 +#define QM_REG_PQTX2PF_31_RT_OFFSET 34525 +#define QM_REG_PQTX2PF_32_RT_OFFSET 34526 +#define QM_REG_PQTX2PF_33_RT_OFFSET 34527 +#define QM_REG_PQTX2PF_34_RT_OFFSET 34528 +#define QM_REG_PQTX2PF_35_RT_OFFSET 34529 +#define QM_REG_PQTX2PF_36_RT_OFFSET 34530 +#define QM_REG_PQTX2PF_37_RT_OFFSET 34531 +#define QM_REG_PQTX2PF_38_RT_OFFSET 34532 +#define QM_REG_PQTX2PF_39_RT_OFFSET 34533 +#define QM_REG_PQTX2PF_40_RT_OFFSET 34534 +#define QM_REG_PQTX2PF_41_RT_OFFSET 34535 +#define QM_REG_PQTX2PF_42_RT_OFFSET 34536 +#define QM_REG_PQTX2PF_43_RT_OFFSET 34537 
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34538 +#define QM_REG_PQTX2PF_45_RT_OFFSET 34539 +#define QM_REG_PQTX2PF_46_RT_OFFSET 34540 +#define QM_REG_PQTX2PF_47_RT_OFFSET 34541 +#define QM_REG_PQTX2PF_48_RT_OFFSET 34542 +#define QM_REG_PQTX2PF_49_RT_OFFSET 34543 +#define QM_REG_PQTX2PF_50_RT_OFFSET 34544 +#define QM_REG_PQTX2PF_51_RT_OFFSET 34545 +#define QM_REG_PQTX2PF_52_RT_OFFSET 34546 +#define QM_REG_PQTX2PF_53_RT_OFFSET 34547 +#define QM_REG_PQTX2PF_54_RT_OFFSET 34548 +#define QM_REG_PQTX2PF_55_RT_OFFSET 34549 +#define QM_REG_PQTX2PF_56_RT_OFFSET 34550 +#define QM_REG_PQTX2PF_57_RT_OFFSET 34551 +#define QM_REG_PQTX2PF_58_RT_OFFSET 34552 +#define QM_REG_PQTX2PF_59_RT_OFFSET 34553 +#define QM_REG_PQTX2PF_60_RT_OFFSET 34554 +#define QM_REG_PQTX2PF_61_RT_OFFSET 34555 +#define QM_REG_PQTX2PF_62_RT_OFFSET 34556 +#define QM_REG_PQTX2PF_63_RT_OFFSET 34557 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 35098 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354 +#define QM_REG_RLPFPERIOD_RT_OFFSET 35355 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356 +#define QM_REG_RLPFINCVAL_RT_OFFSET 35357 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 35389 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 35405 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 35439 +#define QM_REG_WFQPFCRD_RT_SIZE 256 +#define QM_REG_WFQPFENABLE_RT_OFFSET 35695 +#define QM_REG_WFQVPENABLE_RT_OFFSET 35696 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 36209 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 37233 +#define 
QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 37745 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_PTRTBLTX_RT_OFFSET 38257 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 +#define QM_REG_VOQCRDLINE_RT_OFFSET 39601 +#define QM_REG_VOQCRDLINE_RT_SIZE 36 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 36 +#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 +#define 
PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 +#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 + +#define RUNTIME_ARRAY_SIZE 43023 + +/* Init Callbacks */ +#define DMAE_READY_CB 0 /* The eth storm context for the Tstorm */ struct tstorm_eth_conn_st_ctx { @@ -4436,219 +4935,219 @@ struct xstorm_eth_conn_st_ctx { __le32 reserved[60]; }; -struct xstorm_eth_conn_ag_ctx { +struct e4_xstorm_eth_conn_ag_ctx { u8 reserved0; - u8 eth_state; + u8 state; u8 flags0; -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define 
XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define 
XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 
0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define 
E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define 
E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define 
XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 ereserved1; + __le16 e5_reserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -4681,7 +5180,7 @@ struct xstorm_eth_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 ereserved; + u8 e5_reserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -4704,37 +5203,37 @@ struct ystorm_eth_conn_st_ctx { __le32 reserved[8]; }; -struct ystorm_eth_conn_ag_ctx { +struct e4_ystorm_eth_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 -#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define 
YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 tx_q0_int_coallecing_timeset; u8 byte3; __le16 word0; @@ -4748,89 +5247,89 @@ struct ystorm_eth_conn_ag_ctx { __le32 reg3; }; -struct tstorm_eth_conn_ag_ctx { +struct e4_tstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define 
E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; 
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -4852,63 +5351,63 @@ struct tstorm_eth_conn_ag_ctx { __le32 reg10; }; -struct ustorm_eth_conn_ag_ctx { +struct e4_ustorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 u8 flags2; -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define 
E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -4932,20 +5431,21 @@ struct mstorm_eth_conn_st_ctx { }; /* eth connection context */ -struct eth_conn_context { +struct e4_eth_conn_context { struct tstorm_eth_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct pstorm_eth_conn_st_ctx pstorm_st_context; struct xstorm_eth_conn_st_ctx xstorm_st_context; - struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context; struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct ystorm_eth_conn_ag_ctx ystorm_ag_context; - struct tstorm_eth_conn_ag_ctx tstorm_ag_context; - struct ustorm_eth_conn_ag_ctx ustorm_ag_context; + struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; struct mstorm_eth_conn_st_ctx mstorm_st_context; }; +/* Ethernet filter types: mac/vlan/pair */ enum eth_error_code { ETH_OK = 0x00, ETH_FILTERS_MAC_ADD_FAIL_FULL, @@ -4972,6 +5472,7 @@ enum eth_error_code { MAX_ETH_ERROR_CODE }; +/* Opcodes for the event ring */ enum eth_event_opcode { ETH_EVENT_UNUSED, ETH_EVENT_VPORT_START, @@ -4983,13 +5484,14 @@ enum eth_event_opcode { ETH_EVENT_RX_QUEUE_UPDATE, ETH_EVENT_RX_QUEUE_STOP, ETH_EVENT_FILTERS_UPDATE, - ETH_EVENT_RESERVED, - ETH_EVENT_RESERVED2, - ETH_EVENT_RESERVED3, + ETH_EVENT_RX_ADD_OPENFLOW_FILTER, + ETH_EVENT_RX_DELETE_OPENFLOW_FILTER, + ETH_EVENT_RX_CREATE_OPENFLOW_ACTION, ETH_EVENT_RX_ADD_UDP_FILTER, ETH_EVENT_RX_DELETE_UDP_FILTER, - ETH_EVENT_RESERVED4, - ETH_EVENT_RESERVED5, + ETH_EVENT_RX_CREATE_GFT_ACTION, + ETH_EVENT_RX_GFT_UPDATE_FILTER, + ETH_EVENT_TX_QUEUE_UPDATE, MAX_ETH_EVENT_OPCODE }; @@ -5039,6 +5541,7 @@ enum eth_filter_type { MAX_ETH_FILTER_TYPE }; +/* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, ETH_IPV4_FIRST_FRAG, @@ -5046,12 +5549,14 @@ enum eth_ipv4_frag_type { MAX_ETH_IPV4_FRAG_TYPE }; +/* eth IPv4 Fragment Type */ enum eth_ip_type { ETH_IPV4, ETH_IPV6, MAX_ETH_IP_TYPE }; +/* Ethernet Ramrod Command IDs */ enum eth_ramrod_cmd_id { ETH_RAMROD_UNUSED, ETH_RAMROD_VPORT_START, @@ -5070,10 +5575,11 @@ enum eth_ramrod_cmd_id { ETH_RAMROD_RX_DELETE_UDP_FILTER, ETH_RAMROD_RX_CREATE_GFT_ACTION, ETH_RAMROD_GFT_UPDATE_FILTER, + ETH_RAMROD_TX_QUEUE_UPDATE, MAX_ETH_RAMROD_CMD_ID }; -/* return code from eth sp ramrods */ +/* Return code from eth sp ramrods */ struct eth_return_code { u8 value; #define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F @@ -5209,18 +5715,14 @@ struct eth_vport_tx_mode { __le16 reserved2[3]; }; +/* GFT filter update action type */ enum gft_filter_update_action { GFT_ADD_FILTER, GFT_DELETE_FILTER, MAX_GFT_FILTER_UPDATE_ACTION }; -enum gft_logic_filter_type { - GFT_FILTER_TYPE, - RFS_FILTER_TYPE, - MAX_GFT_LOGIC_FILTER_TYPE -}; - +/* Ramrod data for rx add openflow filter */ struct rx_add_openflow_filter_data { __le16 action_icid; u8 priority; @@ -5244,11 +5746,13 @@ struct rx_add_openflow_filter_data { __le16 l4_src_port; }; +/* Ramrod data for rx create gft action */ struct rx_create_gft_action_data { u8 vport_id; u8 reserved[7]; }; +/* Ramrod data for rx create openflow action */ struct rx_create_openflow_action_data { u8 vport_id; u8 reserved[7]; @@ -5286,7 +5790,7 @@ struct rx_queue_start_ramrod_data { struct regpair reserved2; }; -/* Ramrod data for rx queue start ramrod */ +/* Ramrod data for rx queue stop ramrod */ struct rx_queue_stop_ramrod_data { __le16 rx_queue_id; u8 
complete_cqe_flg; @@ -5324,14 +5828,22 @@ struct rx_udp_filter_data { __le32 tenant_id; }; +/* Add or delete GFT filter - filter is packet header of type of packet wished + * to pass certain FW flow. + */ struct rx_update_gft_filter_data { struct regpair pkt_hdr_addr; __le16 pkt_hdr_length; - __le16 rx_qid_or_action_icid; - u8 vport_id; - u8 filter_type; + __le16 action_icid; + __le16 rx_qid; + __le16 flow_id; + __le16 vport_id; + u8 action_icid_valid; + u8 rx_qid_valid; + u8 flow_id_valid; u8 filter_action; u8 assert_on_error; + u8 reserved; }; /* Ramrod data for rx queue start ramrod */ @@ -5377,6 +5889,14 @@ struct tx_queue_stop_ramrod_data { __le16 reserved[4]; }; +/* Ramrod data for tx queue update ramrod */ +struct tx_queue_update_ramrod_data { + __le16 update_qm_pq_id_flg; + __le16 qm_pq_id; + __le32 reserved0; + struct regpair reserved1[5]; +}; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@ -5477,219 +5997,219 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct xstorm_eth_conn_agctxdq_ext_ldpart { +struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart { u8 reserved0; - u8 eth_state; + u8 state; u8 flags0; -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 u8 flags3; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 u8 flags4; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 -#define 
XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 u8 flags5; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 u8 flags6; -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 u8 flags8; 
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 u8 flags9; -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 -#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 u8 edpm_event_id; __le16 physical_q0; - __le16 ereserved1; + __le16 e5_reserved1; __le16 edpm_num_bds; __le16 tx_bd_cons; __le16 tx_bd_prod; @@ -5706,256 +6226,256 @@ struct xstorm_eth_conn_agctxdq_ext_ldpart { __le32 reg4; }; -struct mstorm_eth_conn_ag_ctx { +struct e4_mstorm_eth_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct xstorm_eth_hw_conn_ag_ctx { +struct e4_xstorm_eth_hw_conn_ag_ctx { u8 reserved0; - u8 eth_state; + u8 state; u8 flags0; -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define 
E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
- __le16 ereserved1;
+ __le16 e5_reserved1;
__le16 edpm_num_bds;
__le16 tx_bd_cons;
__le16 tx_bd_prod;
@@ -5963,6 +6483,7 @@ struct xstorm_eth_hw_conn_ag_ctx {
__le16 conn_dpi;
};
+/* GFT CAM line struct */
struct gft_cam_line {
__le32 camline;
#define GFT_CAM_LINE_VALID_MASK 0x1
@@ -5975,6 +6496,7 @@ struct gft_cam_line {
#define GFT_CAM_LINE_RESERVED1_SHIFT 29
};
+/* GFT CAM line struct with fields breakout */
struct gft_cam_line_mapped {
__le32 camline;
#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
@@ -6008,28 +6530,31 @@ union gft_cam_line_union {
struct gft_cam_line_mapped cam_line_mapped;
};
+/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
GFT_PROFILE_IPV6 = 1,
MAX_GFT_PROFILE_IP_VERSION
};
+/* Profile key struct for GFT logic in Prs */
struct gft_profile_key {
__le16 profile_key;
-#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
-#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
-#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
-#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
-#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
-#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
-#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
-};
-
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+/* Used in gft_profile_key: Indication for tunnel type */
enum gft_profile_tunnel_type {
GFT_PROFILE_NO_TUNNEL = 0,
GFT_PROFILE_VXLAN_TUNNEL = 1,
@@ -6040,6 +6565,7 @@ enum gft_profile_tunnel_type {
MAX_GFT_PROFILE_TUNNEL_TYPE
};
+/* Used in gft_profile_key: Indication for protocol type */
enum gft_profile_upper_protocol_type {
GFT_PROFILE_ROCE_PROTOCOL = 0,
GFT_PROFILE_RROCE_PROTOCOL = 1,
@@ -6060,6 +6586,7 @@ enum gft_profile_upper_protocol_type {
MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
};
+/* GFT RAM line struct */
struct gft_ram_line {
__le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
@@ -6149,6 +6676,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_RESERVED1_SHIFT 10
};
+/* Used in the first 2 bits for gft_ram_line: Indication for vlan mask */
enum gft_vlan_select {
INNER_PROVIDER_VLAN = 0,
INNER_VLAN = 1,
@@ -6157,10 +6685,205 @@ enum gft_vlan_select {
MAX_GFT_VLAN_SELECT
};
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct e4_ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct e4_mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+/* The roce task context of Mstorm */
struct mstorm_rdma_task_st_ctx {
struct regpair temp[4];
};
+/* The roce task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[2];
+};
+
+struct e4_ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 reg2;
+ __le32 dif_runt_value;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+/* RDMA task context */
+struct e4_rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+/* rdma function close ramrod data */
struct rdma_close_func_ramrod_data {
u8 cnq_start_offset;
u8 num_cnqs;
@@ -6169,6 +6892,7 @@ struct rdma_close_func_ramrod_data {
u8 reserved[4];
};
+/* rdma function init CNQ parameters */
struct rdma_cnq_params {
__le16 sb_num;
u8 sb_index;
@@ -6179,6 +6903,7 @@ struct rdma_cnq_params {
u8 reserved1[6];
};
+/* rdma create cq ramrod data */
struct rdma_create_cq_ramrod_data {
struct regpair cq_handle;
struct regpair pbl_addr;
@@ -6193,21 +6918,25 @@ struct rdma_create_cq_ramrod_data {
__le16 reserved1;
};
+/* rdma deregister tid ramrod data */
struct rdma_deregister_tid_ramrod_data {
__le32 itid;
__le32 reserved;
};
+/* rdma destroy cq output params */
struct rdma_destroy_cq_output_params {
__le16 cnq_num;
__le16 reserved0;
__le32 reserved1;
};
+/* rdma destroy cq ramrod data */
struct rdma_destroy_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* RDMA slow path EQ cmd IDs */
enum rdma_event_opcode {
RDMA_EVENT_UNUSED,
RDMA_EVENT_FUNC_INIT,
@@ -6223,6 +6952,7 @@ enum rdma_event_opcode {
MAX_RDMA_EVENT_OPCODE
};
+/* RDMA FW return code for slow path ramrods */
enum rdma_fw_return_code {
RDMA_RETURN_OK = 0,
RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
@@ -6232,20 +6962,24 @@ enum rdma_fw_return_code {
MAX_RDMA_FW_RETURN_CODE
};
+/* rdma function init header */
struct rdma_init_func_hdr {
u8 cnq_start_offset;
u8 num_cnqs;
u8 cq_ring_mode;
u8 vf_id;
u8 vf_valid;
- u8 reserved[3];
+ u8 relaxed_ordering;
+ u8 reserved[2];
};
+/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
+/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
RDMA_RAMROD_FUNC_INIT,
@@ -6261,42 +6995,43 @@ enum rdma_ramrod_cmd_id {
MAX_RDMA_RAMROD_CMD_ID
};
+/* rdma register tid ramrod data */
struct rdma_register_tid_ramrod_data {
__le16 flags;
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
-#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
u8 flags1;
#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
-#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
-#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
u8 flags2;
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
-#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
-#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
u8 key;
u8 length_hi;
u8 vf_id;
@@ -6313,19 +7048,21 @@ struct rdma_register_tid_ramrod_data {
__le32 reserved4[2];
};
+/* rdma resize cq output params */
struct rdma_resize_cq_output_params {
__le32 old_cq_cons;
__le32 old_cq_prod;
};
+/* rdma resize cq ramrod data */
struct rdma_resize_cq_ramrod_data {
u8 flags;
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
u8 pbl_log_page_size;
__le16 pbl_num_pages;
__le32 max_cqes;
@@ -6333,10 +7070,12 @@ struct rdma_resize_cq_ramrod_data {
struct regpair output_params_addr;
};
+/* The rdma srq context */
struct rdma_srq_context {
struct regpair temp[8];
};
+/* rdma create srq ramrod data */
struct rdma_srq_create_ramrod_data {
struct regpair pbl_base_addr;
__le16 pages_in_srq_pbl;
@@ -6348,206 +7087,19 @@ struct rdma_srq_create_ramrod_data {
struct regpair producers_addr;
};
+/* rdma destroy srq ramrod data */
struct rdma_srq_destroy_ramrod_data {
struct rdma_srq_id srq_id;
__le32 reserved;
};
+/* rdma modify srq ramrod data */
struct rdma_srq_modify_ramrod_data {
struct rdma_srq_id srq_id;
__le32 wqe_limit;
};
-struct ystorm_rdma_task_st_ctx {
- struct regpair temp[4];
-};
-
-struct ystorm_rdma_task_ag_ctx {
- u8 reserved;
- u8 byte1;
- __le16 msem_ctx_upd_seq;
- u8 flags0;
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
- u8 flags1;
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
- u8 flags2;
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define
YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 key; - __le32 mw_cnt; - u8 ref_cnt_seq; - u8 ctx_upd_seq; - __le16 dif_flags; - __le16 tx_ref_count; - __le16 last_used_ltid; - __le16 parent_mr_lo; - __le16 parent_mr_hi; - __le32 fbo_lo; - __le32 fbo_hi; -}; - -struct mstorm_rdma_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 icid; - u8 flags0; -#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 - u8 flags1; -#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 -#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 -#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 key; - __le32 mw_cnt; - u8 ref_cnt_seq; - u8 ctx_upd_seq; - __le16 dif_flags; - __le16 tx_ref_count; - __le16 last_used_ltid; - __le16 parent_mr_lo; - __le16 parent_mr_hi; - __le32 fbo_lo; - __le32 fbo_hi; -}; - -struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; -}; - -struct ustorm_rdma_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 icid; - u8 flags0; -#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 -#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5 -#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 -#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 - u8 flags1; -#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 -#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 -#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 
-#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
-	u8 flags2;
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
-	u8 flags3;
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
-	__le32 dif_err_intervals;
-	__le32 dif_error_1st_interval;
-	__le32 reg2;
-	__le32 dif_runt_value;
-	__le32 reg4;
-	__le32 reg5;
-};
-
-struct rdma_task_context {
-	struct ystorm_rdma_task_st_ctx ystorm_st_context;
-	struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
-	struct tdif_task_context tdif_context;
-	struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
-	struct mstorm_rdma_task_st_ctx mstorm_st_context;
-	struct rdif_task_context rdif_context;
-	struct ustorm_rdma_task_st_ctx ustorm_st_context;
-	struct regpair ustorm_st_padding[2];
-	struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
-};
-
+/* RDMA Tid type enumeration (for register_tid ramrod) */
 enum rdma_tid_type {
 	RDMA_TID_REGISTERED_MR,
 	RDMA_TID_FMR,
@@ -6556,214 +7108,214 @@ enum rdma_tid_type {
 	MAX_RDMA_TID_TYPE
 };
 
-struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
 	u8 reserved0;
 	u8 state;
 	u8 flags0;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
 	u8 flags1;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSTORM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
 	u8 flags2;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
 	u8 flags3;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
 	u8 flags4;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
 	u8 flags5;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
 	u8 flags6;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
 	u8 flags7;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
 	u8 flags8;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
 	u8 flags9;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
 	u8 flags10;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
 	u8 flags11;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
 	u8 flags12;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
 	u8 flags13;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
 	u8 flags14;
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
-#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
 	u8 byte2;
 	__le16 physical_q0;
 	__le16 word1;
@@ -6783,126 +7335,126 @@ struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
 	__le32 reg4;
 };
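Everything in this hunk follows the firmware interface's packed-bitfield convention: each u8 (or __le16) "flags" word is described by NAME_MASK/NAME_SHIFT macro pairs rather than by C bitfields, so the in-memory layout does not depend on the compiler. The qed headers build GET_FIELD()/SET_FIELD() accessors from exactly these pairs via token pasting; the sketch below mirrors that pattern in stand-alone C. The helper definitions and the EXAMPLE_CF0 field are illustrative stand-ins for this note, not code taken from the driver.

/*
 * Minimal stand-alone model of the MASK/SHIFT accessor pattern used by
 * the E4_* definitions above (cf. GET_FIELD/SET_FIELD in the qed
 * headers). EXAMPLE_CF0 is an invented 2-bit "completion flag" field in
 * bits 2..3 of a flags byte, like E4_MSTORM_RDMA_CONN_AG_CTX_CF0 below.
 */
#include <stdint.h>
#include <stdio.h>

#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & name##_MASK)

#define SET_FIELD(value, name, flag)                                   \
	do {                                                           \
		(value) &= ~((name##_MASK) << (name##_SHIFT));         \
		(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
	} while (0)

#define EXAMPLE_CF0_MASK	0x3
#define EXAMPLE_CF0_SHIFT	2

int main(void)
{
	uint8_t flags0 = 0;

	SET_FIELD(flags0, EXAMPLE_CF0, 2);	/* write the 2-bit field */
	printf("flags0 = 0x%02x, CF0 = %u\n",
	       (unsigned)flags0, (unsigned)GET_FIELD(flags0, EXAMPLE_CF0));
	return 0;	/* prints: flags0 = 0x08, CF0 = 2 */
}

The token pasting (name##_MASK, name##_SHIFT) is why every field must define both macros with the same stem, and why the E4_ rename has to touch every MASK/SHIFT pair of a structure at once, as the hunks above and below do.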
MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct tstorm_rdma_conn_ag_ctx { +struct e4_tstorm_rdma_conn_ag_ctx { u8 reserved0; u8 byte1; u8 flags0; -#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 
+#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define 
TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -6924,73 +7476,73 @@ struct tstorm_rdma_conn_ag_ctx { __le32 reg10; }; -struct tstorm_rdma_task_ag_ctx { +struct e4_tstorm_rdma_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define 
TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 -#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 -#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 
3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 -#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -7003,63 +7555,63 @@ struct tstorm_rdma_task_ag_ctx { __le32 reg2; }; -struct ustorm_rdma_conn_ag_ctx { +struct e4_ustorm_rdma_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define 
USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define 
E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 conn_dpi; @@ -7072,214 +7624,214 @@ struct ustorm_rdma_conn_ag_ctx { __le16 word3; }; -struct xstorm_rdma_conn_ag_ctx { +struct e4_xstorm_rdma_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define 
E4_XSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3 -#define 
XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2 -#define 
XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5 
-#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define 
XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0 -#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4 -#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4 +#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -7301,37 +7853,37 @@ struct xstorm_rdma_conn_ag_ctx { __le32 reg6; }; -struct ystorm_rdma_conn_ag_ctx { +struct e4_ystorm_rdma_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 -#define 
YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -7345,62 +7897,70 @@ struct ystorm_rdma_conn_ag_ctx { __le32 reg3; }; -struct mstorm_roce_conn_st_ctx { - struct regpair temp[6]; +/* The roce storm context of Ystorm */ +struct ystorm_roce_conn_st_ctx { + struct regpair temp[2]; }; +/* The roce storm context of Mstorm */ struct pstorm_roce_conn_st_ctx { struct regpair temp[16]; }; -struct ystorm_roce_conn_st_ctx { - struct regpair temp[2]; -}; - +/* The roce storm context of Xstorm */ struct xstorm_roce_conn_st_ctx { struct regpair temp[24]; }; +/* The roce storm context of Tstorm */ struct tstorm_roce_conn_st_ctx { struct regpair temp[30]; }; +/* The roce storm context of Mstorm */ +struct mstorm_roce_conn_st_ctx { + struct regpair temp[6]; +}; + +/* The roce storm context of Ystorm */ struct ustorm_roce_conn_st_ctx { struct regpair temp[12]; }; -struct roce_conn_context { +/* roce connection context */ +struct e4_roce_conn_context { struct ystorm_roce_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_roce_conn_st_ctx pstorm_st_context; struct xstorm_roce_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct xstorm_rdma_conn_ag_ctx xstorm_ag_context; - struct tstorm_rdma_conn_ag_ctx tstorm_ag_context; + struct e4_xstorm_rdma_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_rdma_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_roce_conn_st_ctx tstorm_st_context; struct mstorm_roce_conn_st_ctx mstorm_st_context; struct ustorm_roce_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; }; +/* roce create qp requester ramrod data */ struct roce_create_qp_req_ramrod_data { __le16 flags; -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 
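None of these *_MASK/*_SHIFT pairs is meant to be used by hand: qed packs several sub-fields into each flags word and accesses them through generic helpers that paste the _MASK/_SHIFT suffixes onto a field name, which is also why the ramrod defines in this hunk only get re-aligned while keeping their names. A minimal sketch of that accessor pattern follows, paraphrasing the GET_FIELD/SET_FIELD helpers from the driver's common HSI header; qed_roce_set_req_flags and the values it sets are hypothetical, for illustration only.

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)

#define SET_FIELD(value, name, flag)				\
	do {							\
		(value) &= ~(name ## _MASK << name ## _SHIFT);	\
		(value) |= (((u64)(flag)) << (name ## _SHIFT));	\
	} while (0)

/* Hypothetical helper: build the requester create-QP flags in host
 * order, then store them into the __le16 'flags' member declared above.
 */
static void qed_roce_set_req_flags(struct roce_create_qp_req_ramrod_data *p)
{
	u16 flags = 0;

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, 1);
	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, 7);

	p->flags = cpu_to_le16(flags);
}

The E4_ prefix added throughout this file therefore changes only the names handed to these two macros, so call sites are updated mechanically rather than functionally.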
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8 -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF -#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12 u8 max_ord; u8 traffic_class; u8 hop_limit; @@ -7431,26 +7991,27 @@ struct roce_create_qp_req_ramrod_data { __le16 dpi; }; +/* roce create qp responder ramrod data */ struct roce_create_qp_resp_ramrod_data { __le16 flags; -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F -#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 +#define 
ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 u8 max_ird; u8 traffic_class; u8 hop_limit; @@ -7482,24 +8043,40 @@ struct roce_create_qp_resp_ramrod_data { __le16 dpi; }; +/* roce DCQCN received statistics */ +struct roce_dcqcn_received_stats { + struct regpair ecn_pkt_rcv; + struct regpair cnp_pkt_rcv; +}; + +/* roce DCQCN sent statistics */ +struct roce_dcqcn_sent_stats { + struct regpair cnp_pkt_sent; +}; + +/* RoCE destroy qp requester output params */ struct roce_destroy_qp_req_output_params { __le32 num_bound_mw; __le32 cq_prod; }; +/* RoCE destroy qp requester ramrod data */ struct roce_destroy_qp_req_ramrod_data { struct regpair output_params_addr; }; +/* RoCE destroy qp responder output params */ struct roce_destroy_qp_resp_output_params { __le32 num_invalidated_mw; __le32 cq_prod; }; +/* RoCE destroy qp responder ramrod data */ struct roce_destroy_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* roce special events statistics */ struct roce_events_stats { __le16 silent_drops; __le16 rnr_naks_sent; @@ -7508,6 +8085,7 @@ struct roce_events_stats { __le32 reserved; }; +/* ROCE slow path EQ cmd IDs */ enum roce_event_opcode { ROCE_EVENT_CREATE_QP = 11, ROCE_EVENT_MODIFY_QP, @@ -7518,6 +8096,7 @@ enum roce_event_opcode { MAX_ROCE_EVENT_OPCODE }; +/* roce func init ramrod data */ struct roce_init_func_params { u8 ll2_queue_id; u8 cnp_vlan_priority; @@ -7526,42 +8105,46 @@ struct roce_init_func_params { __le32 cnp_send_timeout; }; +/* roce func init ramrod data */ struct roce_init_func_ramrod_data { struct rdma_init_func_ramrod_data rdma; struct roce_init_func_params roce; }; +/* roce modify qp requester ramrod data */ struct roce_modify_qp_req_ramrod_data { __le16 flags; -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7 -#define 
ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 13 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14 u8 fields; -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF -#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4 u8 max_ord; u8 traffic_class; u8 hop_limit; @@ -7570,66 +8153,76 @@ struct roce_modify_qp_req_ramrod_data { __le32 ack_timeout_val; __le16 mtu; __le16 reserved2; - __le32 reserved3[3]; + __le32 reserved3[2]; + __le16 low_latency_phy_queue; + __le16 regular_latency_phy_queue; __le32 src_gid[4]; __le32 dst_gid[4]; }; +/* roce modify qp responder ramrod data */ struct roce_modify_qp_resp_ramrod_data { __le16 flags; -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1 -#define 
ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUES_FLG_SHIFT 10 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11 u8 fields; -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F -#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3 u8 max_ird; u8 traffic_class; u8 hop_limit; __le16 p_key; __le32 flow_label; __le16 mtu; - __le16 reserved2; + __le16 low_latency_phy_queue; + __le16 regular_latency_phy_queue; + u8 reserved2[6]; __le32 src_gid[4]; __le32 dst_gid[4]; }; +/* RoCE query qp requester output params */ struct roce_query_qp_req_output_params { __le32 psn; __le32 flags; -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1 -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF -#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2 
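/* When the QUERY_QP requester ramrod completes, firmware writes this
 * structure to the buffer whose DMA address the driver passed via
 * roce_query_qp_req_ramrod_data.output_params_addr (declared just
 * below). The caller can then test, e.g., whether the SQ is still
 * draining with something along the lines of
 * GET_FIELD(le32_to_cpu(out->flags),
 *	     ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG),
 * where "out" is a hypothetical pointer to this structure and
 * GET_FIELD is the common-HSI field helper sketched earlier.
 */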
}; +/* RoCE query qp requester ramrod data */ struct roce_query_qp_req_ramrod_data { struct regpair output_params_addr; }; +/* RoCE query qp responder output params */ struct roce_query_qp_resp_output_params { __le32 psn; __le32 err_flag; @@ -7639,10 +8232,12 @@ struct roce_query_qp_resp_output_params { #define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 }; +/* RoCE query qp responder ramrod data */ struct roce_query_qp_resp_ramrod_data { struct regpair output_params_addr; }; +/* ROCE ramrod command IDs */ enum roce_ramrod_cmd_id { ROCE_RAMROD_CREATE_QP = 11, ROCE_RAMROD_MODIFY_QP, @@ -7653,163 +8248,163 @@ enum roce_ramrod_cmd_id { MAX_ROCE_RAMROD_CMD_ID }; -struct mstorm_roce_req_conn_ag_ctx { +struct e4_mstorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct mstorm_roce_resp_conn_ag_ctx { +struct e4_mstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; -struct tstorm_roce_req_conn_ag_ctx { +struct e4_tstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1 -#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 u8 flags1; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 u8 flags3; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 -#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 snd_nxt_psn; __le32 snd_max_psn; @@ -7825,95 +8420,95 @@ struct tstorm_roce_req_conn_ag_ctx { u8 byte4; u8 byte5; __le16 snd_sq_cons; - __le16 word2; + __le16 conn_dpi; __le16 word3; __le32 reg9; __le32 reg10; }; -struct tstorm_roce_resp_conn_ag_ctx { +struct e4_tstorm_roce_resp_conn_ag_ctx { u8 byte0; u8 state; u8 flags0; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 +#define 
E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags2; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 -#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 psn_and_rxmit_id_echo; __le32 reg1; __le32 reg2; @@ -7935,63 +8530,63 @@ struct 
tstorm_roce_resp_conn_ag_ctx { __le32 reg10; }; -struct ustorm_roce_req_conn_ag_ctx { +struct e4_ustorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 +#define 
E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8004,63 +8599,63 @@ struct ustorm_roce_req_conn_ag_ctx { __le16 word3; }; -struct ustorm_roce_resp_conn_ag_ctx { +struct e4_ustorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 -#define 
USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define 
E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8073,214 +8668,214 @@ struct ustorm_roce_resp_conn_ag_ctx { __le16 word3; }; -struct xstorm_roce_req_conn_ag_ctx { +struct e4_xstorm_roce_req_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define 
E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 u8 flags13; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 -#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -8302,224 +8897,224 @@ struct xstorm_roce_req_conn_ag_ctx { __le32 orq_cons; }; -struct xstorm_roce_resp_conn_ag_ctx { +struct e4_xstorm_roce_resp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 -#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 u8 flags2; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 u8 flags4; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 -#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 u8 flags7; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 u8 flags11; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define 
E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; - __le16 word1; - __le16 irq_prod; - __le16 word3; - __le16 word4; - __le16 ereserved1; + __le16 irq_prod_shadow; + __le16 word2; __le16 irq_cons; + __le16 irq_prod; + __le16 e5_reserved1; + __le16 conn_dpi; u8 rxmit_opcode; u8 byte4; u8 byte5; @@ -8533,37 +9128,37 @@ struct xstorm_roce_resp_conn_ag_ctx { __le32 msn_and_syndrome; }; -struct ystorm_roce_req_conn_ag_ctx { +struct e4_ystorm_roce_req_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define 
YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8577,37 +9172,37 @@ struct ystorm_roce_req_conn_ag_ctx { __le32 reg3; }; -struct ystorm_roce_resp_conn_ag_ctx { +struct e4_ystorm_roce_resp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define 
YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -8621,6 +9216,7 @@ struct ystorm_roce_resp_conn_ag_ctx { __le32 reg3; }; +/* Roce doorbell data */ enum roce_flavor { PLAIN_ROCE, RROCE_IPV4, @@ -8628,228 +9224,231 @@ enum roce_flavor { MAX_ROCE_FLAVOR }; +/* The iwarp storm context of Ystorm */ struct ystorm_iwarp_conn_st_ctx { __le32 reserved[4]; }; +/* The iwarp storm context of Pstorm */ struct pstorm_iwarp_conn_st_ctx { __le32 reserved[36]; }; +/* The iwarp storm context of Xstorm */ struct xstorm_iwarp_conn_st_ctx { __le32 reserved[44]; }; -struct xstorm_iwarp_conn_ag_ctx { +struct e4_xstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define 
E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 u8 flags2; -#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 -#define 
XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 u8 flags11; -#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 -#define 
XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 -#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 -#define XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_IWARP_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -8897,89 +9496,89 @@ struct xstorm_iwarp_conn_ag_ctx { __le32 reg17; }; -struct tstorm_iwarp_conn_ag_ctx { +struct e4_tstorm_iwarp_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3 -#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 
+#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 -#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 
+#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 -#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 unaligned_nxt_seq; @@ -9001,51 +9600,56 @@ struct tstorm_iwarp_conn_ag_ctx { __le32 last_hq_sequence; }; +/* The iwarp storm context of Tstorm */ struct tstorm_iwarp_conn_st_ctx { __le32 reserved[60]; }; +/* The iwarp storm context of Mstorm */ struct mstorm_iwarp_conn_st_ctx { __le32 reserved[32]; }; +/* The iwarp storm context of Ustorm */ struct ustorm_iwarp_conn_st_ctx { __le32 reserved[24]; }; -struct iwarp_conn_context { +/* iwarp connection context */ +struct e4_iwarp_conn_context { struct ystorm_iwarp_conn_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct pstorm_iwarp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_iwarp_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; - struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct 
e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context; struct timers_context timer_context; - struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context; struct tstorm_iwarp_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; struct mstorm_iwarp_conn_st_ctx mstorm_st_context; struct ustorm_iwarp_conn_st_ctx ustorm_st_context; }; +/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */ struct iwarp_create_qp_ramrod_data { u8 flags; #define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0 -#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 -#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 -#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 -#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 -#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3 -#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6 +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x3 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 6 u8 reserved1; __le16 pd; __le16 sq_num_pages; @@ -9061,6 +9665,7 @@ struct iwarp_create_qp_ramrod_data { u8 reserved2[6]; }; +/* iWARP completion queue types */ enum iwarp_eqe_async_opcode { IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE, IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED, @@ -9083,6 +9688,7 @@ struct iwarp_eqe_data_tcp_async_completion { u8 reserved[5]; }; +/* iWARP completion queue types */ enum iwarp_eqe_sync_opcode { IWARP_EVENT_TYPE_TCP_OFFLOAD = 11, @@ -9095,6 +9701,7 @@ enum iwarp_eqe_sync_opcode { MAX_IWARP_EQE_SYNC_OPCODE }; +/* iWARP EQE completion status */ enum iwarp_fw_return_code { IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 5, IWARP_CONN_ERROR_TCP_CONNECTION_RST, @@ -9125,54 +9732,60 @@ enum iwarp_fw_return_code { MAX_IWARP_FW_RETURN_CODE }; +/* unaligned opaque data received from LL2 */ struct iwarp_init_func_params { u8 ll2_ooo_q_index; u8 reserved1[7]; }; +/* iwarp func init ramrod data */ struct iwarp_init_func_ramrod_data { struct rdma_init_func_ramrod_data rdma; struct tcp_init_params tcp; struct iwarp_init_func_params iwarp; }; +/* iWARP QP - possible states to transition to */ enum iwarp_modify_qp_new_state_type { IWARP_MODIFY_QP_STATE_CLOSING = 1, - IWARP_MODIFY_QP_STATE_ERROR = - 2, + IWARP_MODIFY_QP_STATE_ERROR = 2, MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE }; +/* iwarp modify qp responder ramrod data */ struct iwarp_modify_qp_ramrod_data { __le16 transition_to_state; __le16 flags; -#define 
IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1 -#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2 -#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 #define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3 #define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 -#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 -#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF -#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x7FF +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 5 __le32 reserved3[3]; __le32 reserved4[8]; }; +/* MPA params for Enhanced mode */ struct mpa_rq_params { __le32 ird; __le32 ord; }; +/* MPA host Address-Len for private data */ struct mpa_ulp_buffer { struct regpair addr; __le16 len; __le16 reserved[3]; }; +/* iWARP MPA offload params common to Basic and Enhanced modes */ struct mpa_outgoing_params { u8 crc_needed; u8 reject; @@ -9181,6 +9794,9 @@ struct mpa_outgoing_params { struct mpa_ulp_buffer outgoing_ulp_buffer; }; +/* iWARP MPA offload params passed by driver to FW in MPA Offload Request + * Ramrod. 
+ */ struct iwarp_mpa_offload_ramrod_data { struct mpa_outgoing_params common; __le32 tcp_cid; @@ -9188,18 +9804,20 @@ struct iwarp_mpa_offload_ramrod_data { u8 tcp_connect_side; u8 rtr_pref; #define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7 -#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0 -#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F -#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3 u8 reserved2; struct mpa_ulp_buffer incoming_ulp_buffer; struct regpair async_eqe_output_buf; struct regpair handle_for_async; struct regpair shared_queue_addr; + __le16 rcv_wnd; u8 stats_counter_id; - u8 reserved3[15]; + u8 reserved3[13]; }; +/* iWARP TCP connection offload params passed by driver to FW */ struct iwarp_offload_params { struct mpa_ulp_buffer incoming_ulp_buffer; struct regpair async_eqe_output_buf; @@ -9211,22 +9829,24 @@ struct iwarp_offload_params { u8 reserved[10]; }; +/* iWARP query QP output params */ struct iwarp_query_qp_output_params { __le32 flags; #define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 -#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 #define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF -#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 u8 reserved1[4]; }; +/* iWARP query QP ramrod data */ struct iwarp_query_qp_ramrod_data { struct regpair output_params_addr; }; +/* iWARP Ramrod Command IDs */ enum iwarp_ramrod_cmd_id { - IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = - 11, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, IWARP_RAMROD_CMD_ID_CREATE_QP, @@ -9236,22 +9856,28 @@ enum iwarp_ramrod_cmd_id { MAX_IWARP_RAMROD_CMD_ID }; +/* Per PF iWARP retransmit path statistics */ struct iwarp_rxmit_stats_drv { struct regpair tx_go_to_slow_start_event_cnt; struct regpair tx_fast_retransmit_event_cnt; }; +/* iWARP and TCP connection offload params passed by driver to FW in iWARP + * offload ramrod. 
+ */ struct iwarp_tcp_offload_ramrod_data { struct iwarp_offload_params iwarp; struct tcp_offload_params_opt2 tcp; }; +/* iWARP MPA negotiation types */ enum mpa_negotiation_mode { MPA_NEGOTIATION_TYPE_BASIC = 1, MPA_NEGOTIATION_TYPE_ENHANCED = 2, MAX_MPA_NEGOTIATION_MODE }; +/* iWARP MPA Enhanced mode RTR types */ enum mpa_rtr_type { MPA_RTR_TYPE_NONE = 0, MPA_RTR_TYPE_ZERO_SEND = 1, @@ -9264,113 +9890,114 @@ enum mpa_rtr_type { MAX_MPA_RTR_TYPE }; +/* unaligned opaque data received from LL2 */ struct unaligned_opaque_data { __le16 first_mpa_offset; u8 tcp_payload_offset; u8 flags; #define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1 -#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0 -#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 -#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1 -#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F -#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2 +#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1 +#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F +#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2 __le32 cid; }; -struct mstorm_iwarp_conn_ag_ctx { +struct e4_mstorm_iwarp_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 -#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 -#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 -#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define 
E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 rcq_cons; __le16 rcq_cons_th; __le32 reg0; __le32 reg1; }; -struct ustorm_iwarp_conn_ag_ctx { +struct e4_ustorm_iwarp_conn_ag_ctx { u8 reserved; u8 byte1; u8 flags0; -#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 -#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define 
E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 u8 flags3; -#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 -#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9383,37 +10010,37 @@ struct ustorm_iwarp_conn_ag_ctx { __le16 word3; }; -struct ystorm_iwarp_conn_ag_ctx { +struct e4_ystorm_iwarp_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 
+#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9427,6 +10054,7 @@ struct ystorm_iwarp_conn_ag_ctx { __le32 reg3; }; +/* The fcoe storm context of Ystorm */ struct ystorm_fcoe_conn_st_ctx { u8 func_mode; u8 cos; @@ -9442,45 +10070,49 @@ struct ystorm_fcoe_conn_st_ctx { struct regpair reserved; __le16 min_frame_size; u8 protection_info_flags; -#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 -#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 -#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F -#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 u8 dst_protection_per_mss; u8 src_protection_per_mss; u8 ptu_log_page_size; u8 flags; -#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 -#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 -#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 -#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F -#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 u8 fcp_xfer_size; }; +/* FCoE 16-bits vlan structure */ struct fcoe_vlan_fields { __le16 fields; -#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF -#define 
FCOE_VLAN_FIELDS_VID_SHIFT 0 -#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 -#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 -#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 -#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 +#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 +#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 }; +/* FCoE 16-bits vlan union */ union fcoe_vlan_field_union { struct fcoe_vlan_fields fields; __le16 val; }; +/* FCoE 16-bits vlan, vif union */ union fcoe_vlan_vif_field_union { union fcoe_vlan_field_union vlan; __le16 vif; }; +/* Ethernet context section */ struct pstorm_fcoe_eth_context_section { u8 remote_addr_3; u8 remote_addr_2; @@ -9500,6 +10132,7 @@ struct pstorm_fcoe_eth_context_section { __le16 inner_eth_type; }; +/* The fcoe storm context of Pstorm */ struct pstorm_fcoe_conn_st_ctx { u8 func_mode; u8 cos; @@ -9513,16 +10146,18 @@ struct pstorm_fcoe_conn_st_ctx { u8 sid_1; u8 sid_0; u8 flags; -#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 -#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 -#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 -#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 -#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 -#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0xF -#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 4 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 +#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5 u8 did_2; u8 did_1; u8 did_0; @@ -9532,6 +10167,7 @@ struct pstorm_fcoe_conn_st_ctx { u8 reserved1; }; +/* The fcoe storm context of Xstorm */ struct xstorm_fcoe_conn_st_ctx { u8 func_mode; u8 src_mac_index; @@ -9539,16 +10175,16 @@ struct xstorm_fcoe_conn_st_ctx { u8 cached_wqes_avail; __le16 stat_ram_addr; u8 flags; -#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 -#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 -#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 -#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 -#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 u8 cached_wqes_offset; u8 reserved2; u8 eth_hdr_size; @@ -9574,18 +10210,18 @@ struct xstorm_fcoe_conn_st_ctx { u8 fcp_cmd_byte_credit; u8 fcp_rsp_byte_credit; __le16 protection_info; -#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 -#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 -#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 -#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 -#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 -#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF -#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 -#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF -#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 __le16 xferq_pbl_next_index; __le16 page_size; u8 mid_seq; @@ -9594,216 +10230,216 @@ struct xstorm_fcoe_conn_st_ctx { struct fcoe_wqe cached_wqes[16]; }; -struct xstorm_fcoe_conn_ag_ctx { +struct e4_xstorm_fcoe_conn_ag_ctx { u8 reserved0; - u8 fcoe_state; + u8 state; u8 flags0; -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define 
E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 u8 flags2; -#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 
-#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 u8 flags7; -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define 
XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define 
XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 u8 flags12; -#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define 
XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 -#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 -#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 -#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define 
E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 word1; @@ -9831,6 +10467,7 @@ struct xstorm_fcoe_conn_ag_ctx { __le32 reg8; }; +/* The fcoe storm context of Ustorm */ struct ustorm_fcoe_conn_st_ctx { struct regpair respq_pbl_addr; __le16 num_pages_in_pbl; @@ -9840,150 +10477,150 @@ struct ustorm_fcoe_conn_st_ctx { u8 reserved[2]; }; -struct tstorm_fcoe_conn_ag_ctx { +struct e4_tstorm_fcoe_conn_ag_ctx { u8 reserved0; - u8 fcoe_state; + u8 state; u8 flags0; -#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 u8 flags1; -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 
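
Editor's note: for readers skimming these hunks, the paired _MASK/_SHIFT defines above are consumed through field accessor macros. Below is a minimal, self-contained sketch of that pattern; GET_FIELD/SET_FIELD are local stand-ins modeled on the helpers the qed headers provide (include/linux/qed/common_hsi.h), and the two defines are copied from the E4_ rename above so the snippet compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK  0x1
#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4

/* Local stand-ins for the qed field accessors. */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))
#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT);	\
	} while (0)

int main(void)
{
	uint8_t flags8 = 0;

	/* Set the CF6 enable bit, then read it back. */
	SET_FIELD(flags8, E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN, 1);
	printf("CF6EN=%u flags8=0x%02x\n",
	       GET_FIELD(flags8, E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN), flags8);
	return 0;
}

The token-pasting on name##_MASK / name##_SHIFT is why every field in these contexts ships as a MASK/SHIFT pair rather than a single literal.
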
-#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define 
TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; }; -struct ustorm_fcoe_conn_ag_ctx { +struct e4_ustorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define 
USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -9996,72 +10633,76 @@ struct ustorm_fcoe_conn_ag_ctx { __le16 word3; }; +/* The fcoe storm context of Tstorm */ struct tstorm_fcoe_conn_st_ctx { __le16 stat_ram_addr; __le16 rx_max_fc_payload_len; __le16 e_d_tov_val; u8 flags; -#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 -#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 -#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 -#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 
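
Editor's note: the Tstorm state-context flags byte shown here packs INC_SEQ_CNT (1 bit), SUPPORT_CONF (1 bit) and DEF_Q_IDX (6 bits) into a single u8. A short compile-time sketch of how one can sanity-check such a packing; the defines are copied from this hunk (they are unchanged by the patch), and FIELD_BITS is a hypothetical helper added only for this check.

#include <stdint.h>

#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK   0x1
#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT  0
#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK  0x1
#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK     0x3F
#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT    2

/* Hypothetical helper: the bits a field occupies within its byte. */
#define FIELD_BITS(name) ((name##_MASK) << (name##_SHIFT))

/* 1 + 1 + 6 bits must fill the u8 flags byte exactly... */
_Static_assert((FIELD_BITS(TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT) |
		FIELD_BITS(TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF) |
		FIELD_BITS(TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX)) == 0xFF,
	       "flags byte not fully packed");
/* ...and distinct fields must not share bits. */
_Static_assert((FIELD_BITS(TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF) &
		FIELD_BITS(TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX)) == 0,
	       "SUPPORT_CONF and DEF_Q_IDX overlap");

int main(void) { return 0; }

This kind of assertion catches a mis-edited SHIFT at build time rather than as a silent firmware-context corruption.
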
-#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F -#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 u8 timers_cleanup_invocation_cnt; __le32 reserved1[2]; - __le32 dst_mac_address_bytes0to3; - __le16 dst_mac_address_bytes4to5; + __le32 dst_mac_address_bytes_0_to_3; + __le16 dst_mac_address_bytes_4_to_5; __le16 ramrod_echo; u8 flags1; -#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 -#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 -#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F -#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 - u8 q_relative_offset; +#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 +#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 + u8 cq_relative_offset; + u8 cmdq_relative_offset; u8 bdq_resource_id; - u8 reserved0[5]; + u8 reserved0[4]; }; -struct mstorm_fcoe_conn_ag_ctx { +struct e4_mstorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define 
E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; +/* Fast path part of the fcoe storm context of Mstorm */ struct fcoe_mstorm_fcoe_conn_st_ctx_fp { __le16 xfer_prod; - __le16 reserved1; + u8 num_cqs; + u8 reserved1; u8 protection_info; #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1 #define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0 @@ -10073,6 +10714,7 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_fp { u8 reserved2[2]; }; +/* Non fast path part of the fcoe storm context of Mstorm */ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { __le16 conn_id; __le16 stat_ram_addr; @@ -10088,37 +10730,46 @@ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { struct regpair reserved2[3]; }; +/* The fcoe storm context of Mstorm */ struct mstorm_fcoe_conn_st_ctx { struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp; struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp; }; -struct fcoe_conn_context { +/* fcoe connection context */ +struct e4_fcoe_conn_context { struct ystorm_fcoe_conn_st_ctx ystorm_st_context; struct pstorm_fcoe_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct xstorm_fcoe_conn_st_ctx xstorm_st_context; - struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context; struct regpair xstorm_ag_padding[6]; struct ustorm_fcoe_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2]; - struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context; struct tstorm_fcoe_conn_st_ctx tstorm_st_context; - struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context; struct mstorm_fcoe_conn_st_ctx mstorm_st_context; }; +/* FCoE connection offload params passed by driver to FW in FCoE offload + * ramrod. + */ struct fcoe_conn_offload_ramrod_params { struct fcoe_conn_offload_ramrod_data offload_ramrod_data; }; +/* FCoE connection terminate params passed by driver to FW in FCoE terminate + * conn ramrod. + */ struct fcoe_conn_terminate_ramrod_params { struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data; }; +/* FCoE event type */ enum fcoe_event_type { FCOE_EVENT_INIT_FUNC, FCOE_EVENT_DESTROY_FUNC, @@ -10129,10 +10780,12 @@ enum fcoe_event_type { MAX_FCOE_EVENT_TYPE }; +/* FCoE init params passed by driver to FW in FCoE init ramrod */ struct fcoe_init_ramrod_params { struct fcoe_init_func_ramrod_data init_ramrod_data; }; +/* FCoE ramrod Command IDs */ enum fcoe_ramrod_cmd_id { FCOE_RAMROD_CMD_ID_INIT_FUNC, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, @@ -10142,41 +10795,44 @@ enum fcoe_ramrod_cmd_id { MAX_FCOE_RAMROD_CMD_ID }; +/* FCoE statistics params buffer passed by driver to FW in FCoE statistics + * ramrod. 
+ */ struct fcoe_stat_ramrod_params { struct fcoe_stat_ramrod_data stat_ramrod_data; }; -struct ystorm_fcoe_conn_ag_ctx { +struct e4_ystorm_fcoe_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10190,230 +10846,233 @@ struct ystorm_fcoe_conn_ag_ctx { __le32 reg3; }; +/* The iscsi storm connection context of Ystorm */ struct ystorm_iscsi_conn_st_ctx { - __le32 reserved[4]; + __le32 reserved[8]; }; +/* Combined iSCSI and TCP storm connection of Pstorm */ struct pstorm_iscsi_tcp_conn_st_ctx { __le32 tcp[32]; __le32 iscsi[4]; }; +/* The combined tcp and iscsi storm context of Xstorm */ struct xstorm_iscsi_tcp_conn_st_ctx { - __le32 reserved_iscsi[40]; __le32 reserved_tcp[4]; + __le32 reserved_iscsi[44]; }; -struct xstorm_iscsi_conn_ag_ctx { +struct e4_xstorm_iscsi_conn_ag_ctx { u8 cdu_validation; u8 state; u8 flags0; -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define 
XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 u8 flags1; -#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 u8 flags2; -#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 
0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 u8 flags3; -#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 u8 flags6; -#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define 
E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 u8 flags7; -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 -#define 
XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 u8 flags11; -#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define 
XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define 
XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 -#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 -#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 u8 byte2; __le16 physical_q0; __le16 physical_q1; @@ -10449,7 +11108,7 @@ struct xstorm_iscsi_conn_ag_ctx { u8 byte13; u8 byte14; u8 byte15; - u8 ereserved; + u8 e5_reserved; __le16 word11; __le32 reg10; __le32 reg11; @@ -10461,89 +11120,89 @@ struct xstorm_iscsi_conn_ag_ctx { __le32 reg17; }; -struct tstorm_iscsi_conn_ag_ctx { +struct e4_tstorm_iscsi_conn_ag_ctx { u8 reserved0; u8 state; u8 flags0; -#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 
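
Editor's note: when debugging, a raw flags byte from a context dump is easier to read decoded field by field. A sketch using a small descriptor table; the three defines are the renamed E4_ Tstorm iSCSI fields from this hunk (values identical to the pre-rename names), and struct field_desc plus the 0xC1 sample byte are illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK          0x1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT         1
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK           0x3
#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT          6

/* Hypothetical descriptor: name plus mask/shift of one bit field. */
struct field_desc {
	const char *name;
	uint8_t mask, shift;
};

static const struct field_desc tstorm_flags0[] = {
	{ "EXIST_IN_QM0", E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK,
			  E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT },
	{ "BIT1",	  E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK,
			  E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT },
	{ "CF0",	  E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK,
			  E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT },
};

int main(void)
{
	uint8_t flags0 = 0xC1;	/* sample raw byte from a context dump */
	size_t i;

	/* Print each field extracted from the packed byte. */
	for (i = 0; i < sizeof(tstorm_flags0) / sizeof(tstorm_flags0[0]); i++)
		printf("%-12s = %u\n", tstorm_flags0[i].name,
		       (flags0 >> tstorm_flags0[i].shift) & tstorm_flags0[i].mask);
	return 0;
}

For 0xC1 this prints EXIST_IN_QM0 = 1, BIT1 = 0, CF0 = 3, matching the bit positions declared above.
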
-#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 +#define 
E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 u8 flags4; -#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 __le32 reg0; __le32 reg1; __le32 reg2; @@ -10558,63 +11217,63 @@ struct tstorm_iscsi_conn_ag_ctx { __le16 word0; }; -struct ustorm_iscsi_conn_ag_ctx { +struct e4_ustorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 -#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define 
E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 -#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -10627,113 +11286,117 @@ struct ustorm_iscsi_conn_ag_ctx { __le16 word3; }; +/* The iscsi storm connection context of Tstorm */ struct tstorm_iscsi_conn_st_ctx { - __le32 reserved[40]; + __le32 reserved[44]; }; -struct mstorm_iscsi_conn_ag_ctx { +struct e4_mstorm_iscsi_conn_ag_ctx { u8 reserved; u8 state; u8 flags0; -#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define 
MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 __le16 word0; __le16 word1; __le32 reg0; __le32 reg1; }; +/* Combined iSCSI and TCP storm connection of Mstorm */ struct mstorm_iscsi_tcp_conn_st_ctx { __le32 reserved_tcp[20]; - __le32 reserved_iscsi[8]; + __le32 reserved_iscsi[12]; }; +/* The iscsi storm context of Ustorm */ struct ustorm_iscsi_conn_st_ctx { __le32 reserved[52]; }; -struct iscsi_conn_context { +/* iscsi connection context */ +struct e4_iscsi_conn_context { struct ystorm_iscsi_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2]; struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; struct regpair pstorm_st_padding[2]; struct pb_context xpb2_context; struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; struct regpair xstorm_st_padding[2]; - struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; - struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context; struct regpair tstorm_ag_padding[2]; struct timers_context timer_context; - struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context; struct pb_context upb_context; struct tstorm_iscsi_conn_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context; struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; struct ustorm_iscsi_conn_st_ctx ustorm_st_context; }; +/* iSCSI init params passed by driver to FW in iSCSI init ramrod */ struct iscsi_init_ramrod_params { struct iscsi_spe_func_init iscsi_init_spe; struct tcp_init_params tcp_init; }; -struct ystorm_iscsi_conn_ag_ctx { +struct e4_ystorm_iscsi_conn_ag_ctx { u8 byte0; u8 byte1; u8 flags0; -#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 -#define 
YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 u8 byte2; u8 byte3; __le16 word0; @@ -11613,7 +12276,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 -#define DRV_MB_PARAM_NVM_LEN_SHIFT 24 +#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index a05feb38c6ee..fca2dbd93ad9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, return rc; } +int qed_dmae_sanity(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, const char *phase) +{ + u32 size = PAGE_SIZE / 2, val; + struct qed_dmae_params params; + int rc = 0; + dma_addr_t p_phys; + void *p_virt; + u32 *p_tmp; + + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + 2 * size, &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: failed to allocate memory\n", + phase); + return 
-ENOMEM; + } + + /* Fill the bottom half of the allocated memory with a known pattern */ + for (p_tmp = (u32 *)p_virt; + p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) { + /* Save the address itself as the value */ + val = (u32)(uintptr_t)p_tmp; + *p_tmp = val; + } + + /* Zero the top half of the allocated memory */ + memset((u8 *)p_virt + size, 0, size); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n", + phase, + (u64)p_phys, + p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size); + + memset(&params, 0, sizeof(params)); + rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, + size / 4 /* size_in_dwords */, &params); + if (rc) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n", + phase, rc); + goto out; + } + + /* Verify that the top half of the allocated memory has the pattern */ + for (p_tmp = (u32 *)((u8 *)p_virt + size); + p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) { + /* The corresponding address in the bottom half */ + val = (u32)(uintptr_t)p_tmp - size; + + if (*p_tmp != val) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", + phase, + (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt), + p_tmp, *p_tmp, val); + rc = -EINVAL; + goto out; + } + } + +out: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys); + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index f2505c691c26..8db2839a8ec8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -299,4 +299,8 @@ union qed_qm_pq_params { int qed_init_fw_data(struct qed_dev *cdev, const u8 *fw_data); + +int qed_dmae_sanity(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, const char *phase); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index b069ad088269..18fb5062a83d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -31,6 +31,7 @@ */ #include <linux/types.h> +#include <linux/crc8.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -40,102 +41,197 @@ #include "qed_init_ops.h" #include "qed_reg_addr.h" +#define CDU_VALIDATION_DEFAULT_CFG 61 + +static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = { + {400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */ + {528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */ + {608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */ +}; + +static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { + {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ +}; + /* General constants */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ 0x1000) : 0) #define QM_PQ_SIZE_256B(pq_size) (pq_size ? 
DIV_ROUND_UP(pq_size, \ 0x100) - 1 : 0) -#define QM_INVALID_PQ_ID 0xffff +#define QM_INVALID_PQ_ID 0xffff + /* Feature enable */ -#define QM_BYPASS_EN 1 -#define QM_BYTE_CRD_EN 1 +#define QM_BYPASS_EN 1 +#define QM_BYTE_CRD_EN 1 + /* Other PQ constants */ -#define QM_OTHER_PQS_PER_PF 4 +#define QM_OTHER_PQS_PER_PF 4 + /* WFQ constants */ -#define QM_WFQ_UPPER_BOUND 62500000 -#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 -#define QM_WFQ_VP_PQ_PF_SHIFT 5 -#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) -#define QM_WFQ_MAX_INC_VAL 43750000 + +/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */ +#define QM_WFQ_UPPER_BOUND 62500000 + +/* Bit of VOQ in WFQ VP PQ map */ +#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 + +/* Bit of PF in WFQ VP PQ map */ +#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5 + +/* 0x9000 = 4*9*1024 */ +#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) + +/* Max WFQ increment value is 0.7 * upper bound */ +#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10) /* RL constants */ -#define QM_RL_UPPER_BOUND 62500000 -#define QM_RL_PERIOD 5 /* in us */ -#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) -#define QM_RL_MAX_INC_VAL 43750000 -#define QM_RL_INC_VAL(rate) max_t(u32, \ - (u32)(((rate ? rate : \ - 1000000) * \ - QM_RL_PERIOD * \ - 101) / (8 * 100)), 1) + +/* Period in us */ +#define QM_RL_PERIOD 5 + +/* Period in 25MHz cycles */ +#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) + +/* RL increment value - rate is specified in mbps */ +#define QM_RL_INC_VAL(rate) ({ \ + typeof(rate) __rate = (rate); \ + max_t(u32, \ + (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \ + (8 * 100)), \ + 1); }) + +/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */ +#define QM_PF_RL_UPPER_BOUND 62500000 + +/* Max PF RL increment value is 0.7 * upper bound */ +#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10) + +/* Vport RL Upper bound, link speed is in Mbps */ +#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \ + QM_RL_INC_VAL(speed), \ + 9700 + 1000)) + +/* Max Vport RL increment value is the Vport RL upper bound */ +#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed) + +/* Vport RL credit threshold in case of QM bypass */ +#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1) + /* AFullOprtnstcCrdMask constants */ -#define QM_OPPOR_LINE_VOQ_DEF 1 -#define QM_OPPOR_FW_STOP_DEF 0 -#define QM_OPPOR_PQ_EMPTY_DEF 1 +#define QM_OPPOR_LINE_VOQ_DEF 1 +#define QM_OPPOR_FW_STOP_DEF 0 +#define QM_OPPOR_PQ_EMPTY_DEF 1 + /* Command Queue constants */ -#define PBF_CMDQ_PURE_LB_LINES 150 -#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \ - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \ - (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ - PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) -#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \ - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \ - (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) -#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \ - 4) * \ - 2) | QM_LINE_CRD_REG_SIGN_BIT) + +/* Pure LB CmdQ lines (+spare) */ +#define PBF_CMDQ_PURE_LB_LINES 150 + +#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8 + +#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \ + (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \ + (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ + PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) + +#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \ + (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \ + (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ + PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) 
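Illustrative aside (not part of the applied patch): QM_RL_INC_VAL() above is a unit conversion. A rate given in Mbps carries one bit per microsecond per Mbps, so one QM_RL_PERIOD of 5 us needs rate * 5 / 8 bytes of credit, padded by one percent (the 101 / 100 factor) and clamped to at least 1. A minimal standalone sketch of that arithmetic, with the period and fallback rate mirrored from the macro as assumptions:

#include <stdio.h>
#include <stdint.h>

#define SKETCH_QM_RL_PERIOD 5	/* us, mirrors QM_RL_PERIOD */

static uint32_t sketch_rl_inc_val(uint32_t rate_mbps)
{
	/* A zero rate falls back to 1000000 Mbps, exactly as in the macro */
	uint64_t rate = rate_mbps ? rate_mbps : 1000000;
	uint64_t inc = (rate * SKETCH_QM_RL_PERIOD * 101) / (8 * 100);

	return inc > 1 ? (uint32_t)inc : 1;
}

int main(void)
{
	/* 25 Gbps: 15625 bytes per 5 us period, plus the 1% margin -> 15781 */
	printf("inc_val(25000) = %u\n", sketch_rl_inc_val(25000));
	return 0;
}

By the same arithmetic, QM_VP_RL_UPPER_BOUND(10000) evaluates to max(QM_RL_INC_VAL(10000), 9700 + 1000) = max(6312, 10700) = 10700.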
+ +#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \ + ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT) + /* BTB: blocks constants (block size = 256B) */ -#define BTB_JUMBO_PKT_BLOCKS 38 -#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS -#define BTB_PURE_LB_FACTOR 10 -#define BTB_PURE_LB_RATIO 7 + +/* 256B blocks in 9700B packet */ +#define BTB_JUMBO_PKT_BLOCKS 38 + +/* Headroom per-port */ +#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS +#define BTB_PURE_LB_FACTOR 10 + +/* Factored (hence really 0.7) */ +#define BTB_PURE_LB_RATIO 7 + /* QM stop command constants */ -#define QM_STOP_PQ_MASK_WIDTH 32 -#define QM_STOP_CMD_ADDR 2 -#define QM_STOP_CMD_STRUCT_SIZE 2 -#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 -#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 -#define QM_STOP_CMD_PAUSE_MASK_MASK -1 -#define QM_STOP_CMD_GROUP_ID_OFFSET 1 -#define QM_STOP_CMD_GROUP_ID_SHIFT 16 -#define QM_STOP_CMD_GROUP_ID_MASK 15 -#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 -#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 -#define QM_STOP_CMD_PQ_TYPE_MASK 1 -#define QM_STOP_CMD_MAX_POLL_COUNT 100 -#define QM_STOP_CMD_POLL_PERIOD_US 500 +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 2 +#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 +#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 +#define QM_STOP_CMD_PAUSE_MASK_MASK -1 +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 /* QM command macros */ -#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \ - _STRUCT_SIZE -#define QM_CMD_SET_FIELD(var, cmd, field, \ - value) SET_FIELD(var[cmd ## _ ## field ## \ - _OFFSET], \ - cmd ## _ ## field, \ - value) -/* QM: VOQ macros */ -#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \ - (max_phys_tcs_per_port) + \ - (tc)) -#define LB_VOQ(port) ( \ - MAX_PHYS_VOQS + (port)) -#define VOQ(port, tc, max_phy_tcs_pr_port) \ - ((tc) < \ - LB_TC ? 
PHYS_VOQ(port, \ - tc, \ - max_phy_tcs_pr_port) \ - : LB_VOQ(port)) +#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE +#define QM_CMD_SET_FIELD(var, cmd, field, value) \ + SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \ + cmd ## _ ## field, \ + value) + +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \ + ext_voq, wrr) \ + do { \ + typeof(map) __map; \ + memset(&__map, 0, sizeof(__map)); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \ + rl_valid); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \ + vp_pq_id); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \ + SET_FIELD(__map.reg, \ + QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \ + STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ + *((u32 *)&__map)); \ + (map) = __map; \ + } while (0) + +#define WRITE_PQ_INFO_TO_RAM 1 +#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \ + (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \ + ((rl_valid) << 22) | ((rl) << 24)) +#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4) + /******************** INTERNAL IMPLEMENTATION *********************/ + +/* Returns the external VOQ number */ +static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn, + u8 port_id, u8 tc, u8 max_phys_tcs_per_port) +{ + if (tc == PURE_LB_TC) + return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id; + else + return port_id * max_phys_tcs_per_port + tc; +} + /* Prepare PF RL enable/disable runtime init values */ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { + u8 num_ext_voqs = MAX_NUM_VOQS_E4; + u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; + /* Enable RLs for all VOQs */ - STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, - (1 << MAX_NUM_VOQS) - 1); + STORE_RT_REG(p_hwfn, + QM_REG_RLPFVOQENABLE_RT_OFFSET, + (u32)voq_bit_mask); + if (num_ext_voqs >= 32) + STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET, + (u32)(voq_bit_mask >> 32)); + /* Write RL period */ STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); @@ -147,7 +243,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, - QM_RL_UPPER_BOUND); + QM_PF_RL_UPPER_BOUND); } } @@ -181,7 +277,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en) if (QM_BYPASS_EN) STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, - QM_RL_UPPER_BOUND); + QM_VP_RL_BYPASS_THRESH_SPEED); } } @@ -202,15 +298,15 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) * the specified VOQ. 
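 */

/* Illustrative aside (not part of the applied patch): the external VOQ
 * numbering used throughout this file comes from qed_get_ext_voq() above;
 * physical TCs get per-port VOQs, while every port's pure LB TC lands in a
 * block after all physical VOQs. A standalone sketch, where the constant
 * values (NUM_OF_PHYS_TCS = 8, MAX_NUM_PORTS_BB = 2, PURE_LB_TC = 8) are
 * assumed for illustration rather than taken from this patch:
 */
#include <assert.h>

enum {
	SKETCH_NUM_OF_PHYS_TCS = 8,
	SKETCH_MAX_NUM_PORTS_BB = 2,
	SKETCH_PURE_LB_TC = 8,
};

static unsigned char sketch_get_ext_voq(unsigned char port_id,
					unsigned char tc,
					unsigned char max_phys_tcs_per_port)
{
	/* Pure LB VOQs are appended after all per-port physical VOQs */
	if (tc == SKETCH_PURE_LB_TC)
		return SKETCH_NUM_OF_PHYS_TCS * SKETCH_MAX_NUM_PORTS_BB +
		       port_id;
	return port_id * max_phys_tcs_per_port + tc;
}

int main(void)
{
	assert(sketch_get_ext_voq(1, 3, 4) == 7);	/* port 1, TC 3 */
	assert(sketch_get_ext_voq(1, SKETCH_PURE_LB_TC, 4) == 17); /* LB */
	return 0;
}

/*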
*/ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, - u8 voq, u16 cmdq_lines) + u8 ext_voq, u16 cmdq_lines) { - u32 qm_line_crd; + u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); - qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); - OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), + OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), (u32)cmdq_lines); - STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd); - STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, + STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, + qm_line_crd); + STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, qm_line_crd); } @@ -221,43 +317,52 @@ static void qed_cmdq_lines_rt_init( u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) { - u8 tc, voq, port_id, num_tcs_in_port; + u8 tc, ext_voq, port_id, num_tcs_in_port; + u8 num_ext_voqs = MAX_NUM_VOQS_E4; + + /* Clear PBF lines of all VOQs */ + for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++) + STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0); - /* Clear PBF lines for all VOQs */ - for (voq = 0; voq < MAX_NUM_VOQS; voq++) - STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); for (port_id = 0; port_id < max_ports_per_engine; port_id++) { - if (port_params[port_id].active) { - u16 phys_lines, phys_lines_per_tc; - - /* find #lines to divide between active phys TCs */ - phys_lines = port_params[port_id].num_pbf_cmd_lines - - PBF_CMDQ_PURE_LB_LINES; - /* find #lines per active physical TC */ - num_tcs_in_port = 0; - for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { - if (((port_params[port_id].active_phys_tcs >> - tc) & 0x1) == 1) - num_tcs_in_port++; - } + u16 phys_lines, phys_lines_per_tc; - phys_lines_per_tc = phys_lines / num_tcs_in_port; - /* init registers per active TC */ - for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { - if (((port_params[port_id].active_phys_tcs >> - tc) & 0x1) != 1) - continue; + if (!port_params[port_id].active) + continue; - voq = PHYS_VOQ(port_id, tc, - max_phys_tcs_per_port); - qed_cmdq_lines_voq_rt_init(p_hwfn, voq, - phys_lines_per_tc); - } + /* Find number of command queue lines to divide between the + * active physical TCs. In E5, 1/8 of the lines are reserved. + * The lines for pure LB TC are subtracted. 
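 */

/* Illustrative aside (not part of the applied patch): the division below is
 * a popcount-and-divide over the active TC bitmap; a standalone sketch with
 * assumed numbers:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t num_pbf_cmd_lines = 3491;	/* assumed port budget */
	uint8_t active_phys_tcs = 0x0b;		/* TCs 0, 1 and 3 active */
	/* The pure LB share (150 lines, as above) comes off the top */
	uint16_t phys_lines = num_pbf_cmd_lines - 150;
	uint8_t num_tcs_in_port = 0, tc;

	for (tc = 0; tc < 8; tc++)
		if ((active_phys_tcs >> tc) & 0x1)
			num_tcs_in_port++;

	/* Each active TC gets an equal share; here 3341 / 3 = 1113 lines */
	printf("lines per active TC: %u\n", phys_lines / num_tcs_in_port);
	return 0;
}

/*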
+ */ + phys_lines = port_params[port_id].num_pbf_cmd_lines; + phys_lines -= PBF_CMDQ_PURE_LB_LINES; + + /* Find #lines per active physical TC */ + num_tcs_in_port = 0; + for (tc = 0; tc < max_phys_tcs_per_port; tc++) + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + num_tcs_in_port++; + phys_lines_per_tc = phys_lines / num_tcs_in_port; - /* init registers for pure LB TC */ - qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), - PBF_CMDQ_PURE_LB_LINES); + /* Init registers per active TC */ + for (tc = 0; tc < max_phys_tcs_per_port; tc++) { + ext_voq = qed_get_ext_voq(p_hwfn, + port_id, + tc, max_phys_tcs_per_port); + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + qed_cmdq_lines_voq_rt_init(p_hwfn, + ext_voq, + phys_lines_per_tc); } + + /* Init registers for pure LB TC */ + ext_voq = qed_get_ext_voq(p_hwfn, + port_id, + PURE_LB_TC, max_phys_tcs_per_port); + qed_cmdq_lines_voq_rt_init(p_hwfn, + ext_voq, PBF_CMDQ_PURE_LB_LINES); } } @@ -268,11 +373,9 @@ static void qed_btb_blocks_rt_init( struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u32 usable_blocks, pure_lb_blocks, phys_blocks; - u8 tc, voq, port_id, num_tcs_in_port; + u8 tc, ext_voq, port_id, num_tcs_in_port; for (port_id = 0; port_id < max_ports_per_engine; port_id++) { - u32 temp; - if (!port_params[port_id].active) continue; @@ -280,13 +383,14 @@ usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS; - /* find blocks per physical TC */ + /* Find blocks per physical TC. Use factor to avoid floating + * arithmetic. + */ num_tcs_in_port = 0; - for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) if (((port_params[port_id].active_phys_tcs >> tc) & 0x1) == 1) num_tcs_in_port++; - } pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / (num_tcs_in_port * BTB_PURE_LB_FACTOR + @@ -299,47 +403,55 @@ /* Init physical TCs */ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { if (((port_params[port_id].active_phys_tcs >> - tc) & 0x1) != 1) - continue; - - voq = PHYS_VOQ(port_id, tc, - max_phys_tcs_per_port); - STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), - phys_blocks); + tc) & 0x1) == 1) { + ext_voq = + qed_get_ext_voq(p_hwfn, + port_id, + tc, + max_phys_tcs_per_port); + STORE_RT_REG(p_hwfn, + PBF_BTB_GUARANTEED_RT_OFFSET + (ext_voq), phys_blocks); + } } /* Init pure LB TC */ - temp = LB_VOQ(port_id); - STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(temp), + ext_voq = qed_get_ext_voq(p_hwfn, + port_id, + PURE_LB_TC, max_phys_tcs_per_port); + STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), pure_lb_blocks); } } /* Prepare Tx PQ mapping runtime init values for the specified PF */ -static void qed_tx_pq_map_rt_init( - struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct qed_qm_pf_rt_init_params *p_params, - u32 base_mem_addr_4kb) +static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params, + u32 base_mem_addr_4kb) { - struct init_qm_vport_params *vport_params = p_params->vport_params; - u16 num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; - u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; - u16 last_pq_group = (p_params->start_pq + num_pqs - 1) / - QM_PF_QUEUE_GROUP_SIZE; - u16 i, pq_id, pq_group; - - /* A bit per Tx PQ indicating if the PQ is associated with a VF */ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; + struct 
init_qm_vport_params *vport_params = p_params->vport_params; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; - u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); - u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); - u32 mem_addr_4kb = base_mem_addr_4kb; + u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; + struct init_qm_pq_params *pq_params = p_params->pq_params; + u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; + + num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; + + first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; + last_pq_group = (p_params->start_pq + num_pqs - 1) / + QM_PF_QUEUE_GROUP_SIZE; + + pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); + vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); + mem_addr_4kb = base_mem_addr_4kb; /* Set mapping from PQ group to PF */ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, (u32)(p_params->pf_id)); + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, QM_PQ_SIZE_256B(p_params->num_pf_cids)); @@ -348,58 +460,82 @@ static void qed_tx_pq_map_rt_init( /* Go over all Tx PQs */ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { - u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, - p_params->max_phys_tcs_per_port); - bool is_vf_pq = (i >= p_params->num_pf_pqs); - struct qm_rf_pq_map tx_pq_map; - - bool rl_valid = p_params->pq_params[i].rl_valid && - (p_params->pq_params[i].vport_id < - MAX_QM_GLOBAL_RLS); + u8 ext_voq, vport_id_in_pf, tc_id = pq_params[i].tc_id; + u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS; + struct qm_rf_pq_map_e4 tx_pq_map; + bool is_vf_pq, rl_valid; + u16 *p_first_tx_pq_id; + + ext_voq = qed_get_ext_voq(p_hwfn, + p_params->port_id, + tc_id, + p_params->max_phys_tcs_per_port); + is_vf_pq = (i >= p_params->num_pf_pqs); + rl_valid = pq_params[i].rl_valid && + pq_params[i].vport_id < max_qm_global_rls; /* Update first Tx PQ of VPORT/TC */ - u8 vport_id_in_pf = p_params->pq_params[i].vport_id - - p_params->start_vport; - u16 *pq_ids = &vport_params[vport_id_in_pf].first_tx_pq_id[0]; - u16 first_tx_pq_id = pq_ids[p_params->pq_params[i].tc_id]; + vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport; + p_first_tx_pq_id = + &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; + if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { + u32 map_val = + (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | + (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT); - if (first_tx_pq_id == QM_INVALID_PQ_ID) { /* Create new VP PQ */ - pq_ids[p_params->pq_params[i].tc_id] = pq_id; - first_tx_pq_id = pq_id; + *p_first_tx_pq_id = pq_id; /* Map VP PQ to VOQ and PF */ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET + - first_tx_pq_id, - (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | - (p_params->pf_id << - QM_WFQ_VP_PQ_PF_SHIFT)); + *p_first_tx_pq_id, + map_val); } - if (p_params->pq_params[i].rl_valid && !rl_valid) + /* Check RL ID */ + if (pq_params[i].rl_valid && pq_params[i].vport_id >= + max_qm_global_rls) DP_NOTICE(p_hwfn, - "Invalid VPORT ID for rate limiter configuration"); - /* Fill PQ map entry */ - memset(&tx_pq_map, 0, sizeof(tx_pq_map)); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); - SET_FIELD(tx_pq_map.reg, - QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, - rl_valid ? 
- p_params->pq_params[i].vport_id : 0); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); - SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, - p_params->pq_params[i].wrr_group); - /* Write PQ map entry to CAM */ - STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, - *((u32 *)&tx_pq_map)); - /* Set base address */ + "Invalid VPORT ID for rate limiter configuration\n"); + + /* Prepare PQ map entry */ + QM_INIT_TX_PQ_MAP(p_hwfn, + tx_pq_map, + E4, + pq_id, + rl_valid ? 1 : 0, + *p_first_tx_pq_id, + rl_valid ? pq_params[i].vport_id : 0, + ext_voq, pq_params[i].wrr_group); + + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); + /* Clear PQ pointer table entry (64 bit) */ + if (p_params->is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLTX_RT_OFFSET + + (pq_id * 2) + j, 0); + + /* Write PQ info to RAM */ + if (WRITE_PQ_INFO_TO_RAM != 0) { + u32 pq_info = 0; + + pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id, + p_params->pf_id, + tc_id, + p_params->port_id, + rl_valid ? 1 : 0, + rl_valid ? + pq_params[i].vport_id : 0); + qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), + pq_info); + } + /* If VF PQ, add indication to PQ VF mask */ if (is_vf_pq) { tx_pq_vf_mask[pq_id / @@ -421,16 +557,16 @@ static void qed_tx_pq_map_rt_init( /* Prepare Other PQ mapping runtime init values for the specified PF */ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, - u8 port_id, u8 pf_id, + bool is_pf_loading, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { u32 pq_size, pq_mem_4kb, mem_addr_4kb; - u16 i, pq_id, pq_group; + u16 i, j, pq_id, pq_group; - /* a single other PQ group is used in each PF, - * where PQ group i is used in PF i. + /* A single other PQ group is used in each PF, where PQ group i is used + * in PF i. 
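 */

/* Illustrative aside (not part of the applied patch): the base-address
 * bookkeeping below advances in 4 KB units. A standalone sketch, assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes and QM_OTHER_PQS_PER_PF is 4:
 */
#include <stdio.h>
#include <stdint.h>

/* 4 KB pages needed by one PQ of pq_size elements, as in QM_PQ_MEM_4KB() */
static uint32_t sketch_pq_mem_4kb(uint32_t pq_size)
{
	return pq_size ? ((pq_size + 1) * 4 + 0xfff) / 0x1000 : 0;
}

int main(void)
{
	uint32_t pq_mem_4kb = sketch_pq_mem_4kb(8192 + 4096); /* cids + tids */
	uint32_t mem_addr_4kb = 0;
	int i;

	/* Each of the PF's four "other" PQs gets its own page-aligned slice */
	for (i = 0; i < 4; i++) {
		printf("other pq %d base (4 KB units): %u\n", i, mem_addr_4kb);
		mem_addr_4kb += pq_mem_4kb;
	}
	return 0;
}

/*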
*/ pq_group = pf_id; pq_size = num_pf_cids + num_tids; @@ -440,16 +576,25 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, /* Map PQ group to PF */ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, (u32)(pf_id)); + /* Set PQ sizes */ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); - /* Set base address */ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb); + + /* Clear PQ pointer table entry */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLOTHER_RT_OFFSET + + (pq_id * 2) + j, 0); + mem_addr_4kb += pq_mem_4kb; } } @@ -461,16 +606,11 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_pf_rt_init_params *p_params) { u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; - u32 crd_reg_offset; - u32 inc_val; + struct init_qm_pq_params *pq_params = p_params->pq_params; + u32 inc_val, crd_reg_offset; + u8 ext_voq; u16 i; - if (p_params->pf_id < MAX_NUM_PFS_BB) - crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET; - else - crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET; - crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB; - inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); @@ -478,19 +618,26 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, } for (i = 0; i < num_tx_pqs; i++) { - u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, - p_params->max_phys_tcs_per_port); - + ext_voq = qed_get_ext_voq(p_hwfn, + p_params->port_id, + pq_params[i].tc_id, + p_params->max_phys_tcs_per_port); + crd_reg_offset = + (p_params->pf_id < MAX_NUM_PFS_BB ? 
+ QM_REG_WFQPFCRD_RT_OFFSET : + QM_REG_WFQPFCRD_MSB_RT_OFFSET) + + ext_voq * MAX_NUM_PFS_BB + + (p_params->pf_id % MAX_NUM_PFS_BB); OVERWRITE_RT_REG(p_hwfn, - crd_reg_offset + voq * MAX_NUM_PFS_BB, - QM_WFQ_CRD_REG_SIGN_BIT); + crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); } STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, - QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); + QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, inc_val); + return 0; } @@ -501,15 +648,19 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); - if (inc_val > QM_RL_MAX_INC_VAL) { + if (inc_val > QM_PF_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } - STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, - QM_RL_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, - QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); + + STORE_RT_REG(p_hwfn, + QM_REG_RLPFCRD_RT_OFFSET + pf_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, + QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); + return 0; } @@ -520,12 +671,12 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, u8 num_vports, struct init_qm_vport_params *vport_params) { + u16 vport_pq_id; u32 inc_val; u8 tc, i; /* Go over all PF VPORTs */ for (i = 0; i < num_vports; i++) { - if (!vport_params[i].vport_wfq) continue; @@ -536,17 +687,14 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, return -1; } - /* each VPORT can have several VPORT PQ IDs for - * different TCs - */ + /* Each VPORT can have several VPORT PQ IDs for various TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { - u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc]; - + vport_pq_id = vport_params[i].first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) { STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, - QM_WFQ_CRD_REG_SIGN_BIT); + (u32)QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + vport_pq_id, inc_val); @@ -557,12 +705,17 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, return 0; } +/* Prepare VPORT RL runtime init values for the specified VPORTs. + * Return -1 on error. + */ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, u8 start_vport, u8 num_vports, + u32 link_speed, struct init_qm_vport_params *vport_params) { u8 i, vport_id; + u32 inc_val; if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) { DP_NOTICE(p_hwfn, @@ -572,22 +725,22 @@ static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, /* Go over all PF VPORTs */ for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { - u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); - - if (inc_val > QM_RL_MAX_INC_VAL) { + inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ? 
+ vport_params[i].vport_rl : + link_speed); + if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) { DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n"); return -1; } - STORE_RT_REG(p_hwfn, - QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, - QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, - QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, - QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, + QM_VP_RL_UPPER_BOUND(link_speed) | + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, inc_val); } @@ -599,7 +752,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, { u32 reg_val, i; - for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; + for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; i++) { udelay(QM_STOP_CMD_POLL_PERIOD_US); reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); @@ -632,8 +785,8 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, } /******************** INTERFACE IMPLEMENTATION *********************/ -u32 qed_qm_pf_mem_size(u8 pf_id, - u32 num_pf_cids, + +u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) { @@ -642,11 +795,10 @@ u32 qed_qm_pf_mem_size(u8 pf_id, QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; } -int qed_qm_common_rt_init( - struct qed_hwfn *p_hwfn, - struct qed_qm_common_rt_init_params *p_params) +int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_common_rt_init_params *p_params) { - /* init AFullOprtnstcCrdMask */ + /* Init AFullOprtnstcCrdMask */ u32 mask = (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) | (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) | @@ -664,18 +816,31 @@ int qed_qm_common_rt_init( QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT); STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); + + /* Enable/disable PF RL */ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); + + /* Enable/disable PF WFQ */ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); + + /* Enable/disable VPORT RL */ qed_enable_vport_rl(p_hwfn, p_params->vport_rl_en); + + /* Enable/disable VPORT WFQ */ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); + + /* Init PBF CMDQ line credit */ qed_cmdq_lines_rt_init(p_hwfn, p_params->max_ports_per_engine, p_params->max_phys_tcs_per_port, p_params->port_params); + + /* Init BTB blocks in PBF */ qed_btb_blocks_rt_init(p_hwfn, p_params->max_ports_per_engine, p_params->max_phys_tcs_per_port, p_params->port_params); + return 0; } @@ -695,24 +860,31 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; /* Map Other PQs (if any) */ - qed_other_pq_map_rt_init(p_hwfn, p_params->port_id, p_params->pf_id, - p_params->num_pf_cids, p_params->num_tids, 0); + qed_other_pq_map_rt_init(p_hwfn, + p_params->pf_id, + p_params->is_pf_loading, p_params->num_pf_cids, + p_params->num_tids, 0); /* Map Tx PQs */ qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb); + /* Init PF WFQ */ if (p_params->pf_wfq) if (qed_pf_wfq_rt_init(p_hwfn, p_params)) return -1; + /* Init PF RL */ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) return -1; + /* Set VPORT WFQ */ if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; + /* Set VPORT RL */ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, - 
p_params->num_vports, vport_params)) + p_params->num_vports, p_params->link_speed, + vport_params)) return -1; return 0; @@ -729,6 +901,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, } qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); + return 0; } @@ -737,14 +910,13 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, { u32 inc_val = QM_RL_INC_VAL(pf_rl); - if (inc_val > QM_RL_MAX_INC_VAL) { + if (inc_val > QM_PF_RL_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); return -1; } - qed_wr(p_hwfn, p_ptt, - QM_REG_RLPFCRD + pf_id * 4, - QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); return 0; @@ -767,33 +939,35 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, for (tc = 0; tc < NUM_OF_TCS; tc++) { vport_pq_id = first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) - qed_wr(p_hwfn, p_ptt, - QM_REG_WFQVPWEIGHT + vport_pq_id * 4, - inc_val); + qed_wr(p_hwfn, + p_ptt, + QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val); } return 0; } int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl) + struct qed_ptt *p_ptt, + u8 vport_id, u32 vport_rl, u32 link_speed) { - u32 inc_val = QM_RL_INC_VAL(vport_rl); + u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS; - if (vport_id >= MAX_QM_GLOBAL_RLS) { + if (vport_id >= max_qm_global_rls) { DP_NOTICE(p_hwfn, "Invalid VPORT ID for rate limiter configuration\n"); return -1; } - if (inc_val > QM_RL_MAX_INC_VAL) { + inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed); + if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) { DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration\n"); return -1; } - qed_wr(p_hwfn, p_ptt, - QM_REG_RLGLBLCRD + vport_id * 4, - QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, + QM_REG_RLGLBLCRD + vport_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val); return 0; @@ -805,23 +979,27 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, bool is_tx_pq, u16 start_pq, u16 num_pqs) { u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; - u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; + u32 pq_mask = 0, last_pq, pq_id; + + last_pq = start_pq + num_pqs - 1; /* Set command's PQ type */ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 
0 : 1); + /* Go over requested PQs */ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { /* Set PQ bit in mask (stop command only) */ if (!is_release_cmd) - pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH)); + pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH)); /* If last PQ or end of PQ mask, write command */ if ((pq_id == last_pq) || (pq_id % QM_STOP_PQ_MASK_WIDTH == (QM_STOP_PQ_MASK_WIDTH - 1))) { - QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, - PAUSE_MASK, pq_mask); - QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, + QM_CMD_SET_FIELD(cmd_arr, + QM_STOP_CMD, PAUSE_MASK, pq_mask); + QM_CMD_SET_FIELD(cmd_arr, + QM_STOP_CMD, GROUP_ID, pq_id / QM_STOP_PQ_MASK_WIDTH); if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, @@ -834,87 +1012,103 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, return true; } -static void -qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable) -{ - if (enable) - set_bit(bit, var); - else - clear_bit(bit, var); -} +#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ + do { \ + typeof(var) *__p_var = &(var); \ + typeof(offset) __offset = offset; \ + *__p_var = (*__p_var & ~BIT(__offset)) | \ + ((enable) ? BIT(__offset) : 0); \ + } while (0) #define PRS_ETH_TUNN_FIC_FORMAT -188897008 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port) { + /* Update PRS register */ qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); + + /* Update NIG register */ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port); + + /* Update PBF register */ qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); } void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool vxlan_enable) { - unsigned long reg_val = 0; + u32 reg_val; u8 shift; + /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, vxlan_enable); - + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, - PRS_ETH_TUNN_FIC_FORMAT); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_FIC_FORMAT); + /* Update NIG register */ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, vxlan_enable); - + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); - qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, - vxlan_enable ? 1 : 0); + /* Update DORQ register */ + qed_wr(p_hwfn, + p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 
1 : 0); } -void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool eth_gre_enable, bool ip_gre_enable) { - unsigned long reg_val = 0; + u32 reg_val; u8 shift; + /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, eth_gre_enable); - + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, ip_gre_enable); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, - PRS_ETH_TUNN_FIC_FORMAT); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_FIC_FORMAT); + /* Update NIG register */ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, eth_gre_enable); - + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, ip_gre_enable); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); - qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, - eth_gre_enable ? 1 : 0); - qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, - ip_gre_enable ? 1 : 0); + /* Update DORQ registers */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0); + qed_wr(p_hwfn, + p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0); } void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port) { + /* Update PRS register */ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); + + /* Update NIG register */ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); + + /* Update PBF register */ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); } @@ -922,32 +1116,39 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_geneve_enable, bool ip_geneve_enable) { - unsigned long reg_val = 0; + u32 reg_val; u8 shift; + /* Update PRS register */ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, eth_geneve_enable); - + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable); shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT; - qed_set_tunnel_type_enable_bit(®_val, shift, ip_geneve_enable); - + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable); qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); if (reg_val) - qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, - PRS_ETH_TUNN_FIC_FORMAT); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_FIC_FORMAT); + /* Update NIG register */ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, eth_geneve_enable ? 1 : 0); qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 
1 : 0); - /* EDPM with geneve tunnel not supported in BB_B0 */ + /* EDPM with geneve tunnel not supported in BB */ if (QED_IS_BB_B0(p_hwfn->cdev)) return; - qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN, + /* Update DORQ registers */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5, eth_geneve_enable ? 1 : 0); - qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN, + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5, ip_geneve_enable ? 1 : 0); } @@ -959,117 +1160,297 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, #define RAM_LINE_SIZE sizeof(u64) #define REG_SIZE sizeof(u32) -void qed_set_rfs_mode_disable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u16 pf_id) +void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) { - u32 hw_addr = PRS_REG_GFT_PROFILE_MASK_RAM + - pf_id * RAM_LINE_SIZE; - - /*stop using gft logic */ + /* Disable gft search for PF */ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); - qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0); + + /* Clean ram & cam for next gft session */ + + /* Zero camline */ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); - qed_wr(p_hwfn, p_ptt, hw_addr, 0); - qed_wr(p_hwfn, p_ptt, hw_addr + 4, 0); + + /* Zero ramline */ + qed_wr(p_hwfn, + p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, 0); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, + 0); } -void qed_set_rfs_mode_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 pf_id, bool tcp, bool udp, - bool ipv4, bool ipv6) +void qed_set_gft_event_id_cm_hdr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - union gft_cam_line_union camline; - struct gft_ram_line ramline; u32 rfs_cm_hdr_event_id; + /* Set RFS event ID to be awakened in Tstorm by PRS */ rfs_cm_hdr_event_id = qed_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT); + rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID << + PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; + rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR << + PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; + qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id); +} + +void qed_gft_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 pf_id, + bool tcp, + bool udp, + bool ipv4, bool ipv6, enum gft_profile_type profile_type) +{ + u32 reg_val, cam_line, ram_line_lo, ram_line_hi; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, - "set_rfs_mode_enable: must accept at least on of - ipv4 or ipv6"); + "gft_config: must accept at least one of - ipv4 or ipv6\n"); if (!tcp && !udp) DP_NOTICE(p_hwfn, - "set_rfs_mode_enable: must accept at least on of - udp or tcp"); + "gft_config: must accept at least one of - udp or tcp\n"); + if (profile_type >= MAX_GFT_PROFILE_TYPE) + DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n"); - rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID << - PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; - rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR << - PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; - qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id); + /* Set RFS event ID to be awakened in Tstorm by PRS */ + reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << + PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; + reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; + qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val); - /* Configure Registers for RFS mode */ - qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); + /* Do not load context, only cid, in PRS on match. 
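 */

/* Illustrative aside (not part of the applied patch): the cam_line and
 * ram_line code below leans on the driver's SET_FIELD() helper, whose
 * definition lives in the qed HSI headers and is not shown in this patch.
 * The sketch below is an assumed reading of its masked read-modify-write
 * semantics, with hypothetical field names:
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_VALID_MASK 0x1
#define SKETCH_VALID_SHIFT 0
#define SKETCH_PF_ID_MASK 0xf
#define SKETCH_PF_ID_SHIFT 1

/* Clear the named field, then or in the new value at the field's shift */
#define SKETCH_SET_FIELD(value, name, val) \
	((value) = ((value) & ~((uint32_t)name##_MASK << name##_SHIFT)) | \
		   (((uint32_t)(val) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint32_t cam_line = 0;

	SKETCH_SET_FIELD(cam_line, SKETCH_VALID, 1);
	SKETCH_SET_FIELD(cam_line, SKETCH_PF_ID, 5);
	assert(cam_line == ((5u << 1) | 1u));
	return 0;
}

/*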
*/ qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); - camline.cam_line_mapped.camline = 0; - /* Cam line is now valid!! */ - SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_VALID, 1); + /* Do not use tenant ID exist bit for gft search */ + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0); - /* filters are per PF!! */ - SET_FIELD(camline.cam_line_mapped.camline, + /* Set Cam */ + cam_line = 0; + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1); + + /* Filters are per PF!! */ + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK, GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); - SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_PF_ID, pf_id); + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); + if (!(tcp && udp)) { - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); if (tcp) - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_TCP_PROTOCOL); else - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, GFT_PROFILE_UDP_PROTOCOL); } if (!(ipv4 && ipv6)) { - SET_FIELD(camline.cam_line_mapped.camline, - GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); if (ipv4) - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV4); else - SET_FIELD(camline.cam_line_mapped.camline, + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION, GFT_PROFILE_IPV6); } /* Write characteristics to cam */ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, - camline.cam_line_mapped.camline); - camline.cam_line_mapped.camline = qed_rd(p_hwfn, p_ptt, - PRS_REG_GFT_CAM + - CAM_LINE_SIZE * pf_id); + cam_line); + cam_line = + qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); /* Write line to RAM - compare to filter 4 tuple */ - ramline.lo = 0; - ramline.hi = 0; - SET_FIELD(ramline.hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ramline.hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ramline.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ramline.lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ramline.lo, GFT_RAM_LINE_SRC_PORT, 1); - SET_FIELD(ramline.lo, GFT_RAM_LINE_DST_PORT, 1); - - /* Each iteration write to reg */ - qed_wr(p_hwfn, p_ptt, + ram_line_lo = 0; + ram_line_hi = 0; + + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } + + qed_wr(p_hwfn, + p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, - ramline.lo); - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + 4, - ramline.hi); + ram_line_lo); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id + REG_SIZE, + ram_line_hi); /* Set 
default profile so that no filter match will happen */ - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + - RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, - ramline.lo); - qed_wr(p_hwfn, p_ptt, - PRS_REG_GFT_PROFILE_MASK_RAM + - RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH + 4, - ramline.hi); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * + PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff); + qed_wr(p_hwfn, + p_ptt, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * + PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff); + + /* Enable gft search */ + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); +} + +DECLARE_CRC8_TABLE(cdu_crc8_table); + +/* Calculate and return CDU validation byte per connection type/region/cid */ +static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) +{ + const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG; + u8 crc, validation_byte = 0; + static u8 crc8_table_valid; /* automatically initialized to 0 */ + u32 validation_string = 0; + u32 data_to_crc; + + if (!crc8_table_valid) { + crc8_populate_msb(cdu_crc8_table, 0x07); + crc8_table_valid = 1; + } + + /* The CRC is calculated on the String-to-compress: + * [31:8] = {CID[31:20],CID[11:0]} + * [7:4] = Region + * [3:0] = Type + */ + if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1) + validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8); + + if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1) + validation_string |= ((region & 0xF) << 4); + + if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1) + validation_string |= (conn_type & 0xF); + + /* Convert to big-endian and calculate CRC8 */ + data_to_crc = be32_to_cpu(validation_string); + + crc = crc8(cdu_crc8_table, + (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE); + + /* The validation byte [7:0] is composed: + * for type A validation + * [7] = active configuration bit + * [6:0] = crc[6:0] + * + * for type B validation + * [7] = active configuration bit + * [6:3] = connection_type[3:0] + * [2:0] = crc[2:0] + */ + validation_byte |= + ((validation_cfg >> + CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7; + + if ((validation_cfg >> + CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1) + validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7); + else + validation_byte |= crc & 0x7F; + + return validation_byte; +} + +/* Calculate and set validation bytes for session context */ +void qed_calc_session_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 cid) +{ + u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; + + p_ctx = (u8 * const)p_ctx_mem; + x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; + t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; + u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; + + memset(p_ctx, 0, ctx_size); + + *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid); + *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid); + *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid); +} + +/* Calculate and set validation bytes for task context */ +void qed_calc_task_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 tid) +{ + u8 *p_ctx, *region1_val_ptr; + + p_ctx = (u8 * const)p_ctx_mem; + region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; + + memset(p_ctx, 0, ctx_size); + + *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid); +} + +/* Memset session context to 0 while preserving validation bytes */ +void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) +{ + u8 *x_val_ptr,
*t_val_ptr, *u_val_ptr, *p_ctx; + u8 x_val, t_val, u_val; + + p_ctx = (u8 * const)p_ctx_mem; + x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; + t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; + u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; + + x_val = *x_val_ptr; + t_val = *t_val_ptr; + u_val = *u_val_ptr; + + memset(p_ctx, 0, ctx_size); + + *x_val_ptr = x_val; + *t_val_ptr = t_val; + *u_val_ptr = u_val; +} + +/* Memset task context to 0 while preserving validation bytes */ +void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) +{ + u8 *p_ctx, *region1_val_ptr; + u8 region1_val; + + p_ctx = (u8 * const)p_ctx_mem; + region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; + + region1_val = *region1_val_ptr; + + memset(p_ctx, 0, ctx_size); + + *region1_val_ptr = region1_val; +} + +/* Enable and configure context validation */ +void qed_enable_context_validation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 ctx_validation; + + /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */ + ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24; + qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation); + + /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */ + ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; + qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation); + + /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */ + ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; + qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index e3f368882f46..3bb76da6baa2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, } /* init_ops callbacks entry point */ -static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct init_callback_op *p_cmd) +static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_callback_op *p_cmd) { - DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n"); + int rc; + + switch (p_cmd->callback_id) { + case DMAE_READY_CB: + rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase"); + break; + default: + DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n", + p_cmd->callback_id); + return -EINVAL; + } + + return rc; } static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, @@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn, break; case INIT_OP_CALLBACK: - qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); break; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 719cdbfe1695..d3eabcf9c86c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -59,10 +59,10 @@ struct qed_pi_info { }; struct qed_sb_sp_info { - struct qed_sb_info sb_info; + struct qed_sb_info sb_info; /* per protocol index data */ - struct qed_pi_info pi_info_arr[PIS_PER_SB]; + struct qed_pi_info pi_info_arr[PIS_PER_SB_E4]; }; enum qed_attention_type { @@ -82,7 +82,7 @@ struct aeu_invert_reg_bit { #define ATTENTION_LENGTH_SHIFT (4) #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ ATTENTION_LENGTH_SHIFT) -#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) +#define ATTENTION_SINGLE 
BIT(ATTENTION_LENGTH_SHIFT) #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ ATTENTION_PARITY) @@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, if (IS_VF(p_hwfn->cdev)) return; - sb_offset = igu_sb_id * PIS_PER_SB; + sb_offset = igu_sb_id * PIS_PER_SB_E4; memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 5199634ed630..54b4ee0acfd7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev); #define QED_SB_EVENT_MASK 0x0003 #define SB_ALIGNED_SIZE(p_hwfn) \ - ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) + ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn) #define QED_SB_INVALID_IDX 0xffff diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 813c77cc857f..c0d4a54a5edb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -62,22 +62,6 @@ #include "qed_sriov.h" #include "qed_reg_addr.h" -static int -qed_iscsi_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, - u16 echo, union event_ring_data *data, u8 fw_return_code) -{ - if (p_hwfn->p_iscsi_info->event_cb) { - struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; - - return p_iscsi->event_cb(p_iscsi->event_context, - fw_event_code, data); - } else { - DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n"); - return -EINVAL; - } -} - struct qed_iscsi_conn { struct list_head list_entry; bool free_on_delete; @@ -105,7 +89,7 @@ struct qed_iscsi_conn { u8 local_mac[6]; u8 remote_mac[6]; u16 vlan_id; - u8 tcp_flags; + u16 tcp_flags; u8 ip_version; u32 remote_ip[4]; u32 local_ip[4]; @@ -122,7 +106,6 @@ struct qed_iscsi_conn { u32 ss_thresh; u16 srtt; u16 rtt_var; - u32 ts_time; u32 ts_recent; u32 ts_recent_age; u32 total_rt; @@ -144,7 +127,6 @@ struct qed_iscsi_conn { u16 mss; u8 snd_wnd_scale; u8 rcv_wnd_scale; - u32 ts_ticks_per_second; u16 da_timeout_value; u8 ack_frequency; @@ -162,6 +144,22 @@ struct qed_iscsi_conn { }; static int +qed_iscsi_async_event(struct qed_hwfn *p_hwfn, + u8 fw_event_code, + u16 echo, union event_ring_data *data, u8 fw_return_code) +{ + if (p_hwfn->p_iscsi_info->event_cb) { + struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; + + return p_iscsi->event_cb(p_iscsi->event_context, + fw_event_code, data); + } else { + DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n"); + return -EINVAL; + } +} + +static int qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_addr, @@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; - p_init->ooo_enable = p_params->ooo_enable; p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + p_params->ll2_ooo_queue_id; + p_init->func_params.log_page_size = p_params->log_page_size; val = p_params->num_tasks; p_init->func_params.num_tasks = cpu_to_le16(val); @@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer); val = 
p_params->tx_sws_timer; p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val); - p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt; + p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt; p_hwfn->p_iscsi_info->event_context = event_context; p_hwfn->p_iscsi_info->event_cb = async_event_cb; @@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, int rc = 0; u32 dval; u16 wval; - u8 i; u16 *p; + u8 i; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); - p_tcp->flags = p_conn->tcp_flags; + p_tcp->flags = cpu_to_le16(p_conn->tcp_flags); p_tcp->ip_version = p_conn->ip_version; for (i = 0; i < 4; i++) { dval = p_conn->remote_ip[i]; @@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); - p_tcp2->flags = p_conn->tcp_flags; + p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags); p_tcp2->ip_version = p_conn->ip_version; for (i = 0; i < 4; i++) { @@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp2->syn_ip_payload_length = cpu_to_le16(wval); p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr); p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr); + p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd); + p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt; + p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout); + p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time); + p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval); } return qed_spq_post(p_hwfn, p_ent, NULL); @@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, } } -static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn, - struct qed_iscsi_conn *p_conn) +static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn) { if (!p_conn->queue_cnts_virt_addr) goto nomem; @@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn, rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn); if (!rc) - rc = qed_iscsi_setup_connection(p_hwfn, p_conn); + rc = qed_iscsi_setup_connection(p_conn); if (rc) { spin_lock_bh(&p_hwfn->p_iscsi_info->lock); @@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev, con->ss_thresh = conn_info->ss_thresh; con->srtt = conn_info->srtt; con->rtt_var = conn_info->rtt_var; - con->ts_time = conn_info->ts_time; con->ts_recent = conn_info->ts_recent; con->ts_recent_age = conn_info->ts_recent_age; con->total_rt = conn_info->total_rt; @@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev, con->mss = conn_info->mss; con->snd_wnd_scale = conn_info->snd_wnd_scale; con->rcv_wnd_scale = conn_info->rcv_wnd_scale; - con->ts_ticks_per_second = conn_info->ts_ticks_per_second; con->da_timeout_value = conn_info->da_timeout_value; con->ack_frequency = conn_info->ack_frequency; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 409041eab189..ca4a81dc1ace 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -64,14 +64,21 @@ struct mpa_v2_hdr { #define QED_IWARP_INVALID_TCP_CID 0xffffffff #define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024) -#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024) +#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff) #define TIMESTAMP_HEADER_SIZE (12) +#define 
QED_IWARP_MAX_FIN_RT_DEFAULT (2) #define QED_IWARP_TS_EN BIT(0) #define QED_IWARP_DA_EN BIT(1) #define QED_IWARP_PARAM_CRC_NEEDED (1) #define QED_IWARP_PARAM_P2P (1) +#define QED_IWARP_DEF_MAX_RT_TIME (0) +#define QED_IWARP_DEF_CWND_FACTOR (4) +#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5) +#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ +#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ + static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u16 echo, union event_ring_data *data, @@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } -void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, - struct iwarp_init_func_params *p_ramrod) +void +qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_init_func_ramrod_data *p_ramrod) { - p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) + - p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + p_ramrod->iwarp.ll2_ooo_q_index = + RESC_START(p_hwfn, QED_LL2_QUEUE) + + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + + p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; + + return; } static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) @@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) tcp->ttl = 0x40; tcp->tos_or_tc = 0; + tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME; + tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss; + tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT; + tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT; + tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL; + tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; tcp->connect_mode = ep->connect_mode; @@ -807,6 +826,7 @@ static int qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; + struct qed_iwarp_info *iwarp_info; struct qed_sp_init_data init_data; dma_addr_t async_output_phys; struct qed_spq_entry *p_ent; @@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) p_mpa_ramrod->common.reject = 1; } + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size; p_mpa_ramrod->mode = ep->mpa_rev; SET_FIELD(p_mpa_ramrod->rtr_pref, IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); @@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - ilog2(QED_IWARP_RCV_WND_SIZE_MIN); + iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index c1ecd743305f..b8f612d00241 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -95,6 +95,7 @@ struct qed_iwarp_info { spinlock_t iw_lock; /* for iwarp resources */ spinlock_t qp_lock; /* for teardown races */ u32 rcv_wnd_scale; + u16 rcv_wnd_size; u16 max_mtu; u8 mac_addr[ETH_ALEN]; u8 crc_needed; @@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_rdma_start_in_params *params); void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, - struct iwarp_init_func_params *p_ramrod); + struct iwarp_init_func_ramrod_data *p_ramrod); int qed_iwarp_stop(struct qed_hwfn 
*p_hwfn, struct qed_ptt *p_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 085338990f49..893ef08a4b39 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -223,10 +223,9 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid; int rc; - p_cid = vmalloc(sizeof(*p_cid)); + p_cid = vzalloc(sizeof(*p_cid)); if (!p_cid) return NULL; - memset(p_cid, 0, sizeof(*p_cid)); p_cid->opaque_fid = opaque_fid; p_cid->cid = cid; @@ -1969,33 +1968,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev) _qed_get_vport_stats(cdev, cdev->reset_stats); } -static void -qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_arfs_config_params *p_cfg_params) +static enum gft_profile_type +qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) { - if (p_cfg_params->arfs_enable) { - qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id, - p_cfg_params->tcp, p_cfg_params->udp, - p_cfg_params->ipv4, p_cfg_params->ipv6); - DP_VERBOSE(p_hwfn, QED_MSG_SP, - "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n", + if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE) + return GFT_PROFILE_TYPE_4_TUPLE; + if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) + return GFT_PROFILE_TYPE_IP_DST_PORT; + return GFT_PROFILE_TYPE_L4_DST_PORT; +} + +void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_arfs_config_params *p_cfg_params) +{ + if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) { + qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_cfg_params->tcp, + p_cfg_params->udp, + p_cfg_params->ipv4, + p_cfg_params->ipv6, + qed_arfs_mode_to_hsi(p_cfg_params->mode)); + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n", p_cfg_params->tcp ? "Enable" : "Disable", p_cfg_params->udp ? "Enable" : "Disable", p_cfg_params->ipv4 ? "Enable" : "Disable", - p_cfg_params->ipv6 ? "Enable" : "Disable"); + p_cfg_params->ipv6 ? "Enable" : "Disable", + (u32)p_cfg_params->mode); } else { - qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n"); + qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); } - - DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n", - p_cfg_params->arfs_enable ? 
"Enable" : "Disable"); } -static int -qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, +int +qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_cb, - dma_addr_t p_addr, u16 length, u16 qid, - u8 vport_id, bool b_is_add) + struct qed_ntuple_filter_params *p_params) { struct rx_update_gft_filter_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -2004,13 +2015,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 abs_vport_id = 0; int rc = -EINVAL; - rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) return rc; - rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); - if (rc) - return rc; + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id); + if (rc) + return rc; + } /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -2032,17 +2045,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; p_ramrod = &p_ent->ramrod.rx_update_gft; - DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr); - p_ramrod->pkt_hdr_length = cpu_to_le16(length); - p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id); - p_ramrod->vport_id = abs_vport_id; - p_ramrod->filter_type = RFS_FILTER_TYPE; - p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER; + + DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); + p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); + + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + p_ramrod->rx_qid_valid = 1; + p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + } + + p_ramrod->flow_id_valid = 0; + p_ramrod->flow_id = 0; + + p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); + p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER + : GFT_DELETE_FILTER; DP_VERBOSE(p_hwfn, QED_MSG_SP, "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", abs_vport_id, abs_rx_q_id, - b_is_add ? "Adding" : "Removing", (u64)p_addr, length); + p_params->b_is_add ? 
"Adding" : "Removing", + (u64)p_params->addr, p_params->length); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -2743,7 +2766,8 @@ static int qed_configure_filter(struct qed_dev *cdev, } } -static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) +static int qed_configure_arfs_searcher(struct qed_dev *cdev, + enum qed_filter_config_mode mode) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_arfs_config_params arfs_config_params; @@ -2753,8 +2777,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) arfs_config_params.udp = true; arfs_config_params.ipv4 = true; arfs_config_params.ipv6 = true; - arfs_config_params.arfs_enable = en_searcher; - + arfs_config_params.mode = mode; qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, &arfs_config_params); return 0; @@ -2762,8 +2785,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher) static void qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, - void *cookie, union event_ring_data *data, - u8 fw_return_code) + void *cookie, + union event_ring_data *data, u8 fw_return_code) { struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; void *dev = p_hwfn->cdev->ops_cookie; @@ -2771,10 +2794,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, op->arfs_filter_op(dev, cookie, fw_return_code); } -static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, - dma_addr_t mapping, u16 length, - u16 vport_id, u16 rx_queue_id, - bool add_filter) +static int +qed_ntuple_arfs_filter_config(struct qed_dev *cdev, + void *cookie, + struct qed_ntuple_filter_params *params) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); struct qed_spq_comp_cb cb; @@ -2783,9 +2806,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, cb.function = qed_arfs_sp_response_handler; cb.cookie = cookie; - rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, - &cb, mapping, length, rx_queue_id, - vport_id, add_filter); + if (params->b_is_vf) { + if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false, + false)) { + DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n", + params->vf_id); + return rc; + } + + params->vport_id = params->vf_id + 1; + params->qid = QED_RFS_NTUPLE_QID_RSS; + } + + rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params); if (rc) DP_NOTICE(p_hwfn, "Failed to issue a-RFS filter configuration\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index cc1f248551c9..c4030e949cce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -190,7 +190,7 @@ struct qed_arfs_config_params { bool udp; bool ipv4; bool ipv6; - bool arfs_enable; + enum qed_filter_config_mode mode; }; struct qed_sp_vport_update_params { @@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); +/** + * *@brief qed_arfs_mode_configure - + * + **Enable or disable rfs mode. It must accept atleast one of tcp or udp true + **and atleast one of ipv4 or ipv6 true to enable rfs mode. + * + **@param p_hwfn + **@param p_ptt + **@param p_cfg_params - arfs mode configuration parameters. 
+ * + */ +void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_arfs_config_params *p_cfg_params); + +/** + * @brief - qed_configure_rfs_ntuple_filter + * + * This ramrod should be used to add or remove an arfs hw filter + * + * @param p_hwfn + * @param p_cb - Used for QED_SPQ_MODE_CB, where client would initialize + * it with cookie and callback function address, if not + * using this mode then client must pass NULL. + * @param p_params + */ +int +qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, + struct qed_spq_comp_cb *p_cb, + struct qed_ntuple_filter_params *p_params); + #define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) #define QED_QUEUE_CID_SELF (0xff) diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 047f556ca62e..c4f14fdc4e77 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; + data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id); + + data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp); } static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, @@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, qed_chain_get_pbl_phys(&p_rx->rcq_chain)); p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; - p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en; + p_ramrod->inner_vlan_stripping_en = + p_ll2_conn->input.rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; @@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); - p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
- CORE_TX_DEST_NW : CORE_TX_DEST_LB; + switch (data->input.tx_dest) { + case QED_LL2_TX_DEST_NW: + p_ll2_info->tx_dest = CORE_TX_DEST_NW; + break; + case QED_LL2_TX_DEST_LB: + p_ll2_info->tx_dest = CORE_TX_DEST_LB; + break; + case QED_LL2_TX_DEST_DROP: + p_ll2_info->tx_dest = CORE_TX_DEST_DROP; + break; + default: + return -EINVAL; + } + if (data->input.conn_type == QED_LL2_TYPE_OOO || data->input.secondary_queue) p_ll2_info->main_func_queue = false; @@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) goto release_terminate; } - if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && - cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) { + if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); rc = qed_ll2_start_ooo(cdev, params); if (rc) { @@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev) qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt); eth_zero_addr(cdev->ll2_mac_address); - if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI && - cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) + if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) qed_ll2_stop_ooo(cdev); rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev), diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 8b99c7d26f34..6f46cb11f349 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) DRV_MSG_CODE_NVM_READ_NVRAM, addr + offset + (bytes_to_copy << - DRV_MB_PARAM_NVM_LEN_SHIFT), + DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, (u32 *)(p_buf + offset)); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index c8c4b3940564..bdc46f11ce45 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { qed_iwarp_init_fw_ramrod(p_hwfn, - &p_ent->ramrod.iwarp_init_func.iwarp); + &p_ent->ramrod.iwarp_init_func); p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; } else { p_ramrod = &p_ent->ramrod.roce_init_func.rdma; diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 0cdb4337b3a0..f7122059b6b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -124,6 +124,8 @@ 0x1f0434UL #define PRS_REG_SEARCH_TAG1 \ 0x1f0444UL +#define PRS_REG_SEARCH_TENANT_ID \ + 0x1f044cUL #define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \ 0x1f0a0cUL #define PRS_REG_SEARCH_TCP_FIRST_FRAG \ @@ -200,7 +202,13 @@ 0x2e8800UL #define CCFC_REG_STRONG_ENABLE_VF \ 0x2e070cUL -#define CDU_REG_CID_ADDR_PARAMS \ +#define CDU_REG_CCFC_CTX_VALID0 \ + 0x580400UL +#define CDU_REG_CCFC_CTX_VALID1 \ + 0x580404UL +#define CDU_REG_TCFC_CTX_VALID0 \ + 0x580408UL +#define CDU_REG_CID_ADDR_PARAMS \ 0x580900UL #define DBG_REG_CLIENT_ENABLE \ 0x010004UL @@ -564,7 +572,7 @@ #define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL #define PRS_REG_GRE_PROTOCOL 0x1f0734UL #define PRS_REG_VXLAN_PORT 0x1f0738UL -#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL #define NIG_REG_ENC_TYPE_ENABLE 0x501058UL #define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 
0) @@ -580,11 +588,11 @@ #define PRS_REG_NGE_PORT 0x1f086cUL #define NIG_REG_NGE_PORT 0x508b38UL -#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL -#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL -#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL -#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL +#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL #define NIG_REG_NGE_IP_ENABLE 0x508b28UL #define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL @@ -595,15 +603,15 @@ #define QM_REG_WFQPFWEIGHT 0x2f4e80UL #define QM_REG_WFQVPWEIGHT 0x2fa000UL -#define PGLCS_REG_DBG_SELECT_K2 \ +#define PGLCS_REG_DBG_SELECT_K2_E5 \ 0x001d14UL -#define PGLCS_REG_DBG_DWORD_ENABLE_K2 \ +#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x001d18UL -#define PGLCS_REG_DBG_SHIFT_K2 \ +#define PGLCS_REG_DBG_SHIFT_K2_E5 \ 0x001d1cUL -#define PGLCS_REG_DBG_FORCE_VALID_K2 \ +#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \ 0x001d20UL -#define PGLCS_REG_DBG_FORCE_FRAME_K2 \ +#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \ 0x001d24UL #define MISC_REG_RESET_PL_PDA_VMAIN_1 \ 0x008070UL @@ -615,7 +623,7 @@ 0x009050UL #define MISCS_REG_RESET_PL_HV \ 0x009060UL -#define MISCS_REG_RESET_PL_HV_2_K2 \ +#define MISCS_REG_RESET_PL_HV_2_K2_E5 \ 0x009150UL #define DMAE_REG_DBG_SELECT \ 0x00c510UL @@ -647,15 +655,15 @@ 0x0500b0UL #define GRC_REG_DBG_FORCE_FRAME \ 0x0500b4UL -#define UMAC_REG_DBG_SELECT_K2 \ +#define UMAC_REG_DBG_SELECT_K2_E5 \ 0x051094UL -#define UMAC_REG_DBG_DWORD_ENABLE_K2 \ +#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x051098UL -#define UMAC_REG_DBG_SHIFT_K2 \ +#define UMAC_REG_DBG_SHIFT_K2_E5 \ 0x05109cUL -#define UMAC_REG_DBG_FORCE_VALID_K2 \ +#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \ 0x0510a0UL -#define UMAC_REG_DBG_FORCE_FRAME_K2 \ +#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \ 0x0510a4UL #define MCP2_REG_DBG_SELECT \ 0x052400UL @@ -717,15 +725,15 @@ 0x1f0ba0UL #define PRS_REG_DBG_FORCE_FRAME \ 0x1f0ba4UL -#define CNIG_REG_DBG_SELECT_K2 \ +#define CNIG_REG_DBG_SELECT_K2_E5 \ 0x218254UL -#define CNIG_REG_DBG_DWORD_ENABLE_K2 \ +#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x218258UL -#define CNIG_REG_DBG_SHIFT_K2 \ +#define CNIG_REG_DBG_SHIFT_K2_E5 \ 0x21825cUL -#define CNIG_REG_DBG_FORCE_VALID_K2 \ +#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \ 0x218260UL -#define CNIG_REG_DBG_FORCE_FRAME_K2 \ +#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \ 0x218264UL #define PRM_REG_DBG_SELECT \ 0x2306a8UL @@ -997,35 +1005,35 @@ 0x580710UL #define CDU_REG_DBG_FORCE_FRAME \ 0x580714UL -#define WOL_REG_DBG_SELECT_K2 \ +#define WOL_REG_DBG_SELECT_K2_E5 \ 0x600140UL -#define WOL_REG_DBG_DWORD_ENABLE_K2 \ +#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x600144UL -#define WOL_REG_DBG_SHIFT_K2 \ +#define WOL_REG_DBG_SHIFT_K2_E5 \ 0x600148UL -#define WOL_REG_DBG_FORCE_VALID_K2 \ +#define WOL_REG_DBG_FORCE_VALID_K2_E5 \ 0x60014cUL -#define WOL_REG_DBG_FORCE_FRAME_K2 \ +#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \ 0x600150UL -#define BMBN_REG_DBG_SELECT_K2 \ +#define BMBN_REG_DBG_SELECT_K2_E5 \ 0x610140UL -#define BMBN_REG_DBG_DWORD_ENABLE_K2 \ +#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x610144UL -#define BMBN_REG_DBG_SHIFT_K2 \ +#define BMBN_REG_DBG_SHIFT_K2_E5 \ 0x610148UL -#define BMBN_REG_DBG_FORCE_VALID_K2 \ +#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \ 0x61014cUL -#define BMBN_REG_DBG_FORCE_FRAME_K2 \ 
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \ 0x610150UL -#define NWM_REG_DBG_SELECT_K2 \ +#define NWM_REG_DBG_SELECT_K2_E5 \ 0x8000ecUL -#define NWM_REG_DBG_DWORD_ENABLE_K2 \ +#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x8000f0UL -#define NWM_REG_DBG_SHIFT_K2 \ +#define NWM_REG_DBG_SHIFT_K2_E5 \ 0x8000f4UL -#define NWM_REG_DBG_FORCE_VALID_K2 \ +#define NWM_REG_DBG_FORCE_VALID_K2_E5 \ 0x8000f8UL -#define NWM_REG_DBG_FORCE_FRAME_K2\ +#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \ 0x8000fcUL #define PBF_REG_DBG_SELECT \ 0xd80060UL @@ -1247,36 +1255,76 @@ 0x1901534UL #define USEM_REG_DBG_FORCE_FRAME \ 0x1901538UL -#define NWS_REG_DBG_SELECT_K2 \ +#define NWS_REG_DBG_SELECT_K2_E5 \ 0x700128UL -#define NWS_REG_DBG_DWORD_ENABLE_K2 \ +#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x70012cUL -#define NWS_REG_DBG_SHIFT_K2 \ +#define NWS_REG_DBG_SHIFT_K2_E5 \ 0x700130UL -#define NWS_REG_DBG_FORCE_VALID_K2 \ +#define NWS_REG_DBG_FORCE_VALID_K2_E5 \ 0x700134UL -#define NWS_REG_DBG_FORCE_FRAME_K2 \ +#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \ 0x700138UL -#define MS_REG_DBG_SELECT_K2 \ +#define MS_REG_DBG_SELECT_K2_E5 \ 0x6a0228UL -#define MS_REG_DBG_DWORD_ENABLE_K2 \ +#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \ 0x6a022cUL -#define MS_REG_DBG_SHIFT_K2 \ +#define MS_REG_DBG_SHIFT_K2_E5 \ 0x6a0230UL -#define MS_REG_DBG_FORCE_VALID_K2 \ +#define MS_REG_DBG_FORCE_VALID_K2_E5 \ 0x6a0234UL -#define MS_REG_DBG_FORCE_FRAME_K2 \ +#define MS_REG_DBG_FORCE_FRAME_K2_E5 \ 0x6a0238UL -#define PCIE_REG_DBG_COMMON_SELECT_K2 \ +#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \ 0x054398UL -#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2 \ +#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \ 0x05439cUL -#define PCIE_REG_DBG_COMMON_SHIFT_K2 \ +#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \ 0x0543a0UL -#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2 \ +#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \ 0x0543a4UL -#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2 \ +#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \ 0x0543a8UL +#define PTLD_REG_DBG_SELECT_E5 \ + 0x5a1600UL +#define PTLD_REG_DBG_DWORD_ENABLE_E5 \ + 0x5a1604UL +#define PTLD_REG_DBG_SHIFT_E5 \ + 0x5a1608UL +#define PTLD_REG_DBG_FORCE_VALID_E5 \ + 0x5a160cUL +#define PTLD_REG_DBG_FORCE_FRAME_E5 \ + 0x5a1610UL +#define YPLD_REG_DBG_SELECT_E5 \ + 0x5c1600UL +#define YPLD_REG_DBG_DWORD_ENABLE_E5 \ + 0x5c1604UL +#define YPLD_REG_DBG_SHIFT_E5 \ + 0x5c1608UL +#define YPLD_REG_DBG_FORCE_VALID_E5 \ + 0x5c160cUL +#define YPLD_REG_DBG_FORCE_FRAME_E5 \ + 0x5c1610UL +#define RGSRC_REG_DBG_SELECT_E5 \ + 0x320040UL +#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \ + 0x320044UL +#define RGSRC_REG_DBG_SHIFT_E5 \ + 0x320048UL +#define RGSRC_REG_DBG_FORCE_VALID_E5 \ + 0x32004cUL +#define RGSRC_REG_DBG_FORCE_FRAME_E5 \ + 0x320050UL +#define TGSRC_REG_DBG_SELECT_E5 \ + 0x322040UL +#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \ + 0x322044UL +#define TGSRC_REG_DBG_SHIFT_E5 \ + 0x322048UL +#define TGSRC_REG_DBG_FORCE_VALID_E5 \ + 0x32204cUL +#define TGSRC_REG_DBG_FORCE_FRAME_E5 \ + 0x322050UL #define MISC_REG_RESET_PL_UA \ 0x008050UL #define MISC_REG_RESET_PL_HV \ @@ -1415,7 +1463,7 @@ 0x1940000UL #define SEM_FAST_REG_INT_RAM \ 0x020000UL -#define SEM_FAST_REG_INT_RAM_SIZE \ +#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \ 20480 #define GRC_REG_TRACE_FIFO_VALID_DATA \ 0x050064UL @@ -1433,6 +1481,8 @@ 0x340800UL #define BRB_REG_BIG_RAM_DATA \ 0x341500UL +#define BRB_REG_BIG_RAM_DATA_SIZE \ + 64 #define SEM_FAST_REG_STALL_0_BB_K2 \ 0x000488UL #define SEM_FAST_REG_STALLED \ @@ -1451,7 +1501,7 @@ 0x238c30UL #define MISCS_REG_BLOCK_256B_EN \ 0x009074UL -#define 
MCP_REG_SCRATCH_SIZE \ +#define MCP_REG_SCRATCH_SIZE_BB_K2 \ 57344 #define MCP_REG_CPU_REG_FILE \ 0xe05200UL @@ -1485,35 +1535,35 @@ 0x008c14UL #define NWS_REG_NWS_CMU_K2 \ 0x720000UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \ 0x000680UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \ 0x000684UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \ 0x0006c0UL -#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \ 0x0006c4UL -#define MS_REG_MS_CMU_K2 \ +#define MS_REG_MS_CMU_K2_E5 \ 0x6a4000UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ 0x000208UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ 0x00020cUL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ 0x000210UL -#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ 0x000214UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \ 0x000208UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \ 0x00020cUL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \ 0x000210UL -#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \ 0x000214UL -#define PHY_PCIE_REG_PHY0_K2 \ +#define PHY_PCIE_REG_PHY0_K2_E5 \ 0x620000UL -#define PHY_PCIE_REG_PHY1_K2 \ +#define PHY_PCIE_REG_PHY1_K2_E5 \ 0x624000UL #define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL #define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index a1d33f35aad3..5e927b6cac22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); p_ramrod->mf_mode = MF_NPAR; } - p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; + + p_ramrod->outer_tag_config.outer_tag.tci = + cpu_to_le16(p_hwfn->hw_info.ovlan); /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, @@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", - sb, sb_index, p_ramrod->outer_tag); + "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n", + sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci); rc = qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index be48d9abd001..217b62a3f587 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -215,7 +215,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq) { - struct core_conn_context *p_cxt; + struct e4_core_conn_context *p_cxt; struct qed_cxt_info cxt_info; u16 physical_q; int rc; @@ -233,11 +233,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn 
*p_hwfn, p_cxt = cxt_info.p_cxt; SET_FIELD(p_cxt->xstorm_ag_context.flags10, - XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags1, - XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); SET_FIELD(p_cxt->xstorm_ag_context.flags9, - XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); /* QM physical queue */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 3f40b1de7957..5acb91b3564c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } -static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, - int rel_vf_id, - bool b_enabled_only, bool b_non_malicious) +bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious) { if (!p_hwfn->pf_iov_info) { DP_NOTICE(p_hwfn->cdev, "No iov info\n"); @@ -1621,7 +1621,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, /* fill in pfdev info */ pfdev_info->chip_num = p_hwfn->cdev->chip_num; pfdev_info->db_size = 0; - pfdev_info->indices_per_sb = PIS_PER_SB; + pfdev_info->indices_per_sb = PIS_PER_SB_E4; pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; @@ -3582,11 +3582,11 @@ static int qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) { - u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; + u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4]; int i, cnt; /* Read initial consumers & producers */ - for (i = 0; i < MAX_NUM_VOQS; i++) { + for (i = 0; i < MAX_NUM_VOQS_E4; i++) { u32 prod; cons[i] = qed_rd(p_hwfn, p_ptt, @@ -3601,7 +3601,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, /* Wait for consumers to pass the producers */ i = 0; for (cnt = 0; cnt < 50; cnt++) { - for (; i < MAX_NUM_VOQS; i++) { + for (; i < MAX_NUM_VOQS_E4; i++) { u32 tmp; tmp = qed_rd(p_hwfn, p_ptt, @@ -3611,7 +3611,7 @@ qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, break; } - if (i == MAX_NUM_VOQS) + if (i == MAX_NUM_VOQS_E4) break; msleep(20); @@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, int vfid, int val) { + struct qed_mcp_link_state *p_link; struct qed_vf_info *vf; u8 abs_vp_id = 0; int rc; @@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, if (rc) return rc; - return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); + p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; + + return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val, + p_link->speed); } static int diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 3955929ba892..9a8fd79611f2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -274,6 +274,23 @@ enum qed_iov_wq_flag { #ifdef CONFIG_QED_SRIOV /** + * @brief Check if given VF ID @vfid is valid + * w.r.t. 
@b_enabled_only value + * if b_enabled_only = true - only enabled VF id is valid + * else any VF id less than max_vfs is valid + * + * @param p_hwfn + * @param rel_vf_id - Relative VF ID + * @param b_enabled_only - consider only enabled VF + * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * + * @return bool - true for valid VF ID + */ +bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious); + +/** * @brief - Given a VF index, return index of next [including that] active VF. * * @param p_hwfn @@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev); int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled); void qed_inform_vf_link_state(struct qed_hwfn *hwfn); #else +static inline bool +qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, bool b_enabled_only, bool b_non_malicious) +{ + return false; +} + static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) { diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 8a336517baac..9935978c5542 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -40,6 +40,7 @@ #include <linux/kernel.h> #include <linux/mutex.h> #include <linux/bpf.h> +#include <net/xdp.h> #include <linux/qed/qede_rdma.h> #include <linux/io.h> #ifdef CONFIG_RFS_ACCEL @@ -52,9 +53,9 @@ #include <linux/qed/qed_eth_if.h> #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 10 -#define QEDE_REVISION_VERSION 10 -#define QEDE_ENGINEERING_VERSION 21 +#define QEDE_MINOR_VERSION 33 +#define QEDE_REVISION_VERSION 0 +#define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ __stringify(QEDE_REVISION_VERSION) "." \ @@ -345,6 +346,7 @@ struct qede_rx_queue { u64 xdp_no_pass; void *handle; + struct xdp_rxq_info xdp_rxq; }; union db_prod { diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index 77aa826227e5..6687e04d1558 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, u16 rxq_id, bool add_fltr) { const struct qed_eth_ops *op = edev->ops; + struct qed_ntuple_filter_params params; if (n->used) return; + memset(¶ms, 0, sizeof(params)); + + params.addr = n->mapping; + params.length = n->buf_len; + params.qid = rxq_id; + params.b_is_add = add_fltr; + DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", add_fltr ? 
"Adding" : "Deleting", @@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev, n->used = true; n->filter_op = add_fltr; - op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0, - rxq_id, add_fltr); + op->ntuple_filter_config(edev->cdev, n, ¶ms); } static void @@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, edev->arfs->filter_count++; if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { - edev->ops->configure_arfs_searcher(edev->cdev, true); + enum qed_filter_config_mode mode; + + mode = QED_FILTER_CONFIG_MODE_5_TUPLE; + edev->ops->configure_arfs_searcher(edev->cdev, mode); edev->arfs->enable = true; } @@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, edev->arfs->filter_count--; if (!edev->arfs->filter_count && edev->arfs->enable) { + enum qed_filter_config_mode mode; + + mode = QED_FILTER_CONFIG_MODE_DISABLE; edev->arfs->enable = false; - edev->ops->configure_arfs_searcher(edev->cdev, false); + edev->ops->configure_arfs_searcher(edev->cdev, mode); } } @@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) if (!edev->arfs->filter_count) { if (edev->arfs->enable) { + enum qed_filter_config_mode mode; + + mode = QED_FILTER_CONFIG_MODE_DISABLE; edev->arfs->enable = false; - edev->ops->configure_arfs_searcher(edev->cdev, false); + edev->ops->configure_arfs_searcher(edev->cdev, mode); } #ifdef CONFIG_RFS_ACCEL } else { diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 48ec4c56cddf..dafc079ab6b9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1006,6 +1006,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, xdp.data = xdp.data_hard_start + *data_offset; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + *len; + xdp.rxq = &rxq->xdp_rxq; /* Queues always have a full reset currently, so for the time * being until there's atomic program replace just mark read diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 90d79ae2a48f..2db70eabddfe 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -765,6 +765,12 @@ static void qede_free_fp_array(struct qede_dev *edev) fp = &edev->fp_array[i]; kfree(fp->sb_info); + /* Handle mem alloc failure case where qede_init_fp + * didn't register xdp_rxq_info yet. 
+ * Implicit only (fp->type & QEDE_FASTPATH_RX) + */ + if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq)) + xdp_rxq_info_unreg(&fp->rxq->xdp_rxq); kfree(fp->rxq); kfree(fp->xdp_tx); kfree(fp->txq); @@ -1147,7 +1153,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, static int qede_alloc_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block *sb_virt; + struct status_block_e4 *sb_virt; dma_addr_t sb_phys; int rc; @@ -1493,6 +1499,10 @@ static void qede_init_fp(struct qede_dev *edev) else fp->rxq->data_direction = DMA_FROM_DEVICE; fp->rxq->dev = &edev->pdev->dev; + + /* Driver has no error path from here */ + WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, + fp->rxq->rxq_id) < 0); } if (fp->type & QEDE_FASTPATH_TX) { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index cedacddf50fb..7e7704daf5f1 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -143,7 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - int ingress_format = RMNET_INGRESS_FORMAT_DEAGGREGATION; + u32 data_format = RMNET_INGRESS_FORMAT_DEAGGREGATION; struct net_device *real_dev; int mode = RMNET_EPMODE_VND; struct rmnet_endpoint *ep; @@ -185,11 +185,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, struct ifla_vlan_flags *flags; flags = nla_data(data[IFLA_VLAN_FLAGS]); - ingress_format = flags->flags & flags->mask; + data_format = flags->flags & flags->mask; } - netdev_dbg(dev, "data format [ingress 0x%08X]\n", ingress_format); - port->ingress_data_format = ingress_format; + netdev_dbg(dev, "data format [0x%08X]\n", data_format); + port->data_format = data_format; return 0; @@ -353,7 +353,7 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], struct ifla_vlan_flags *flags; flags = nla_data(data[IFLA_VLAN_FLAGS]); - port->ingress_data_format = flags->flags & flags->mask; + port->data_format = flags->flags & flags->mask; } return 0; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 2ea9fe326571..00e4634100d3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -32,7 +32,7 @@ struct rmnet_endpoint { */ struct rmnet_port { struct net_device *dev; - u32 ingress_data_format; + u32 data_format; u8 nr_rmnet_devs; u8 rmnet_mode; struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 05539321ba3a..601edec28c5f 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -16,6 +16,7 @@ #include <linux/netdevice.h> #include <linux/netdev_features.h> #include <linux/if_arp.h> +#include <net/sock.h> #include "rmnet_private.h" #include "rmnet_config.h" #include "rmnet_vnd.h" @@ -65,19 +66,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_endpoint *ep; + u16 len, pad; u8 mux_id; - u16 len; if (RMNET_MAP_GET_CD_BIT(skb)) { - if (port->ingress_data_format - & RMNET_INGRESS_FORMAT_MAP_COMMANDS) + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS) return rmnet_map_command(skb, port); goto
free_skb; } mux_id = RMNET_MAP_GET_MUX_ID(skb); - len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb); + pad = RMNET_MAP_GET_PAD(skb); + len = RMNET_MAP_GET_LENGTH(skb) - pad; if (mux_id >= RMNET_MAX_LOGICAL_EP) goto free_skb; @@ -90,8 +91,14 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, /* Subtract MAP header */ skb_pull(skb, sizeof(struct rmnet_map_header)); - skb_trim(skb, len); rmnet_set_skb_proto(skb); + + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) { + if (!rmnet_map_checksum_downlink_packet(skb, len + pad)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + skb_trim(skb, len); rmnet_deliver_skb(skb); return; @@ -114,8 +121,8 @@ rmnet_map_ingress_handler(struct sk_buff *skb, skb_push(skb, ETH_HLEN); } - if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { - while ((skbn = rmnet_map_deaggregate(skb)) != NULL) + if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { + while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL) __rmnet_map_ingress_handler(skbn, port); consume_skb(skb); @@ -134,19 +141,24 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, additional_header_len = 0; required_headroom = sizeof(struct rmnet_map_header); + if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) { + additional_header_len = sizeof(struct rmnet_map_ul_csum_header); + required_headroom += additional_header_len; + } + if (skb_headroom(skb) < required_headroom) { if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) goto fail; } + if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) + rmnet_map_checksum_uplink_packet(skb, orig_dev); + map_header = rmnet_map_add_map_header(skb, additional_header_len, 0); if (!map_header) goto fail; - if (mux_id == 0xff) - map_header->mux_id = 0; - else - map_header->mux_id = mux_id; + map_header->mux_id = mux_id; skb->protocol = htons(ETH_P_MAP); @@ -208,6 +220,8 @@ void rmnet_egress_handler(struct sk_buff *skb) struct rmnet_priv *priv; u8 mux_id; + sk_pacing_shift_update(skb->sk, 8); + orig_dev = skb->dev; priv = netdev_priv(orig_dev); skb->dev = priv->real_dev; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index 4df359de28c5..6ce31e29136d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -47,6 +47,22 @@ struct rmnet_map_header { u16 pkt_len; } __aligned(1); +struct rmnet_map_dl_csum_trailer { + u8 reserved1; + u8 valid:1; + u8 reserved2:7; + u16 csum_start_offset; + u16 csum_length; + __be16 csum_value; +} __aligned(1); + +struct rmnet_map_ul_csum_header { + __be16 csum_start_offset; + u16 csum_insert_offset:14; + u16 udp_ip4_ind:1; + u16 csum_enabled:1; +} __aligned(1); + #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \ (Y)->data)->mux_id) #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \ @@ -67,10 +83,13 @@ struct rmnet_map_header { #define RMNET_MAP_NO_PAD_BYTES 0 #define RMNET_MAP_ADD_PAD_BYTES 1 -u8 rmnet_map_demultiplex(struct sk_buff *skb); -struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb); +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port); struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, int hdrlen, int pad); void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port); +int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len); +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev); #endif /* _RMNET_MAP_H_ */ 
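Before the next file, a self-contained sketch of the MAP framing arithmetic the handlers above rely on: pkt_len in the MAP header covers payload plus pad, the ingress path computes the usable length as pkt_len minus pad, and (with RMNET_INGRESS_FORMAT_MAP_CKSUMV4) checksum validation runs over len + pad before the skb is trimmed down to len. The field layout below is simplified for illustration; the real struct rmnet_map_header lives in rmnet_map.h above.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Simplified stand-in for struct rmnet_map_header (bit order illustrative) */
struct map_hdr {
	uint8_t  pad_len:6;	/* trailing pad bytes */
	uint8_t  reserved:1;
	uint8_t  cd_bit:1;	/* 1 = command frame, 0 = data frame */
	uint8_t  mux_id;	/* logical endpoint the payload belongs to */
	uint16_t pkt_len;	/* big-endian: payload length + pad */
};

int main(void)
{
	struct map_hdr h = {
		.pad_len = 2, .cd_bit = 0, .mux_id = 1,
		.pkt_len = htons(1502),
	};
	uint16_t pad = h.pad_len;
	uint16_t len = ntohs(h.pkt_len) - pad;

	/* A checksum trailer, if present, is validated over len + pad
	 * bytes; only then is the packet trimmed to the usable len.
	 */
	printf("mux %u: %u payload bytes, %u pad\n", h.mux_id, len, pad);
	return 0;
}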
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 51e604923ac1..6bc328fb88e1 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -58,11 +58,24 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, } static void rmnet_map_send_ack(struct sk_buff *skb, - unsigned char type) + unsigned char type, + struct rmnet_port *port) { struct rmnet_map_control_command *cmd; int xmit_status; + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) { + if (skb->len < sizeof(struct rmnet_map_header) + + RMNET_MAP_GET_LENGTH(skb) + + sizeof(struct rmnet_map_dl_csum_trailer)) { + kfree_skb(skb); + return; + } + + skb_trim(skb, skb->len - + sizeof(struct rmnet_map_dl_csum_trailer)); + } + skb->protocol = htons(ETH_P_MAP); cmd = RMNET_MAP_GET_CMD_START(skb); @@ -100,5 +113,5 @@ void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) break; } if (rc == RMNET_MAP_COMMAND_ACK) - rmnet_map_send_ack(skb, rc); + rmnet_map_send_ack(skb, rc, port); } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c index 86b8c758f94e..c74a6c56d315 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c @@ -14,6 +14,9 @@ */ #include <linux/netdevice.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <net/ip6_checksum.h> #include "rmnet_config.h" #include "rmnet_map.h" #include "rmnet_private.h" @@ -21,6 +24,233 @@ #define RMNET_MAP_DEAGGR_SPACING 64 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2) +static __sum16 *rmnet_map_get_csum_field(unsigned char protocol, + const void *txporthdr) +{ + __sum16 *check = NULL; + + switch (protocol) { + case IPPROTO_TCP: + check = &(((struct tcphdr *)txporthdr)->check); + break; + + case IPPROTO_UDP: + check = &(((struct udphdr *)txporthdr)->check); + break; + + default: + check = NULL; + break; + } + + return check; +} + +static int +rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, + struct rmnet_map_dl_csum_trailer *csum_trailer) +{ + __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum; + u16 csum_value, csum_value_final; + struct iphdr *ip4h; + void *txporthdr; + __be16 addend; + + ip4h = (struct iphdr *)(skb->data); + if ((ntohs(ip4h->frag_off) & IP_MF) || + ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) + return -EOPNOTSUPP; + + txporthdr = skb->data + ip4h->ihl * 4; + + csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr); + + if (!csum_field) + return -EPROTONOSUPPORT; + + /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */ + if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) + return 0; + + csum_value = ~ntohs(csum_trailer->csum_value); + hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl); + ip_payload_csum = csum16_sub((__force __sum16)csum_value, + (__force __be16)hdr_csum); + + pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr, + ntohs(ip4h->tot_len) - ip4h->ihl * 4, + ip4h->protocol, 0); + addend = (__force __be16)ntohs((__force __be16)pseudo_csum); + pseudo_csum = csum16_add(ip_payload_csum, addend); + + addend = (__force __be16)ntohs((__force __be16)*csum_field); + csum_temp = ~csum16_sub(pseudo_csum, addend); + csum_value_final = (__force u16)csum_temp; + + if (unlikely(csum_value_final == 0)) { + switch (ip4h->protocol) { + case IPPROTO_UDP: + /* RFC 768 - DL4 1's complement rule 
for UDP csum 0 */ + csum_value_final = ~csum_value_final; + break; + + case IPPROTO_TCP: + /* DL4 Non-RFC compliant TCP checksum found */ + if (*csum_field == (__force __sum16)0xFFFF) + csum_value_final = ~csum_value_final; + break; + } + } + + if (csum_value_final == ntohs((__force __be16)*csum_field)) + return 0; + else + return -EINVAL; +} + +#if IS_ENABLED(CONFIG_IPV6) +static int +rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, + struct rmnet_map_dl_csum_trailer *csum_trailer) +{ + __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp; + u16 csum_value, csum_value_final; + __be16 ip6_hdr_csum, addend; + struct ipv6hdr *ip6h; + void *txporthdr; + u32 length; + + ip6h = (struct ipv6hdr *)(skb->data); + + txporthdr = skb->data + sizeof(struct ipv6hdr); + csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr); + + if (!csum_field) + return -EPROTONOSUPPORT; + + csum_value = ~ntohs(csum_trailer->csum_value); + ip6_hdr_csum = (__force __be16) + ~ntohs((__force __be16)ip_compute_csum(ip6h, + (int)(txporthdr - (void *)(skb->data)))); + ip6_payload_csum = csum16_sub((__force __sum16)csum_value, + ip6_hdr_csum); + + length = (ip6h->nexthdr == IPPROTO_UDP) ? + ntohs(((struct udphdr *)txporthdr)->len) : + ntohs(ip6h->payload_len); + pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + length, ip6h->nexthdr, 0)); + addend = (__force __be16)ntohs((__force __be16)pseudo_csum); + pseudo_csum = csum16_add(ip6_payload_csum, addend); + + addend = (__force __be16)ntohs((__force __be16)*csum_field); + csum_temp = ~csum16_sub(pseudo_csum, addend); + csum_value_final = (__force u16)csum_temp; + + if (unlikely(csum_value_final == 0)) { + switch (ip6h->nexthdr) { + case IPPROTO_UDP: + /* RFC 2460 section 8.1 + * DL6 One's complement rule for UDP checksum 0 + */ + csum_value_final = ~csum_value_final; + break; + + case IPPROTO_TCP: + /* DL6 Non-RFC compliant TCP checksum found */ + if (*csum_field == (__force __sum16)0xFFFF) + csum_value_final = ~csum_value_final; + break; + } + } + + if (csum_value_final == ntohs((__force __be16)*csum_field)) + return 0; + else + return -EINVAL; +} +#endif + +static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + void *txphdr; + u16 *csum; + + txphdr = iphdr + ip4h->ihl * 4; + + if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) { + csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv4_ul_csum_header(void *iphdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + struct iphdr *ip4h = (struct iphdr *)iphdr; + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)iphdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + if (ip4h->protocol == IPPROTO_UDP) + ul_header->udp_ip4_ind = 1; + else + ul_header->udp_ip4_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr); +} + +#if IS_ENABLED(CONFIG_IPV6) +static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr) +{ + struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr; + void *txphdr; + u16 *csum; + + txphdr = ip6hdr + sizeof(struct ipv6hdr); + + if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) { + csum = (u16 
*)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr); + *csum = ~(*csum); + } +} + +static void +rmnet_map_ipv6_ul_csum_header(void *ip6hdr, + struct rmnet_map_ul_csum_header *ul_header, + struct sk_buff *skb) +{ + __be16 *hdr = (__be16 *)ul_header, offset; + + offset = htons((__force u16)(skb_transport_header(skb) - + (unsigned char *)ip6hdr)); + ul_header->csum_start_offset = offset; + ul_header->csum_insert_offset = skb->csum_offset; + ul_header->csum_enabled = 1; + ul_header->udp_ip4_ind = 0; + + /* Changing remaining fields to network order */ + hdr++; + *hdr = htons((__force u16)*hdr); + + skb->ip_summed = CHECKSUM_NONE; + + rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr); +} +#endif + /* Adds MAP header to front of skb->data * Padding is calculated and set appropriately in MAP header. Mux ID is * initialized to 0. @@ -32,9 +262,6 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, u32 padding, map_datalen; u8 *padbytes; - if (skb_headroom(skb) < sizeof(struct rmnet_map_header)) - return NULL; - map_datalen = skb->len - hdrlen; map_header = (struct rmnet_map_header *) skb_push(skb, sizeof(struct rmnet_map_header)); @@ -69,7 +296,8 @@ done: * returned, indicating that there are no more packets to deaggregate. Caller * is responsible for freeing the original skb. */ -struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) +struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, + struct rmnet_port *port) { struct rmnet_map_header *maph; struct sk_buff *skbn; @@ -81,6 +309,9 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) maph = (struct rmnet_map_header *)skb->data; packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header); + if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) + packet_len += sizeof(struct rmnet_map_dl_csum_trailer); + if (((int)skb->len - (int)packet_len) < 0) return NULL; @@ -100,3 +331,73 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb) return skbn; } + +/* Validates packet checksums. Function takes a pointer to + * the beginning of a buffer which contains the IP payload + + * padding + checksum trailer. + * Only IPv4 and IPv6 are supported along with TCP & UDP. + * Fragmented or tunneled packets are not supported. + */ +int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) +{ + struct rmnet_map_dl_csum_trailer *csum_trailer; + + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) + return -EOPNOTSUPP; + + csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); + + if (!csum_trailer->valid) + return -EINVAL; + + if (skb->protocol == htons(ETH_P_IP)) + return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer); + else if (skb->protocol == htons(ETH_P_IPV6)) +#if IS_ENABLED(CONFIG_IPV6) + return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer); +#else + return -EPROTONOSUPPORT; +#endif + + return 0; +} + +/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP + * packets that are supported for UL checksum offload. 
+ */ +void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, + struct net_device *orig_dev) +{ + struct rmnet_map_ul_csum_header *ul_header; + void *iphdr; + + ul_header = (struct rmnet_map_ul_csum_header *) + skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); + + if (unlikely(!(orig_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) + goto sw_csum; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + iphdr = (char *)ul_header + + sizeof(struct rmnet_map_ul_csum_header); + + if (skb->protocol == htons(ETH_P_IP)) { + rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); + return; + } else if (skb->protocol == htons(ETH_P_IPV6)) { +#if IS_ENABLED(CONFIG_IPV6) + rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); + return; +#else + goto sw_csum; +#endif + } + } + +sw_csum: + ul_header->csum_start_offset = 0; + ul_header->csum_insert_offset = 0; + ul_header->csum_enabled = 0; + ul_header->udp_ip4_ind = 0; +} diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h index d21428078504..de0143eaa05a 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h @@ -21,6 +21,8 @@ /* Constants */ #define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(0) #define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(1) +#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(2) +#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(3) /* Replace skb->dev to a virtual rmnet device and pass up the stack */ #define RMNET_EPMODE_VND (1) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 5bb29f44d114..570a227acdd8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -188,6 +188,10 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, if (rmnet_get_endpoint(port, id)) return -EBUSY; + rmnet_dev->hw_features = NETIF_F_RXCSUM; + rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + rmnet_dev->hw_features |= NETIF_F_SG; + rc = register_netdevice(rmnet_dev); if (!rc) { ep->egress_dev = rmnet_dev; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index e7ab23e87de2..81045dfa1cd8 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -748,8 +748,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, mss = skb_shinfo(skb)->gso_size; if (mss > MSSMask) { - WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n", - mss); + netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n", + mss); goto out_dma_error; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 857f67bebc6a..272c5962e4f7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1675,33 +1675,24 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp) } } -static void __rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr, bool pm) +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) { if (tp->link_ok(ioaddr)) { rtl_link_chg_patch(tp); /* This is to cancel a scheduled suspend if there's one. 
*/ - if (pm) - pm_request_resume(&tp->pci_dev->dev); + pm_request_resume(&tp->pci_dev->dev); netif_carrier_on(dev); if (net_ratelimit()) netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); - if (pm) - pm_schedule_suspend(&tp->pci_dev->dev, 5000); + pm_runtime_idle(&tp->pci_dev->dev); } } -static void rtl8169_check_link_status(struct net_device *dev, - struct rtl8169_private *tp, - void __iomem *ioaddr) -{ - __rtl8169_check_link_status(dev, tp, ioaddr, false); -} - #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl8169_get_wol(struct rtl8169_private *tp) @@ -7746,7 +7737,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp) rtl8169_pcierr_interrupt(dev); if (status & LinkChg) - __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); rtl_irq_enable_all(tp); } @@ -7959,7 +7950,7 @@ static int rtl_open(struct net_device *dev) rtl_unlock_work(tp); tp->saved_wolopts = 0; - pm_runtime_put_noidle(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); rtl8169_check_link_status(dev, tp, ioaddr); out: @@ -8103,8 +8094,10 @@ static int rtl8169_runtime_suspend(struct device *device) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - if (!tp->TxDescArray) + if (!tp->TxDescArray) { + rtl_pll_power_down(tp); return 0; + } rtl_lock_work(tp); tp->saved_wolopts = __rtl8169_get_wol(tp); @@ -8146,9 +8139,11 @@ static int rtl8169_runtime_idle(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - return tp->TxDescArray ? -EBUSY : 0; + if (!netif_running(dev) || !netif_carrier_ok(dev)) + pm_schedule_suspend(device, 10000); + + return -EBUSY; } static const struct dev_pm_ops rtl8169_pm_ops = { @@ -8195,9 +8190,6 @@ static void rtl_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = &pdev->dev; - - pm_runtime_get_sync(d); rtl8169_net_suspend(dev); @@ -8215,8 +8207,6 @@ static void rtl_shutdown(struct pci_dev *pdev) pci_wake_from_d3(pdev, true); pci_set_power_state(pdev, PCI_D3hot); } - - pm_runtime_put_noidle(d); } static void rtl_remove_one(struct pci_dev *pdev) @@ -8701,11 +8691,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl8168_driver_start(tp); } - if (pci_dev_run_wake(pdev)) - pm_runtime_put_noidle(&pdev->dev); - netif_carrier_off(dev); + if (pci_dev_run_wake(pdev)) + pm_runtime_put_sync(&pdev->dev); + return 0; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 009780df664b..c87f57ca4437 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2205,8 +2205,7 @@ out_dma_free: if (chip_id != RCAR_GEN2) ravb_ptp_stop(ndev); out_release: - if (ndev) - free_netdev(ndev); + free_netdev(ndev); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 75323000c364..7aa1c12750b3 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = { [FWNLCR0] = 0x0090, [FWALCR0] = 0x0094, [TXNLCR1] = 0x00a0, - [TXALCR1] = 0x00a0, + [TXALCR1] = 
0x00a4, [RXNLCR1] = 0x00a8, [RXALCR1] = 0x00ac, [FWNLCR1] = 0x00b0, @@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { [FWNLCR0] = 0x0090, [FWALCR0] = 0x0094, [TXNLCR1] = 0x00a0, - [TXALCR1] = 0x00a0, + [TXALCR1] = 0x00a4, [RXNLCR1] = 0x00a8, [RXALCR1] = 0x00ac, [FWNLCR1] = 0x00b0, @@ -3225,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* ioremap the TSU registers */ if (mdp->cd->tsu) { struct resource *rtsu; + rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); - mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); - if (IS_ERR(mdp->tsu_addr)) { - ret = PTR_ERR(mdp->tsu_addr); + if (!rtsu) { + dev_err(&pdev->dev, "no TSU resource\n"); + ret = -ENODEV; + goto out_release; + } + /* We can only request the TSU region for the first port + * of the two sharing this TSU for the probe to succeed... + */ + if (devno % 2 == 0 && + !devm_request_mem_region(&pdev->dev, rtsu->start, + resource_size(rtsu), + dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "can't request TSU resource.\n"); + ret = -EBUSY; + goto out_release; + } + mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, + resource_size(rtsu)); + if (!mdp->tsu_addr) { + dev_err(&pdev->dev, "TSU region ioremap() failed.\n"); + ret = -ENOMEM; goto out_release; } mdp->port = devno % 2; ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER; } - /* initialize first or needed device */ - if (!devno || pd->needs_init) { + /* Need to init only the first port of the two sharing a TSU */ + if (devno % 2 == 0) { if (mdp->cd->chip_reset) mdp->cd->chip_reset(ndev); @@ -3282,8 +3301,7 @@ out_napi_del: out_release: /* net_dev free */ - if (ndev) - free_netdev(ndev); + free_netdev(ndev); pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3780161de5a1..12f0abc30cb1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -953,31 +953,42 @@ void efx_link_status_changed(struct efx_nic *efx) netif_info(efx, link, efx->net_dev, "link down\n"); } -void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) +void efx_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising) { - efx->link_advertising = advertising; - if (advertising) { - if (advertising & ADVERTISED_Pause) - efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); - else - efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); - if (advertising & ADVERTISED_Asym_Pause) - efx->wanted_fc ^= EFX_FC_TX; - } + memcpy(efx->link_advertising, advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); + + efx->link_advertising[0] |= ADVERTISED_Autoneg; + if (advertising[0] & ADVERTISED_Pause) + efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); + else + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); + if (advertising[0] & ADVERTISED_Asym_Pause) + efx->wanted_fc ^= EFX_FC_TX; +} + +/* Equivalent to efx_link_set_advertising with all-zeroes, except does not + * force the Autoneg bit on. 
+ */ +void efx_link_clear_advertising(struct efx_nic *efx) +{ + bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); } void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) { efx->wanted_fc = wanted_fc; - if (efx->link_advertising) { + if (efx->link_advertising[0]) { if (wanted_fc & EFX_FC_RX) - efx->link_advertising |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + efx->link_advertising[0] |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); else - efx->link_advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + efx->link_advertising[0] &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); if (wanted_fc & EFX_FC_TX) - efx->link_advertising ^= ADVERTISED_Asym_Pause; + efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; } } diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 16da3e9a6000..0cddc5ad77b1 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -258,7 +258,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel) } void efx_link_status_changed(struct efx_nic *efx); -void efx_link_set_advertising(struct efx_nic *efx, u32); +void efx_link_set_advertising(struct efx_nic *efx, + const unsigned long *advertising); +void efx_link_clear_advertising(struct efx_nic *efx); void efx_link_set_wanted_fc(struct efx_nic *efx, u8); static inline void efx_device_detach_sync(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 3747b5644110..4db2dc2bf52f 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -720,7 +720,7 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, goto out; } - if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { + if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) { netif_dbg(efx, drv, efx->net_dev, "Autonegotiation is disabled\n"); rc = -EINVAL; @@ -732,10 +732,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev, (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) efx->type->prepare_enable_fc_tx(efx); - old_adv = efx->link_advertising; + old_adv = efx->link_advertising[0]; old_fc = efx->wanted_fc; efx_link_set_wanted_fc(efx, wanted_fc); - if (efx->link_advertising != old_adv || + if (efx->link_advertising[0] != old_adv || (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { rc = efx->phy_op->reconfigure(efx); if (rc) { diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 65ee1a468170..ce8aabf9091e 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -171,89 +171,108 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev, return 0; } -static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) +static void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset) { - u32 result = 0; + #define SET_BIT(name) __set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS); switch (media) { case MC_CMD_MEDIA_KX4: - result |= SUPPORTED_Backplane; + SET_BIT(Backplane); if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseKX_Full; + SET_BIT(1000baseKX_Full); if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseKX4_Full; + SET_BIT(10000baseKX4_Full); if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) - result |= SUPPORTED_40000baseKR4_Full; + SET_BIT(40000baseKR4_Full); break; case MC_CMD_MEDIA_XFP: case 
MC_CMD_MEDIA_SFP_PLUS: case MC_CMD_MEDIA_QSFP_PLUS: - result |= SUPPORTED_FIBRE; + SET_BIT(FIBRE); if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseT_Full; + SET_BIT(1000baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseT_Full; + SET_BIT(10000baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) - result |= SUPPORTED_40000baseCR4_Full; + SET_BIT(40000baseCR4_Full); + if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) + SET_BIT(100000baseCR4_Full); + if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) + SET_BIT(25000baseCR_Full); + if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN)) + SET_BIT(50000baseCR2_Full); break; case MC_CMD_MEDIA_BASE_T: - result |= SUPPORTED_TP; + SET_BIT(TP); if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) - result |= SUPPORTED_10baseT_Half; + SET_BIT(10baseT_Half); if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) - result |= SUPPORTED_10baseT_Full; + SET_BIT(10baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) - result |= SUPPORTED_100baseT_Half; + SET_BIT(100baseT_Half); if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) - result |= SUPPORTED_100baseT_Full; + SET_BIT(100baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) - result |= SUPPORTED_1000baseT_Half; + SET_BIT(1000baseT_Half); if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) - result |= SUPPORTED_1000baseT_Full; + SET_BIT(1000baseT_Full); if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) - result |= SUPPORTED_10000baseT_Full; + SET_BIT(10000baseT_Full); break; } if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) - result |= SUPPORTED_Pause; + SET_BIT(Pause); if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) - result |= SUPPORTED_Asym_Pause; + SET_BIT(Asym_Pause); if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) - result |= SUPPORTED_Autoneg; + SET_BIT(Autoneg); - return result; + #undef SET_BIT } -static u32 ethtool_to_mcdi_cap(u32 cap) +static u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset) { u32 result = 0; - if (cap & SUPPORTED_10baseT_Half) + #define TEST_BIT(name) test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \ + linkset) + + if (TEST_BIT(10baseT_Half)) result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); - if (cap & SUPPORTED_10baseT_Full) + if (TEST_BIT(10baseT_Full)) result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); - if (cap & SUPPORTED_100baseT_Half) + if (TEST_BIT(100baseT_Half)) result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); - if (cap & SUPPORTED_100baseT_Full) + if (TEST_BIT(100baseT_Full)) result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); - if (cap & SUPPORTED_1000baseT_Half) + if (TEST_BIT(1000baseT_Half)) result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); - if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) + if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full)) result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); - if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) + if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full)) result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); - if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full)) + if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full)) result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN); - if (cap & SUPPORTED_Pause) + if (TEST_BIT(100000baseCR4_Full)) + result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN); + if (TEST_BIT(25000baseCR_Full)) + result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN); + if (TEST_BIT(50000baseCR2_Full)) + result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN); + if (TEST_BIT(Pause)) result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); - if (cap & SUPPORTED_Asym_Pause) + if (TEST_BIT(Asym_Pause)) result 
|= (1 << MC_CMD_PHY_CAP_ASYM_LBN); - if (cap & SUPPORTED_Autoneg) + if (TEST_BIT(Autoneg)) result |= (1 << MC_CMD_PHY_CAP_AN_LBN); + #undef TEST_BIT + return result; } @@ -285,7 +304,7 @@ static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) return flags; } -static u32 mcdi_to_ethtool_media(u32 media) +static u8 mcdi_to_ethtool_media(u32 media) { switch (media) { case MC_CMD_MEDIA_XAUI: @@ -371,8 +390,8 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) - efx->link_advertising = - mcdi_to_ethtool_cap(phy_data->media, caps); + mcdi_to_ethtool_linkset(phy_data->media, caps, + efx->link_advertising); else phy_data->forced_cap = caps; @@ -435,8 +454,8 @@ fail: int efx_mcdi_port_reconfigure(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u32 caps = (efx->link_advertising ? - ethtool_to_mcdi_cap(efx->link_advertising) : + u32 caps = (efx->link_advertising[0] ? + ethtool_linkset_to_mcdi_cap(efx->link_advertising) : phy_cfg->forced_cap); return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), @@ -509,34 +528,28 @@ static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN); int rc; - u32 supported, advertising, lp_advertising; - supported = mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); - advertising = efx->link_advertising; cmd->base.speed = efx->link_state.speed; cmd->base.duplex = efx->link_state.fd; cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media); cmd->base.phy_address = phy_cfg->port; - cmd->base.autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); + cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg); cmd->base.mdio_support = (efx->mdio.mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap, + cmd->link_modes.supported); + memcpy(cmd->link_modes.advertising, efx->link_advertising, + sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK())); BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, outbuf, sizeof(outbuf), NULL); if (rc) return; - lp_advertising = - mcdi_to_ethtool_cap(phy_cfg->media, - MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, - lp_advertising); + mcdi_to_ethtool_linkset(phy_cfg->media, + MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP), + cmd->link_modes.lp_advertising); } static int @@ -546,29 +559,28 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; u32 caps; int rc; - u32 advertising; - - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); if (cmd->base.autoneg) { - caps = (ethtool_to_mcdi_cap(advertising) | - 1 << MC_CMD_PHY_CAP_AN_LBN); + caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) | + 1 << MC_CMD_PHY_CAP_AN_LBN); } else if (cmd->base.duplex) { switch (cmd->base.speed) { - case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; - case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; - case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; - case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; - case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; - 
default: return -EINVAL; + case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; + case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; + case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break; + case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break; + case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break; + case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break; + default: return -EINVAL; } } else { switch (cmd->base.speed) { - case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; - case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; - case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; - default: return -EINVAL; + case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; + case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; + case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; + default: return -EINVAL; } } @@ -578,11 +590,10 @@ efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, return rc; if (cmd->base.autoneg) { - efx_link_set_advertising( - efx, advertising | ADVERTISED_Autoneg); + efx_link_set_advertising(efx, cmd->link_modes.advertising); phy_cfg->forced_cap = 0; } else { - efx_link_set_advertising(efx, 0); + efx_link_clear_advertising(efx); phy_cfg->forced_cap = caps; } return 0; @@ -985,6 +996,9 @@ static unsigned int efx_mcdi_event_link_speed[] = { [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, [MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000, + [MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000, + [MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000, + [MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000, }; void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 4cedc5c4c6d9..3dd42f3136fe 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -937,7 +937,7 @@ struct efx_nic { unsigned int mdio_bus; enum efx_phy_mode phy_mode; - u32 link_advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising); struct efx_link_state link_state; unsigned int n_link_state_changes; diff --git a/drivers/net/ethernet/socionext/Kconfig b/drivers/net/ethernet/socionext/Kconfig new file mode 100644 index 000000000000..6bcfe27fc560 --- /dev/null +++ b/drivers/net/ethernet/socionext/Kconfig @@ -0,0 +1,34 @@ +config NET_VENDOR_SOCIONEXT + bool "Socionext ethernet drivers" + default y + ---help--- + Option to select ethernet drivers for Socionext platforms. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Socionext devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_SOCIONEXT + +config SNI_AVE + tristate "Socionext AVE ethernet support" + depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF + select PHYLIB + ---help--- + Driver for gigabit ethernet MACs, called AVE, in the + Socionext UniPhier family. + +config SNI_NETSEC + tristate "Socionext NETSEC ethernet support" + depends on (ARCH_SYNQUACER || COMPILE_TEST) && OF + select PHYLIB + select MII + ---help--- + Enable to add support for the SocioNext NetSec Gigabit Ethernet + controller + PHY, as found on the Synquacer SC2A11 SoC + + To compile this driver as a module, choose M here: the module will be + called netsec. If unsure, say N. 
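An aside on the sfc rework further up: the efx.c and mcdi_port.c hunks drop the single u32 of ADVERTISED_* flags in favor of an ethtool link-mode bitmap, since the newly handled 25G/50G/100G modes sit at bit 31 and above and have no legacy u32 equivalents. A minimal sketch of the two-way translation pattern, assuming hypothetical bit positions in place of the MC_CMD_PHY_CAP_*_LBN and ETHTOOL_LINK_MODE_*_BIT constants and a single uint64_t in place of the kernel's unsigned-long mask array:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical firmware capability bits (stand-ins for MC_CMD_PHY_CAP_*_LBN). */
enum { CAP_1000FDX_LBN, CAP_10000FDX_LBN, CAP_25000FDX_LBN, CAP_AN_LBN };

/* Hypothetical link-mode bits (stand-ins for ETHTOOL_LINK_MODE_*_BIT);
 * the 25G and faster modes are the ones a u32 cannot hold. */
enum {
	LM_1000baseT_Full_BIT = 5,
	LM_Autoneg_BIT = 6,
	LM_10000baseT_Full_BIT = 12,
	LM_25000baseCR_Full_BIT = 31,
};

static void set_bit64(int nr, uint64_t *map) { *map |= 1ULL << nr; }
static int test_bit64(int nr, const uint64_t *map) { return (*map >> nr) & 1; }

/* Capability word -> link-mode bitmap, cf. mcdi_to_ethtool_linkset(). */
static void caps_to_linkset(uint32_t cap, uint64_t *linkset)
{
	*linkset = 0;
	if (cap & (1u << CAP_1000FDX_LBN))
		set_bit64(LM_1000baseT_Full_BIT, linkset);
	if (cap & (1u << CAP_10000FDX_LBN))
		set_bit64(LM_10000baseT_Full_BIT, linkset);
	if (cap & (1u << CAP_25000FDX_LBN))
		set_bit64(LM_25000baseCR_Full_BIT, linkset);
	if (cap & (1u << CAP_AN_LBN))
		set_bit64(LM_Autoneg_BIT, linkset);
}

/* Link-mode bitmap -> capability word, cf. ethtool_linkset_to_mcdi_cap(). */
static uint32_t linkset_to_caps(const uint64_t *linkset)
{
	uint32_t cap = 0;

	if (test_bit64(LM_1000baseT_Full_BIT, linkset))
		cap |= 1u << CAP_1000FDX_LBN;
	if (test_bit64(LM_10000baseT_Full_BIT, linkset))
		cap |= 1u << CAP_10000FDX_LBN;
	if (test_bit64(LM_25000baseCR_Full_BIT, linkset))
		cap |= 1u << CAP_25000FDX_LBN;
	if (test_bit64(LM_Autoneg_BIT, linkset))
		cap |= 1u << CAP_AN_LBN;
	return cap;
}

int main(void)
{
	uint32_t cap = (1u << CAP_25000FDX_LBN) | (1u << CAP_AN_LBN);
	uint64_t linkset;

	caps_to_linkset(cap, &linkset);
	printf("25G advertised: %d, round trip ok: %d\n",
	       test_bit64(LM_25000baseCR_Full_BIT, &linkset),
	       linkset_to_caps(&linkset) == cap);
	return 0;
}

Confining the bit mapping to one pair of helpers, as mcdi_to_ethtool_linkset() and ethtool_linkset_to_mcdi_cap() do, lets the rest of the driver treat link_advertising as an opaque mask.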
+ +endif #NET_VENDOR_SOCIONEXT diff --git a/drivers/net/ethernet/socionext/Makefile b/drivers/net/ethernet/socionext/Makefile new file mode 100644 index 000000000000..7fd837a999fd --- /dev/null +++ b/drivers/net/ethernet/socionext/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for all ethernet ip drivers on Socionext platforms +# +obj-$(CONFIG_SNI_AVE) += sni_ave.o +obj-$(CONFIG_SNI_NETSEC) += netsec.o diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c new file mode 100644 index 000000000000..6c263af86b8a --- /dev/null +++ b/drivers/net/ethernet/socionext/netsec.c @@ -0,0 +1,1777 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/types.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/acpi.h> +#include <linux/of_mdio.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#include <net/tcp.h> +#include <net/ip6_checksum.h> + +#define NETSEC_REG_SOFT_RST 0x104 +#define NETSEC_REG_COM_INIT 0x120 + +#define NETSEC_REG_TOP_STATUS 0x200 +#define NETSEC_IRQ_RX BIT(1) +#define NETSEC_IRQ_TX BIT(0) + +#define NETSEC_REG_TOP_INTEN 0x204 +#define NETSEC_REG_INTEN_SET 0x234 +#define NETSEC_REG_INTEN_CLR 0x238 + +#define NETSEC_REG_NRM_TX_STATUS 0x400 +#define NETSEC_REG_NRM_TX_INTEN 0x404 +#define NETSEC_REG_NRM_TX_INTEN_SET 0x428 +#define NETSEC_REG_NRM_TX_INTEN_CLR 0x42c +#define NRM_TX_ST_NTOWNR BIT(17) +#define NRM_TX_ST_TR_ERR BIT(16) +#define NRM_TX_ST_TXDONE BIT(15) +#define NRM_TX_ST_TMREXP BIT(14) + +#define NETSEC_REG_NRM_RX_STATUS 0x440 +#define NETSEC_REG_NRM_RX_INTEN 0x444 +#define NETSEC_REG_NRM_RX_INTEN_SET 0x468 +#define NETSEC_REG_NRM_RX_INTEN_CLR 0x46c +#define NRM_RX_ST_RC_ERR BIT(16) +#define NRM_RX_ST_PKTCNT BIT(15) +#define NRM_RX_ST_TMREXP BIT(14) + +#define NETSEC_REG_PKT_CMD_BUF 0xd0 + +#define NETSEC_REG_CLK_EN 0x100 + +#define NETSEC_REG_PKT_CTRL 0x140 + +#define NETSEC_REG_DMA_TMR_CTRL 0x20c +#define NETSEC_REG_F_TAIKI_MC_VER 0x22c +#define NETSEC_REG_F_TAIKI_VER 0x230 +#define NETSEC_REG_DMA_HM_CTRL 0x214 +#define NETSEC_REG_DMA_MH_CTRL 0x220 +#define NETSEC_REG_ADDR_DIS_CORE 0x218 +#define NETSEC_REG_DMAC_HM_CMD_BUF 0x210 +#define NETSEC_REG_DMAC_MH_CMD_BUF 0x21c + +#define NETSEC_REG_NRM_TX_PKTCNT 0x410 + +#define NETSEC_REG_NRM_TX_DONE_PKTCNT 0x414 +#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT 0x418 + +#define NETSEC_REG_NRM_TX_TMR 0x41c + +#define NETSEC_REG_NRM_RX_PKTCNT 0x454 +#define NETSEC_REG_NRM_RX_RXINT_PKTCNT 0x458 +#define NETSEC_REG_NRM_TX_TXINT_TMR 0x420 +#define NETSEC_REG_NRM_RX_RXINT_TMR 0x460 + +#define NETSEC_REG_NRM_RX_TMR 0x45c + +#define NETSEC_REG_NRM_TX_DESC_START_UP 0x434 +#define NETSEC_REG_NRM_TX_DESC_START_LW 0x408 +#define NETSEC_REG_NRM_RX_DESC_START_UP 0x474 +#define NETSEC_REG_NRM_RX_DESC_START_LW 0x448 + +#define NETSEC_REG_NRM_TX_CONFIG 0x430 +#define NETSEC_REG_NRM_RX_CONFIG 0x470 + +#define MAC_REG_STATUS 0x1024 +#define MAC_REG_DATA 0x11c0 +#define MAC_REG_CMD 0x11c4 +#define MAC_REG_FLOW_TH 0x11cc +#define MAC_REG_INTF_SEL 0x11d4 +#define MAC_REG_DESC_INIT 0x11fc +#define MAC_REG_DESC_SOFT_RST 0x1204 +#define NETSEC_REG_MODE_TRANS_COMP_STATUS 0x500 + +#define GMAC_REG_MCR 0x0000 +#define GMAC_REG_MFFR 0x0004 +#define GMAC_REG_GAR 0x0010 +#define GMAC_REG_GDR 0x0014 +#define GMAC_REG_FCR 0x0018 +#define GMAC_REG_BMR 0x1000 +#define GMAC_REG_RDLAR 0x100c +#define GMAC_REG_TDLAR 0x1010 +#define GMAC_REG_OMR 0x1018 + +#define MHZ(n) ((n) * 1000 * 1000) + +#define 
NETSEC_TX_SHIFT_OWN_FIELD 31 +#define NETSEC_TX_SHIFT_LD_FIELD 30 +#define NETSEC_TX_SHIFT_DRID_FIELD 24 +#define NETSEC_TX_SHIFT_PT_FIELD 21 +#define NETSEC_TX_SHIFT_TDRID_FIELD 16 +#define NETSEC_TX_SHIFT_CC_FIELD 15 +#define NETSEC_TX_SHIFT_FS_FIELD 9 +#define NETSEC_TX_LAST 8 +#define NETSEC_TX_SHIFT_CO 7 +#define NETSEC_TX_SHIFT_SO 6 +#define NETSEC_TX_SHIFT_TRS_FIELD 4 + +#define NETSEC_RX_PKT_OWN_FIELD 31 +#define NETSEC_RX_PKT_LD_FIELD 30 +#define NETSEC_RX_PKT_SDRID_FIELD 24 +#define NETSEC_RX_PKT_FR_FIELD 23 +#define NETSEC_RX_PKT_ER_FIELD 21 +#define NETSEC_RX_PKT_ERR_FIELD 16 +#define NETSEC_RX_PKT_TDRID_FIELD 12 +#define NETSEC_RX_PKT_FS_FIELD 9 +#define NETSEC_RX_PKT_LS_FIELD 8 +#define NETSEC_RX_PKT_CO_FIELD 6 + +#define NETSEC_RX_PKT_ERR_MASK 3 + +#define NETSEC_MAX_TX_PKT_LEN 1518 +#define NETSEC_MAX_TX_JUMBO_PKT_LEN 9018 + +#define NETSEC_RING_GMAC 15 +#define NETSEC_RING_MAX 2 + +#define NETSEC_TCP_SEG_LEN_MAX 1460 +#define NETSEC_TCP_JUMBO_SEG_LEN_MAX 8960 + +#define NETSEC_RX_CKSUM_NOTAVAIL 0 +#define NETSEC_RX_CKSUM_OK 1 +#define NETSEC_RX_CKSUM_NG 2 + +#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END BIT(20) +#define NETSEC_IRQ_TRANSITION_COMPLETE BIT(4) + +#define NETSEC_MODE_TRANS_COMP_IRQ_N2T BIT(20) +#define NETSEC_MODE_TRANS_COMP_IRQ_T2N BIT(19) + +#define NETSEC_INT_PKTCNT_MAX 2047 + +#define NETSEC_FLOW_START_TH_MAX 95 +#define NETSEC_FLOW_STOP_TH_MAX 95 +#define NETSEC_FLOW_PAUSE_TIME_MIN 5 + +#define NETSEC_CLK_EN_REG_DOM_ALL 0x3f + +#define NETSEC_PKT_CTRL_REG_MODE_NRM BIT(28) +#define NETSEC_PKT_CTRL_REG_EN_JUMBO BIT(27) +#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER BIT(3) +#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE BIT(2) +#define NETSEC_PKT_CTRL_REG_LOG_HD_ER BIT(1) +#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH BIT(0) + +#define NETSEC_CLK_EN_REG_DOM_G BIT(5) +#define NETSEC_CLK_EN_REG_DOM_C BIT(1) +#define NETSEC_CLK_EN_REG_DOM_D BIT(0) + +#define NETSEC_COM_INIT_REG_DB BIT(2) +#define NETSEC_COM_INIT_REG_CLS BIT(1) +#define NETSEC_COM_INIT_REG_ALL (NETSEC_COM_INIT_REG_CLS | \ + NETSEC_COM_INIT_REG_DB) + +#define NETSEC_SOFT_RST_REG_RESET 0 +#define NETSEC_SOFT_RST_REG_RUN BIT(31) + +#define NETSEC_DMA_CTRL_REG_STOP 1 +#define MH_CTRL__MODE_TRANS BIT(20) + +#define NETSEC_GMAC_CMD_ST_READ 0 +#define NETSEC_GMAC_CMD_ST_WRITE BIT(28) +#define NETSEC_GMAC_CMD_ST_BUSY BIT(31) + +#define NETSEC_GMAC_BMR_REG_COMMON 0x00412080 +#define NETSEC_GMAC_BMR_REG_RESET 0x00020181 +#define NETSEC_GMAC_BMR_REG_SWR 0x00000001 + +#define NETSEC_GMAC_OMR_REG_ST BIT(13) +#define NETSEC_GMAC_OMR_REG_SR BIT(1) + +#define NETSEC_GMAC_MCR_REG_IBN BIT(30) +#define NETSEC_GMAC_MCR_REG_CST BIT(25) +#define NETSEC_GMAC_MCR_REG_JE BIT(20) +#define NETSEC_MCR_PS BIT(15) +#define NETSEC_GMAC_MCR_REG_FES BIT(14) +#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON 0x0000280c +#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON 0x0001a00c + +#define NETSEC_FCR_RFE BIT(2) +#define NETSEC_FCR_TFE BIT(1) + +#define NETSEC_GMAC_GAR_REG_GW BIT(1) +#define NETSEC_GMAC_GAR_REG_GB BIT(0) + +#define NETSEC_GMAC_GAR_REG_SHIFT_PA 11 +#define NETSEC_GMAC_GAR_REG_SHIFT_GR 6 +#define GMAC_REG_SHIFT_CR_GAR 2 + +#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ 2 +#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ 3 +#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ 0 +#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ 1 +#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ 4 +#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ 5 + +#define NETSEC_GMAC_RDLAR_REG_COMMON 0x18000 +#define NETSEC_GMAC_TDLAR_REG_COMMON 0x1c000 + +#define NETSEC_REG_NETSEC_VER_F_TAIKI 0x50000 + 
+#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP BIT(31) +#define NETSEC_REG_DESC_RING_CONFIG_CH_RST BIT(30) +#define NETSEC_REG_DESC_TMR_MODE 4 +#define NETSEC_REG_DESC_ENDIAN 0 + +#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST 1 +#define NETSEC_MAC_DESC_INIT_REG_INIT 1 + +#define NETSEC_EEPROM_MAC_ADDRESS 0x00 +#define NETSEC_EEPROM_HM_ME_ADDRESS_H 0x08 +#define NETSEC_EEPROM_HM_ME_ADDRESS_L 0x0C +#define NETSEC_EEPROM_HM_ME_SIZE 0x10 +#define NETSEC_EEPROM_MH_ME_ADDRESS_H 0x14 +#define NETSEC_EEPROM_MH_ME_ADDRESS_L 0x18 +#define NETSEC_EEPROM_MH_ME_SIZE 0x1C +#define NETSEC_EEPROM_PKT_ME_ADDRESS 0x20 +#define NETSEC_EEPROM_PKT_ME_SIZE 0x24 + +#define DESC_NUM 128 +#define NAPI_BUDGET (DESC_NUM / 2) + +#define DESC_SZ sizeof(struct netsec_de) + +#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000) + +enum ring_id { + NETSEC_RING_TX = 0, + NETSEC_RING_RX +}; + +struct netsec_desc { + struct sk_buff *skb; + dma_addr_t dma_addr; + void *addr; + u16 len; +}; + +struct netsec_desc_ring { + phys_addr_t desc_phys; + struct netsec_desc *desc; + void *vaddr; + u16 pkt_cnt; + u16 head, tail; +}; + +struct netsec_priv { + struct netsec_desc_ring desc_ring[NETSEC_RING_MAX]; + struct ethtool_coalesce et_coalesce; + spinlock_t reglock; /* protect reg access */ + struct napi_struct napi; + phy_interface_t phy_interface; + struct net_device *ndev; + struct device_node *phy_np; + struct phy_device *phydev; + struct mii_bus *mii_bus; + void __iomem *ioaddr; + void __iomem *eeprom_base; + struct device *dev; + struct clk *clk; + u32 msg_enable; + u32 freq; + bool rx_cksum_offload_flag; +}; + +struct netsec_de { /* Netsec Descriptor layout */ + u32 attr; + u32 data_buf_addr_up; + u32 data_buf_addr_lw; + u32 buf_len_info; +}; + +struct netsec_tx_pkt_ctrl { + u16 tcp_seg_len; + bool tcp_seg_offload_flag; + bool cksum_offload_flag; +}; + +struct netsec_rx_pkt_info { + int rx_cksum_result; + int err_code; + bool err_flag; +}; + +static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val) +{ + writel(val, priv->ioaddr + reg_addr); +} + +static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr) +{ + return readl(priv->ioaddr + reg_addr); +} + +/************* MDIO BUS OPS FOLLOW *************/ + +#define TIMEOUT_SPINS_MAC 1000 +#define TIMEOUT_SECONDARY_MS_MAC 100 + +static u32 netsec_clk_type(u32 freq) +{ + if (freq < MHZ(35)) + return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ; + if (freq < MHZ(60)) + return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ; + if (freq < MHZ(100)) + return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ; + if (freq < MHZ(150)) + return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ; + if (freq < MHZ(250)) + return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ; + + return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ; +} + +static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask) +{ + u32 timeout = TIMEOUT_SPINS_MAC; + + while (--timeout && netsec_read(priv, addr) & mask) + cpu_relax(); + if (timeout) + return 0; + + timeout = TIMEOUT_SECONDARY_MS_MAC; + while (--timeout && netsec_read(priv, addr) & mask) + usleep_range(1000, 2000); + + if (timeout) + return 0; + + netdev_WARN(priv->ndev, "%s: timeout\n", __func__); + + return -ETIMEDOUT; +} + +static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value) +{ + netsec_write(priv, MAC_REG_DATA, value); + netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE); + return netsec_wait_while_busy(priv, + MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); +} + +static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read) +{ + int ret; + + 
netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ); + ret = netsec_wait_while_busy(priv, + MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY); + if (ret) + return ret; + + *read = netsec_read(priv, MAC_REG_DATA); + + return 0; +} + +static int netsec_mac_wait_while_busy(struct netsec_priv *priv, + u32 addr, u32 mask) +{ + u32 timeout = TIMEOUT_SPINS_MAC; + int ret, data; + + do { + ret = netsec_mac_read(priv, addr, &data); + if (ret) + break; + cpu_relax(); + } while (--timeout && (data & mask)); + + if (timeout) + return 0; + + timeout = TIMEOUT_SECONDARY_MS_MAC; + do { + usleep_range(1000, 2000); + + ret = netsec_mac_read(priv, addr, &data); + if (ret) + break; + cpu_relax(); + } while (--timeout && (data & mask)); + + if (timeout && !ret) + return 0; + + netdev_WARN(priv->ndev, "%s: timeout\n", __func__); + + return -ETIMEDOUT; +} + +static int netsec_mac_update_to_phy_state(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->ndev->phydev; + u32 value = 0; + + value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON : + NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON; + + if (phydev->speed != SPEED_1000) + value |= NETSEC_MCR_PS; + + if (priv->phy_interface != PHY_INTERFACE_MODE_GMII && + phydev->speed == SPEED_100) + value |= NETSEC_GMAC_MCR_REG_FES; + + value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE; + + if (phy_interface_mode_is_rgmii(priv->phy_interface)) + value |= NETSEC_GMAC_MCR_REG_IBN; + + if (netsec_mac_write(priv, GMAC_REG_MCR, value)) + return -ETIMEDOUT; + + return 0; +} + +static int netsec_phy_write(struct mii_bus *bus, + int phy_addr, int reg, u16 val) +{ + struct netsec_priv *priv = bus->priv; + + if (netsec_mac_write(priv, GMAC_REG_GDR, val)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_GAR, + phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | + reg << NETSEC_GMAC_GAR_REG_SHIFT_GR | + NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB | + (netsec_clk_type(priv->freq) << + GMAC_REG_SHIFT_CR_GAR))) + return -ETIMEDOUT; + + return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, + NETSEC_GMAC_GAR_REG_GB); +} + +static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr) +{ + struct netsec_priv *priv = bus->priv; + u32 data; + int ret; + + if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB | + phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA | + reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR | + (netsec_clk_type(priv->freq) << + GMAC_REG_SHIFT_CR_GAR))) + return -ETIMEDOUT; + + ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR, + NETSEC_GMAC_GAR_REG_GB); + if (ret) + return ret; + + ret = netsec_mac_read(priv, GMAC_REG_GDR, &data); + if (ret) + return ret; + + return data; +} + +/************* ETHTOOL_OPS FOLLOW *************/ + +static void netsec_et_get_drvinfo(struct net_device *net_device, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "netsec", sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(net_device->dev.parent), + sizeof(info->bus_info)); +} + +static int netsec_et_get_coalesce(struct net_device *net_device, + struct ethtool_coalesce *et_coalesce) +{ + struct netsec_priv *priv = netdev_priv(net_device); + + *et_coalesce = priv->et_coalesce; + + return 0; +} + +static int netsec_et_set_coalesce(struct net_device *net_device, + struct ethtool_coalesce *et_coalesce) +{ + struct netsec_priv *priv = netdev_priv(net_device); + + priv->et_coalesce = *et_coalesce; + + if (priv->et_coalesce.tx_coalesce_usecs < 50) + priv->et_coalesce.tx_coalesce_usecs = 50; + if (priv->et_coalesce.tx_max_coalesced_frames < 1) + 
priv->et_coalesce.tx_max_coalesced_frames = 1; + + netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT, + priv->et_coalesce.tx_max_coalesced_frames); + netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR, + priv->et_coalesce.tx_coalesce_usecs); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP); + + if (priv->et_coalesce.rx_coalesce_usecs < 50) + priv->et_coalesce.rx_coalesce_usecs = 50; + if (priv->et_coalesce.rx_max_coalesced_frames < 1) + priv->et_coalesce.rx_max_coalesced_frames = 1; + + netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT, + priv->et_coalesce.rx_max_coalesced_frames); + netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR, + priv->et_coalesce.rx_coalesce_usecs); + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT); + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP); + + return 0; +} + +static u32 netsec_et_get_msglevel(struct net_device *dev) +{ + struct netsec_priv *priv = netdev_priv(dev); + + return priv->msg_enable; +} + +static void netsec_et_set_msglevel(struct net_device *dev, u32 datum) +{ + struct netsec_priv *priv = netdev_priv(dev); + + priv->msg_enable = datum; +} + +static const struct ethtool_ops netsec_ethtool_ops = { + .get_drvinfo = netsec_et_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_link = ethtool_op_get_link, + .get_coalesce = netsec_et_get_coalesce, + .set_coalesce = netsec_et_set_coalesce, + .get_msglevel = netsec_et_get_msglevel, + .set_msglevel = netsec_et_set_msglevel, +}; + +/************* NETDEV_OPS FOLLOW *************/ + +static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv, + struct netsec_desc *desc) +{ + struct sk_buff *skb; + + if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) { + skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len); + } else { + desc->len = L1_CACHE_ALIGN(desc->len); + skb = netdev_alloc_skb(priv->ndev, desc->len); + } + if (!skb) + return NULL; + + desc->addr = skb->data; + desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->dev, desc->dma_addr)) { + dev_kfree_skb_any(skb); + return NULL; + } + return skb; +} + +static void netsec_set_rx_de(struct netsec_priv *priv, + struct netsec_desc_ring *dring, u16 idx, + const struct netsec_desc *desc, + struct sk_buff *skb) +{ + struct netsec_de *de = dring->vaddr + DESC_SZ * idx; + u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) | + (1 << NETSEC_RX_PKT_FS_FIELD) | + (1 << NETSEC_RX_PKT_LS_FIELD); + + if (idx == DESC_NUM - 1) + attr |= (1 << NETSEC_RX_PKT_LD_FIELD); + + de->data_buf_addr_up = upper_32_bits(desc->dma_addr); + de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); + de->buf_len_info = desc->len; + de->attr = attr; + dma_wmb(); + + dring->desc[idx].dma_addr = desc->dma_addr; + dring->desc[idx].addr = desc->addr; + dring->desc[idx].len = desc->len; + dring->desc[idx].skb = skb; +} + +static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv, + struct netsec_desc_ring *dring, + u16 idx, + struct netsec_rx_pkt_info *rxpi, + struct netsec_desc *desc, u16 *len) +{ + struct netsec_de de = {}; + + memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ); + + *len = de.buf_len_info >> 16; + + rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1; + rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3; + rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) & + NETSEC_RX_PKT_ERR_MASK; + 
*desc = dring->desc[idx]; + return desc->skb; +} + +static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv, + struct netsec_rx_pkt_info *rxpi, + struct netsec_desc *desc, + u16 *len) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct sk_buff *tmp_skb, *skb = NULL; + struct netsec_desc td; + int tail; + + *rxpi = (struct netsec_rx_pkt_info){}; + + td.len = priv->ndev->mtu + 22; + + tmp_skb = netsec_alloc_skb(priv, &td); + + dma_rmb(); + + tail = dring->tail; + + if (!tmp_skb) { + netsec_set_rx_de(priv, dring, tail, &dring->desc[tail], + dring->desc[tail].skb); + } else { + skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len); + netsec_set_rx_de(priv, dring, tail, &td, tmp_skb); + } + + /* move tail ahead */ + dring->tail = (dring->tail + 1) % DESC_NUM; + + dring->pkt_cnt--; + + return skb; +} + +static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + unsigned int pkts, bytes; + + dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT); + + if (dring->pkt_cnt < budget) + budget = dring->pkt_cnt; + + pkts = 0; + bytes = 0; + + while (pkts < budget) { + struct netsec_desc *desc; + struct netsec_de *entry; + int tail, eop; + + tail = dring->tail; + + /* move tail ahead */ + dring->tail = (tail + 1) % DESC_NUM; + + desc = &dring->desc[tail]; + entry = dring->vaddr + DESC_SZ * tail; + + eop = (entry->attr >> NETSEC_TX_LAST) & 1; + + dma_unmap_single(priv->dev, desc->dma_addr, desc->len, + DMA_TO_DEVICE); + if (eop) { + pkts++; + bytes += desc->skb->len; + dev_kfree_skb(desc->skb); + } + *desc = (struct netsec_desc){}; + } + dring->pkt_cnt -= budget; + + priv->ndev->stats.tx_packets += budget; + priv->ndev->stats.tx_bytes += bytes; + + netdev_completed_queue(priv->ndev, budget, bytes); + + return budget; +} + +static int netsec_process_tx(struct netsec_priv *priv, int budget) +{ + struct net_device *ndev = priv->ndev; + int new, done = 0; + + do { + new = netsec_clean_tx_dring(priv, budget); + done += new; + budget -= new; + } while (new); + + if (done && netif_queue_stopped(ndev)) + netif_wake_queue(ndev); + + return done; +} + +static int netsec_process_rx(struct netsec_priv *priv, int budget) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct net_device *ndev = priv->ndev; + struct netsec_rx_pkt_info rx_info; + int done = 0, rx_num = 0; + struct netsec_desc desc; + struct sk_buff *skb; + u16 len; + + while (done < budget) { + if (!rx_num) { + rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT); + dring->pkt_cnt += rx_num; + + /* move head 'rx_num' */ + dring->head = (dring->head + rx_num) % DESC_NUM; + + rx_num = dring->pkt_cnt; + if (!rx_num) + break; + } + done++; + rx_num--; + skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len); + if (unlikely(!skb) || rx_info.err_flag) { + netif_err(priv, drv, priv->ndev, + "%s: rx fail err(%d)\n", + __func__, rx_info.err_code); + ndev->stats.rx_dropped++; + continue; + } + + dma_unmap_single(priv->dev, desc.dma_addr, desc.len, + DMA_FROM_DEVICE); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, priv->ndev); + + if (priv->rx_cksum_offload_flag && + rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) { + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += len; + } + } + + return done; +} + +static int netsec_napi_poll(struct napi_struct *napi, int budget) +{ + struct 
netsec_priv *priv; + struct net_device *ndev; + int tx, rx, done, todo; + + priv = container_of(napi, struct netsec_priv, napi); + ndev = priv->ndev; + + todo = budget; + do { + if (!todo) + break; + + tx = netsec_process_tx(priv, todo); + todo -= tx; + + if (!todo) + break; + + rx = netsec_process_rx(priv, todo); + todo -= rx; + } while (rx || tx); + + done = budget - todo; + + if (done < budget && napi_complete_done(napi, done)) { + unsigned long flags; + + spin_lock_irqsave(&priv->reglock, flags); + netsec_write(priv, NETSEC_REG_INTEN_SET, + NETSEC_IRQ_RX | NETSEC_IRQ_TX); + spin_unlock_irqrestore(&priv->reglock, flags); + } + + return done; +} + +static void netsec_set_tx_de(struct netsec_priv *priv, + struct netsec_desc_ring *dring, + const struct netsec_tx_pkt_ctrl *tx_ctrl, + const struct netsec_desc *desc, + struct sk_buff *skb) +{ + int idx = dring->head; + struct netsec_de *de; + u32 attr; + + de = dring->vaddr + (DESC_SZ * idx); + + attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) | + (1 << NETSEC_TX_SHIFT_PT_FIELD) | + (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) | + (1 << NETSEC_TX_SHIFT_FS_FIELD) | + (1 << NETSEC_TX_LAST) | + (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) | + (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) | + (1 << NETSEC_TX_SHIFT_TRS_FIELD); + if (idx == DESC_NUM - 1) + attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD); + + de->data_buf_addr_up = upper_32_bits(desc->dma_addr); + de->data_buf_addr_lw = lower_32_bits(desc->dma_addr); + de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len; + de->attr = attr; + dma_wmb(); + + dring->desc[idx] = *desc; + dring->desc[idx].skb = skb; + + /* move head ahead */ + dring->head = (dring->head + 1) % DESC_NUM; +} + +static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + struct netsec_tx_pkt_ctrl tx_ctrl = {}; + struct netsec_desc tx_desc; + u16 tso_seg_len = 0; + int filled; + + /* differentiate between full/empty ring */ + if (dring->head >= dring->tail) + filled = dring->head - dring->tail; + else + filled = dring->head + DESC_NUM - dring->tail; + + if (DESC_NUM - filled < 2) { /* if less than 2 available */ + netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__); + netif_stop_queue(priv->ndev); + dma_wmb(); + return NETDEV_TX_BUSY; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) + tx_ctrl.cksum_offload_flag = true; + + if (skb_is_gso(skb)) + tso_seg_len = skb_shinfo(skb)->gso_size; + + if (tso_seg_len > 0) { + if (skb->protocol == htons(ETH_P_IP)) { + ip_hdr(skb)->tot_len = 0; + tcp_hdr(skb)->check = + ~tcp_v4_check(0, ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0); + } else { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + tx_ctrl.tcp_seg_offload_flag = true; + tx_ctrl.tcp_seg_len = tso_seg_len; + } + + tx_desc.dma_addr = dma_map_single(priv->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) { + netif_err(priv, drv, priv->ndev, + "%s: DMA mapping failed\n", __func__); + ndev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + tx_desc.addr = skb->data; + tx_desc.len = skb_headlen(skb); + + skb_tx_timestamp(skb); + netdev_sent_queue(priv->ndev, skb->len); + + netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb); + netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /*
submit another tx */ + + return NETDEV_TX_OK; +} + +static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + struct netsec_desc *desc; + u16 idx; + + if (!dring->vaddr || !dring->desc) + return; + + for (idx = 0; idx < DESC_NUM; idx++) { + desc = &dring->desc[idx]; + if (!desc->addr) + continue; + + dma_unmap_single(priv->dev, desc->dma_addr, desc->len, + id == NETSEC_RING_RX ? DMA_FROM_DEVICE : + DMA_TO_DEVICE); + dev_kfree_skb(desc->skb); + } + + memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); + memset(dring->vaddr, 0, DESC_SZ * DESC_NUM); + + dring->head = 0; + dring->tail = 0; + dring->pkt_cnt = 0; +} + +static void netsec_free_dring(struct netsec_priv *priv, int id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + + if (dring->vaddr) { + dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM, + dring->vaddr, dring->desc_phys); + dring->vaddr = NULL; + } + + kfree(dring->desc); + dring->desc = NULL; +} + +static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[id]; + int ret = 0; + + dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, + &dring->desc_phys, GFP_KERNEL); + if (!dring->vaddr) { + ret = -ENOMEM; + goto err; + } + + dring->desc = kzalloc(DESC_NUM * sizeof(*dring->desc), GFP_KERNEL); + if (!dring->desc) { + ret = -ENOMEM; + goto err; + } + + return 0; +err: + netsec_free_dring(priv, id); + + return ret; +} + +static int netsec_setup_rx_dring(struct netsec_priv *priv) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + struct netsec_desc desc; + struct sk_buff *skb; + int n; + + desc.len = priv->ndev->mtu + 22; + + for (n = 0; n < DESC_NUM; n++) { + skb = netsec_alloc_skb(priv, &desc); + if (!skb) { + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + return -ENOMEM; + } + netsec_set_rx_de(priv, dring, n, &desc, skb); + } + + return 0; +} + +static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg, + u32 addr_h, u32 addr_l, u32 size) +{ + u64 base = (u64)addr_h << 32 | addr_l; + void __iomem *ucode; + u32 i; + + ucode = ioremap(base, size * sizeof(u32)); + if (!ucode) + return -ENOMEM; + + for (i = 0; i < size; i++) + netsec_write(priv, reg, readl(ucode + i * 4)); + + iounmap(ucode); + return 0; +} + +static int netsec_netdev_load_microcode(struct netsec_priv *priv) +{ + u32 addr_h, addr_l, size; + int err; + + addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H); + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L); + size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H); + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L); + size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + addr_h = 0; + addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS); + size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE); + err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF, + addr_h, addr_l, size); + if (err) + return err; + + return 0; +} + +static int netsec_reset_hardware(struct netsec_priv *priv) +{ + u32 value; + int err; + + /* stop DMA engines */ + if 
(!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) { + netsec_write(priv, NETSEC_REG_DMA_HM_CTRL, + NETSEC_DMA_CTRL_REG_STOP); + netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, + NETSEC_DMA_CTRL_REG_STOP); + + while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) & + NETSEC_DMA_CTRL_REG_STOP) + cpu_relax(); + + while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) & + NETSEC_DMA_CTRL_REG_STOP) + cpu_relax(); + } + + netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET); + netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN); + netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL); + + while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0) + cpu_relax(); + + /* set desc_start addr */ + netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP, + upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_phys)); + netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW, + lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_phys)); + + netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP, + upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_phys)); + netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW, + lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_phys)); + + /* set normal tx/rx dring config */ + netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG, + 1 << NETSEC_REG_DESC_ENDIAN); + netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG, + 1 << NETSEC_REG_DESC_ENDIAN); + + err = netsec_netdev_load_microcode(priv); + if (err) { + netif_err(priv, probe, priv->ndev, + "%s: failed to load microcode (%d)\n", __func__, err); + return err; + } + + /* start DMA engines */ + netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1); + netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0); + + usleep_range(1000, 2000); + + if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) & + NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) { + netif_err(priv, probe, priv->ndev, + "microengine start failed\n"); + return -ENXIO; + } + netsec_write(priv, NETSEC_REG_TOP_STATUS, + NETSEC_TOP_IRQ_REG_CODE_LOAD_END); + + value = NETSEC_PKT_CTRL_REG_MODE_NRM; + if (priv->ndev->mtu > ETH_DATA_LEN) + value |= NETSEC_PKT_CTRL_REG_EN_JUMBO; + + /* change to normal mode */ + netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS); + netsec_write(priv, NETSEC_REG_PKT_CTRL, value); + + while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) & + NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0) + cpu_relax(); + + /* clear any pending EMPTY/ERR irq status */ + netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0); + + /* Disable TX & RX intr */ + netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); + + return 0; +} + +static int netsec_start_gmac(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->ndev->phydev; + u32 value = 0; + int ret; + + if (phydev->speed != SPEED_1000) + value = (NETSEC_GMAC_MCR_REG_CST | + NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON); + + if (netsec_mac_write(priv, GMAC_REG_MCR, value)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_BMR, + NETSEC_GMAC_BMR_REG_RESET)) + return -ETIMEDOUT; + + /* Wait for soft reset */ + usleep_range(1000, 5000); + + ret = netsec_mac_read(priv, GMAC_REG_BMR, &value); + if (ret) + return ret; + if (value & NETSEC_GMAC_BMR_REG_SWR) + return -EAGAIN; + + netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1); + if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1)) + return -ETIMEDOUT; + + netsec_write(priv, MAC_REG_DESC_INIT, 1); + if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1)) + return -ETIMEDOUT; + + if (netsec_mac_write(priv, GMAC_REG_BMR, + NETSEC_GMAC_BMR_REG_COMMON)) + return -ETIMEDOUT; + if
(netsec_mac_write(priv, GMAC_REG_RDLAR, + NETSEC_GMAC_RDLAR_REG_COMMON)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_TDLAR, + NETSEC_GMAC_TDLAR_REG_COMMON)) + return -ETIMEDOUT; + if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001)) + return -ETIMEDOUT; + + ret = netsec_mac_update_to_phy_state(priv); + if (ret) + return ret; + + ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); + if (ret) + return ret; + + value |= NETSEC_GMAC_OMR_REG_SR; + value |= NETSEC_GMAC_OMR_REG_ST; + + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); + + netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce); + + if (netsec_mac_write(priv, GMAC_REG_OMR, value)) + return -ETIMEDOUT; + + return 0; +} + +static int netsec_stop_gmac(struct netsec_priv *priv) +{ + u32 value; + int ret; + + ret = netsec_mac_read(priv, GMAC_REG_OMR, &value); + if (ret) + return ret; + value &= ~NETSEC_GMAC_OMR_REG_SR; + value &= ~NETSEC_GMAC_OMR_REG_ST; + + /* disable all interrupts */ + netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); + netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); + + return netsec_mac_write(priv, GMAC_REG_OMR, value); +} + +static void netsec_phy_adjust_link(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + if (ndev->phydev->link) + netsec_start_gmac(priv); + else + netsec_stop_gmac(priv); + + phy_print_status(ndev->phydev); +} + +static irqreturn_t netsec_irq_handler(int irq, void *dev_id) +{ + struct netsec_priv *priv = dev_id; + u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS); + unsigned long flags; + + /* Disable interrupts */ + if (status & NETSEC_IRQ_TX) { + val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS); + netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val); + } + if (status & NETSEC_IRQ_RX) { + val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS); + netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val); + } + + spin_lock_irqsave(&priv->reglock, flags); + netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX); + spin_unlock_irqrestore(&priv->reglock, flags); + + napi_schedule(&priv->napi); + + return IRQ_HANDLED; +} + +static int netsec_netdev_open(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + int ret; + + pm_runtime_get_sync(priv->dev); + + ret = netsec_setup_rx_dring(priv); + if (ret) { + netif_err(priv, probe, priv->ndev, + "%s: failed to setup ring\n", __func__); + goto err1; + } + + ret = request_irq(priv->ndev->irq, netsec_irq_handler, + IRQF_SHARED, "netsec", priv); + if (ret) { + netif_err(priv, drv, priv->ndev, "request_irq failed\n"); + goto err2; + } + + if (dev_of_node(priv->dev)) { + if (!of_phy_connect(priv->ndev, priv->phy_np, + netsec_phy_adjust_link, 0, + priv->phy_interface)) { + netif_err(priv, link, priv->ndev, "missing PHY\n"); + ret = -ENODEV; + goto err3; + } + } else { + ret = phy_connect_direct(priv->ndev, priv->phydev, + netsec_phy_adjust_link, + priv->phy_interface); + if (ret) { + netif_err(priv, link, priv->ndev, + "phy_connect_direct() failed (%d)\n", ret); + goto err3; + } + } + + phy_start(ndev->phydev); + + netsec_start_gmac(priv); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + /* Enable RX intr.
*/ + netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX); + + return 0; +err3: + free_irq(priv->ndev->irq, priv); +err2: + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); +err1: + pm_runtime_put_sync(priv->dev); + return ret; +} + +static int netsec_netdev_stop(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + netif_stop_queue(priv->ndev); + dma_wmb(); + + napi_disable(&priv->napi); + + netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0); + netsec_stop_gmac(priv); + + free_irq(priv->ndev->irq, priv); + + netsec_uninit_pkt_dring(priv, NETSEC_RING_TX); + netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); + + phy_stop(ndev->phydev); + phy_disconnect(ndev->phydev); + + pm_runtime_put_sync(priv->dev); + + return 0; +} + +static int netsec_netdev_init(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + int ret; + + ret = netsec_alloc_dring(priv, NETSEC_RING_TX); + if (ret) + return ret; + + ret = netsec_alloc_dring(priv, NETSEC_RING_RX); + if (ret) + goto err1; + + ret = netsec_reset_hardware(priv); + if (ret) + goto err2; + + return 0; +err2: + netsec_free_dring(priv, NETSEC_RING_RX); +err1: + netsec_free_dring(priv, NETSEC_RING_TX); + return ret; +} + +static void netsec_netdev_uninit(struct net_device *ndev) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + netsec_free_dring(priv, NETSEC_RING_RX); + netsec_free_dring(priv, NETSEC_RING_TX); +} + +static int netsec_netdev_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct netsec_priv *priv = netdev_priv(ndev); + + priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM); + + return 0; +} + +static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr, + int cmd) +{ + return phy_mii_ioctl(ndev->phydev, ifr, cmd); +} + +static const struct net_device_ops netsec_netdev_ops = { + .ndo_init = netsec_netdev_init, + .ndo_uninit = netsec_netdev_uninit, + .ndo_open = netsec_netdev_open, + .ndo_stop = netsec_netdev_stop, + .ndo_start_xmit = netsec_netdev_start_xmit, + .ndo_set_features = netsec_netdev_set_features, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = netsec_netdev_ioctl, +}; + +static int netsec_of_probe(struct platform_device *pdev, + struct netsec_priv *priv) +{ + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + if (!priv->phy_np) { + dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); + return -EINVAL; + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */ + if (IS_ERR(priv->clk)) { + dev_err(&pdev->dev, "phy_ref_clk not found\n"); + return PTR_ERR(priv->clk); + } + priv->freq = clk_get_rate(priv->clk); + + return 0; +} + +static int netsec_acpi_probe(struct platform_device *pdev, + struct netsec_priv *priv, u32 *phy_addr) +{ + int ret; + + if (!IS_ENABLED(CONFIG_ACPI)) + return -ENODEV; + + ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr); + if (ret) { + dev_err(&pdev->dev, + "missing required property 'phy-channel'\n"); + return ret; + } + + ret = device_property_read_u32(&pdev->dev, + "socionext,phy-clock-frequency", + &priv->freq); + if (ret) + dev_err(&pdev->dev, + "missing required property 'socionext,phy-clock-frequency'\n"); + return ret; +} + +static void netsec_unregister_mdio(struct netsec_priv *priv) +{ + struct phy_device *phydev = priv->phydev; + + if (!dev_of_node(priv->dev) && phydev) { + phy_device_remove(phydev); + phy_device_free(phydev); + } + + mdiobus_unregister(priv->mii_bus); +} + +static int 
netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) +{ + struct mii_bus *bus; + int ret; + + bus = devm_mdiobus_alloc(priv->dev); + if (!bus) + return -ENOMEM; + + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev)); + bus->priv = priv; + bus->name = "SNI NETSEC MDIO"; + bus->read = netsec_phy_read; + bus->write = netsec_phy_write; + bus->parent = priv->dev; + priv->mii_bus = bus; + + if (dev_of_node(priv->dev)) { + struct device_node *mdio_node, *parent = dev_of_node(priv->dev); + + mdio_node = of_get_child_by_name(parent, "mdio"); + if (mdio_node) { + parent = mdio_node; + } else { + /* older f/w doesn't populate the mdio subnode, + * allow relaxed upgrade of f/w in due time. + */ + dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n"); + } + + ret = of_mdiobus_register(bus, parent); + of_node_put(mdio_node); + + if (ret) { + dev_err(priv->dev, "mdiobus register err(%d)\n", ret); + return ret; + } + } else { + /* Mask out all PHYs from auto probing. */ + bus->phy_mask = ~0; + ret = mdiobus_register(bus); + if (ret) { + dev_err(priv->dev, "mdiobus register err(%d)\n", ret); + return ret; + } + + priv->phydev = get_phy_device(bus, phy_addr, false); + if (IS_ERR(priv->phydev)) { + ret = PTR_ERR(priv->phydev); + dev_err(priv->dev, "get_phy_device err(%d)\n", ret); + priv->phydev = NULL; + return -ENODEV; + } + + ret = phy_device_register(priv->phydev); + if (ret) { + mdiobus_unregister(bus); + dev_err(priv->dev, + "phy_device_register err(%d)\n", ret); + } + } + + return ret; +} + +static int netsec_probe(struct platform_device *pdev) +{ + struct resource *mmio_res, *eeprom_res, *irq_res; + u8 *mac, macbuf[ETH_ALEN]; + struct netsec_priv *priv; + u32 hw_ver, phy_addr = 0; + struct net_device *ndev; + int ret; + + mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mmio_res) { + dev_err(&pdev->dev, "No MMIO resource found.\n"); + return -ENODEV; + } + + eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!eeprom_res) { + dev_info(&pdev->dev, "No EEPROM resource found.\n"); + return -ENODEV; + } + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + dev_err(&pdev->dev, "No IRQ resource found.\n"); + return -ENODEV; + } + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + + spin_lock_init(&priv->reglock); + SET_NETDEV_DEV(ndev, &pdev->dev); + platform_set_drvdata(pdev, priv); + ndev->irq = irq_res->start; + priv->dev = &pdev->dev; + priv->ndev = ndev; + + priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | + NETIF_MSG_LINK | NETIF_MSG_PROBE; + + priv->phy_interface = device_get_phy_mode(&pdev->dev); + if (priv->phy_interface < 0) { + dev_err(&pdev->dev, "missing required property 'phy-mode'\n"); + ret = -ENODEV; + goto free_ndev; + } + + priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start, + resource_size(mmio_res)); + if (!priv->ioaddr) { + dev_err(&pdev->dev, "devm_ioremap() failed\n"); + ret = -ENXIO; + goto free_ndev; + } + + priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start, + resource_size(eeprom_res)); + if (!priv->eeprom_base) { + dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n"); + ret = -ENXIO; + goto free_ndev; + } + + mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf)); + if (mac) + ether_addr_copy(ndev->dev_addr, mac); + + if (priv->eeprom_base && + (!mac || !is_valid_ether_addr(ndev->dev_addr))) { + void __iomem *macp = priv->eeprom_base + + NETSEC_EEPROM_MAC_ADDRESS; + + ndev->dev_addr[0] = 
readb(macp + 3); + ndev->dev_addr[1] = readb(macp + 2); + ndev->dev_addr[2] = readb(macp + 1); + ndev->dev_addr[3] = readb(macp + 0); + ndev->dev_addr[4] = readb(macp + 7); + ndev->dev_addr[5] = readb(macp + 6); + } + + if (!is_valid_ether_addr(ndev->dev_addr)) { + dev_warn(&pdev->dev, "No MAC address found, using random\n"); + eth_hw_addr_random(ndev); + } + + if (dev_of_node(&pdev->dev)) + ret = netsec_of_probe(pdev, priv); + else + ret = netsec_acpi_probe(pdev, priv, &phy_addr); + if (ret) + goto free_ndev; + + if (!priv->freq) { + dev_err(&pdev->dev, "missing PHY reference clock frequency\n"); + ret = -ENODEV; + goto free_ndev; + } + + /* default for throughput */ + priv->et_coalesce.rx_coalesce_usecs = 500; + priv->et_coalesce.rx_max_coalesced_frames = 8; + priv->et_coalesce.tx_coalesce_usecs = 500; + priv->et_coalesce.tx_max_coalesced_frames = 8; + + ret = device_property_read_u32(&pdev->dev, "max-frame-size", + &ndev->max_mtu); + if (ret < 0) + ndev->max_mtu = ETH_DATA_LEN; + + /* runtime_pm coverage just for probe, open/close also cover it */ + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER); + /* this driver only supports F_TAIKI style NETSEC */ + if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) != + NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) { + ret = -ENODEV; + goto pm_disable; + } + + dev_info(&pdev->dev, "hardware revision %d.%d\n", + hw_ver >> 16, hw_ver & 0xffff); + + netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET); + + ndev->netdev_ops = &netsec_netdev_ops; + ndev->ethtool_ops = &netsec_ethtool_ops; + + ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + ndev->hw_features = ndev->features; + + priv->rx_cksum_offload_flag = true; + + ret = netsec_register_mdio(priv, phy_addr); + if (ret) + goto unreg_napi; + + if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n"); + + ret = register_netdev(ndev); + if (ret) { + netif_err(priv, probe, ndev, "register_netdev() failed\n"); + goto unreg_mii; + } + + pm_runtime_put_sync(&pdev->dev); + return 0; + +unreg_mii: + netsec_unregister_mdio(priv); +unreg_napi: + netif_napi_del(&priv->napi); +pm_disable: + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +free_ndev: + free_netdev(ndev); + dev_err(&pdev->dev, "init failed\n"); + + return ret; +} + +static int netsec_remove(struct platform_device *pdev) +{ + struct netsec_priv *priv = platform_get_drvdata(pdev); + + unregister_netdev(priv->ndev); + + netsec_unregister_mdio(priv); + + netif_napi_del(&priv->napi); + + pm_runtime_disable(&pdev->dev); + free_netdev(priv->ndev); + + return 0; +} + +#ifdef CONFIG_PM +static int netsec_runtime_suspend(struct device *dev) +{ + struct netsec_priv *priv = dev_get_drvdata(dev); + + netsec_write(priv, NETSEC_REG_CLK_EN, 0); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static int netsec_runtime_resume(struct device *dev) +{ + struct netsec_priv *priv = dev_get_drvdata(dev); + + clk_prepare_enable(priv->clk); + + netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D | + NETSEC_CLK_EN_REG_DOM_C | + NETSEC_CLK_EN_REG_DOM_G); + return 0; +} +#endif + +static const struct dev_pm_ops netsec_pm_ops = { + SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL) +}; + +static const struct of_device_id netsec_dt_ids[] = { + { .compatible = "socionext,synquacer-netsec" }, + { } +}; 
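+/*
+ * Illustrative device tree node that this match table would bind
+ * against. A sketch only: the unit address, reg/interrupt numbers and
+ * phandles below are hypothetical; the properties themselves are the
+ * ones netsec_probe()/netsec_of_probe() parse (two reg regions for
+ * MMIO and EEPROM, one interrupt, a PHY reference clock, "phy-mode"
+ * and "phy-handle", plus the optional "max-frame-size"):
+ *
+ *	ethernet@522d0000 {
+ *		compatible = "socionext,synquacer-netsec";
+ *		reg = <0x522d0000 0x10000>, <0x10200000 0x10000>;
+ *		interrupts = <0 176 4>;
+ *		clocks = <&clk_netsec>;
+ *		phy-mode = "rgmii";
+ *		max-frame-size = <9000>;
+ *		phy-handle = <&ethphy0>;
+ *	};
+ */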
+MODULE_DEVICE_TABLE(of, netsec_dt_ids); + +#ifdef CONFIG_ACPI +static const struct acpi_device_id netsec_acpi_ids[] = { + { "SCX0001" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids); +#endif + +static struct platform_driver netsec_driver = { + .probe = netsec_probe, + .remove = netsec_remove, + .driver = { + .name = "netsec", + .pm = &netsec_pm_ops, + .of_match_table = netsec_dt_ids, + .acpi_match_table = ACPI_PTR(netsec_acpi_ids), + }, +}; +module_platform_driver(netsec_driver); + +MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>"); +MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>"); +MODULE_DESCRIPTION("NETSEC Ethernet driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c new file mode 100644 index 000000000000..111e7ca9df56 --- /dev/null +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -0,0 +1,1736 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * sni_ave.c - Socionext UniPhier AVE ethernet driver + * Copyright 2014 Panasonic Corporation + * Copyright 2015-2017 Socionext Inc. + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/reset.h> +#include <linux/types.h> +#include <linux/u64_stats_sync.h> + +/* General Register Group */ +#define AVE_IDR 0x000 /* ID */ +#define AVE_VR 0x004 /* Version */ +#define AVE_GRR 0x008 /* Global Reset */ +#define AVE_CFGR 0x00c /* Configuration */ + +/* Interrupt Register Group */ +#define AVE_GIMR 0x100 /* Global Interrupt Mask */ +#define AVE_GISR 0x104 /* Global Interrupt Status */ + +/* MAC Register Group */ +#define AVE_TXCR 0x200 /* TX Setup */ +#define AVE_RXCR 0x204 /* RX Setup */ +#define AVE_RXMAC1R 0x208 /* MAC address (lower) */ +#define AVE_RXMAC2R 0x20c /* MAC address (upper) */ +#define AVE_MDIOCTR 0x214 /* MDIO Control */ +#define AVE_MDIOAR 0x218 /* MDIO Address */ +#define AVE_MDIOWDR 0x21c /* MDIO Data */ +#define AVE_MDIOSR 0x220 /* MDIO Status */ +#define AVE_MDIORDR 0x224 /* MDIO Rd Data */ + +/* Descriptor Control Register Group */ +#define AVE_DESCC 0x300 /* Descriptor Control */ +#define AVE_TXDC 0x304 /* TX Descriptor Configuration */ +#define AVE_RXDC0 0x308 /* RX Descriptor Ring0 Configuration */ +#define AVE_IIRQC 0x34c /* Interval IRQ Control */ + +/* Packet Filter Register Group */ +#define AVE_PKTF_BASE 0x800 /* PF Base Address */ +#define AVE_PFMBYTE_BASE 0xd00 /* PF Mask Byte Base Address */ +#define AVE_PFMBIT_BASE 0xe00 /* PF Mask Bit Base Address */ +#define AVE_PFSEL_BASE 0xf00 /* PF Selector Base Address */ +#define AVE_PFEN 0xffc /* Packet Filter Enable */ +#define AVE_PKTF(ent) (AVE_PKTF_BASE + (ent) * 0x40) +#define AVE_PFMBYTE(ent) (AVE_PFMBYTE_BASE + (ent) * 8) +#define AVE_PFMBIT(ent) (AVE_PFMBIT_BASE + (ent) * 4) +#define AVE_PFSEL(ent) (AVE_PFSEL_BASE + (ent) * 4) + +/* 64bit descriptor memory */ +#define AVE_DESC_SIZE_64 12 /* Descriptor Size */ + +#define AVE_TXDM_64 0x1000 /* Tx Descriptor Memory */ +#define AVE_RXDM_64 0x1c00 /* Rx Descriptor Memory */ + +#define AVE_TXDM_SIZE_64 0x0ba0 /* Tx Descriptor Memory Size 3KB */ +#define AVE_RXDM_SIZE_64 0x6000 /* Rx Descriptor Memory Size 24KB */ + +/* 32bit descriptor memory */ +#define AVE_DESC_SIZE_32 8 /* Descriptor Size */ + +#define 
AVE_TXDM_32 0x1000 /* Tx Descriptor Memory */ +#define AVE_RXDM_32 0x1800 /* Rx Descriptor Memory */ + +#define AVE_TXDM_SIZE_32 0x07c0 /* Tx Descriptor Memory Size 2KB */ +#define AVE_RXDM_SIZE_32 0x4000 /* Rx Descriptor Memory Size 16KB */ + +/* RMII Bridge Register Group */ +#define AVE_RSTCTRL 0x8028 /* Reset control */ +#define AVE_RSTCTRL_RMIIRST BIT(16) +#define AVE_LINKSEL 0x8034 /* Link speed setting */ +#define AVE_LINKSEL_100M BIT(0) + +/* AVE_GRR */ +#define AVE_GRR_RXFFR BIT(5) /* Reset RxFIFO */ +#define AVE_GRR_PHYRST BIT(4) /* Reset external PHY */ +#define AVE_GRR_GRST BIT(0) /* Reset all MAC */ + +/* AVE_CFGR */ +#define AVE_CFGR_FLE BIT(31) /* Filter Function */ +#define AVE_CFGR_CHE BIT(30) /* Checksum Function */ +#define AVE_CFGR_MII BIT(27) /* Func mode (1:MII/RMII, 0:RGMII) */ +#define AVE_CFGR_IPFCEN BIT(24) /* IP fragment sum Enable */ + +/* AVE_GISR (common with GIMR) */ +#define AVE_GI_PHY BIT(24) /* PHY interrupt */ +#define AVE_GI_TX BIT(16) /* Tx complete */ +#define AVE_GI_RXERR BIT(8) /* Receive frame more than max size */ +#define AVE_GI_RXOVF BIT(7) /* Overflow at the RxFIFO */ +#define AVE_GI_RXDROP BIT(6) /* Drop packet */ +#define AVE_GI_RXIINT BIT(5) /* Interval interrupt */ + +/* AVE_TXCR */ +#define AVE_TXCR_FLOCTR BIT(18) /* Flow control */ +#define AVE_TXCR_TXSPD_1G BIT(17) +#define AVE_TXCR_TXSPD_100 BIT(16) + +/* AVE_RXCR */ +#define AVE_RXCR_RXEN BIT(30) /* Rx enable */ +#define AVE_RXCR_FDUPEN BIT(22) /* Interface mode */ +#define AVE_RXCR_FLOCTR BIT(21) /* Flow control */ +#define AVE_RXCR_AFEN BIT(19) /* MAC address filter */ +#define AVE_RXCR_DRPEN BIT(18) /* Drop pause frame */ +#define AVE_RXCR_MPSIZ_MASK GENMASK(10, 0) + +/* AVE_MDIOCTR */ +#define AVE_MDIOCTR_RREQ BIT(3) /* Read request */ +#define AVE_MDIOCTR_WREQ BIT(2) /* Write request */ + +/* AVE_MDIOSR */ +#define AVE_MDIOSR_STS BIT(0) /* access status */ + +/* AVE_DESCC */ +#define AVE_DESCC_STATUS_MASK GENMASK(31, 16) +#define AVE_DESCC_RD0 BIT(8) /* Enable Rx descriptor Ring0 */ +#define AVE_DESCC_RDSTP BIT(4) /* Pause Rx descriptor */ +#define AVE_DESCC_TD BIT(0) /* Enable Tx descriptor */ + +/* AVE_TXDC */ +#define AVE_TXDC_SIZE GENMASK(27, 16) /* Size of Tx descriptor */ +#define AVE_TXDC_ADDR GENMASK(11, 0) /* Start address */ +#define AVE_TXDC_ADDR_START 0 + +/* AVE_RXDC0 */ +#define AVE_RXDC0_SIZE GENMASK(30, 16) /* Size of Rx descriptor */ +#define AVE_RXDC0_ADDR GENMASK(14, 0) /* Start address */ +#define AVE_RXDC0_ADDR_START 0 + +/* AVE_IIRQC */ +#define AVE_IIRQC_EN0 BIT(27) /* Enable interval interrupt Ring0 */ +#define AVE_IIRQC_BSCK GENMASK(15, 0) /* Interval count unit */ + +/* Command status for descriptor */ +#define AVE_STS_OWN BIT(31) /* Descriptor ownership */ +#define AVE_STS_INTR BIT(29) /* Request for interrupt */ +#define AVE_STS_OK BIT(27) /* Normal transmit */ +/* TX */ +#define AVE_STS_NOCSUM BIT(28) /* No use HW checksum */ +#define AVE_STS_1ST BIT(26) /* Head of buffer chain */ +#define AVE_STS_LAST BIT(25) /* Tail of buffer chain */ +#define AVE_STS_OWC BIT(21) /* Out of window,Late Collision */ +#define AVE_STS_EC BIT(20) /* Excess collision occurred */ +#define AVE_STS_PKTLEN_TX_MASK GENMASK(15, 0) +/* RX */ +#define AVE_STS_CSSV BIT(21) /* Checksum check performed */ +#define AVE_STS_CSER BIT(20) /* Checksum error detected */ +#define AVE_STS_PKTLEN_RX_MASK GENMASK(10, 0) + +/* Packet filter */ +#define AVE_PFMBYTE_MASK0 (GENMASK(31, 8) | GENMASK(5, 0)) +#define AVE_PFMBYTE_MASK1 GENMASK(25, 0) +#define AVE_PFMBIT_MASK GENMASK(15, 0) + +#define 
AVE_PF_SIZE 17 /* Number of all packet filter */ +#define AVE_PF_MULTICAST_SIZE 7 /* Number of multicast filter */ + +#define AVE_PFNUM_FILTER 0 /* No.0 */ +#define AVE_PFNUM_UNICAST 1 /* No.1 */ +#define AVE_PFNUM_BROADCAST 2 /* No.2 */ +#define AVE_PFNUM_MULTICAST 11 /* No.11-17 */ + +/* NETIF Message control */ +#define AVE_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) + +/* Parameter for descriptor */ +#define AVE_NR_TXDESC 32 /* Tx descriptor */ +#define AVE_NR_RXDESC 64 /* Rx descriptor */ + +#define AVE_DESC_OFS_CMDSTS 0 +#define AVE_DESC_OFS_ADDRL 4 +#define AVE_DESC_OFS_ADDRU 8 + +/* Parameter for ethernet frame */ +#define AVE_MAX_ETHFRAME 1518 + +/* Parameter for interrupt */ +#define AVE_INTM_COUNT 20 +#define AVE_FORCE_TXINTCNT 1 + +#define IS_DESC_64BIT(p) ((p)->data->is_desc_64bit) + +enum desc_id { + AVE_DESCID_RX, + AVE_DESCID_TX, +}; + +enum desc_state { + AVE_DESC_RX_PERMIT, + AVE_DESC_RX_SUSPEND, + AVE_DESC_START, + AVE_DESC_STOP, +}; + +struct ave_desc { + struct sk_buff *skbs; + dma_addr_t skbs_dma; + size_t skbs_dmalen; +}; + +struct ave_desc_info { + u32 ndesc; /* number of descriptor */ + u32 daddr; /* start address of descriptor */ + u32 proc_idx; /* index of processing packet */ + u32 done_idx; /* index of processed packet */ + struct ave_desc *desc; /* skb info related descriptor */ +}; + +struct ave_soc_data { + bool is_desc_64bit; +}; + +struct ave_stats { + struct u64_stats_sync syncp; + u64 packets; + u64 bytes; + u64 errors; + u64 dropped; + u64 collisions; + u64 fifo_errors; +}; + +struct ave_private { + void __iomem *base; + int irq; + int phy_id; + unsigned int desc_size; + u32 msg_enable; + struct clk *clk; + struct reset_control *rst; + phy_interface_t phy_mode; + struct phy_device *phydev; + struct mii_bus *mdio; + + /* stats */ + struct ave_stats stats_rx; + struct ave_stats stats_tx; + + /* NAPI support */ + struct net_device *ndev; + struct napi_struct napi_rx; + struct napi_struct napi_tx; + + /* descriptor */ + struct ave_desc_info rx; + struct ave_desc_info tx; + + /* flow control */ + int pause_auto; + int pause_rx; + int pause_tx; + + const struct ave_soc_data *data; +}; + +static u32 ave_desc_read(struct net_device *ndev, enum desc_id id, int entry, + int offset) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 addr; + + addr = ((id == AVE_DESCID_TX) ? priv->tx.daddr : priv->rx.daddr) + + entry * priv->desc_size + offset; + + return readl(priv->base + addr); +} + +static u32 ave_desc_read_cmdsts(struct net_device *ndev, enum desc_id id, + int entry) +{ + return ave_desc_read(ndev, id, entry, AVE_DESC_OFS_CMDSTS); +} + +static void ave_desc_write(struct net_device *ndev, enum desc_id id, + int entry, int offset, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 addr; + + addr = ((id == AVE_DESCID_TX) ? 
priv->tx.daddr : priv->rx.daddr) + + entry * priv->desc_size + offset; + + writel(val, priv->base + addr); +} + +static void ave_desc_write_cmdsts(struct net_device *ndev, enum desc_id id, + int entry, u32 val) +{ + ave_desc_write(ndev, id, entry, AVE_DESC_OFS_CMDSTS, val); +} + +static void ave_desc_write_addr(struct net_device *ndev, enum desc_id id, + int entry, dma_addr_t paddr) +{ + struct ave_private *priv = netdev_priv(ndev); + + ave_desc_write(ndev, id, entry, AVE_DESC_OFS_ADDRL, + lower_32_bits(paddr)); + if (IS_DESC_64BIT(priv)) + ave_desc_write(ndev, id, + entry, AVE_DESC_OFS_ADDRU, + upper_32_bits(paddr)); +} + +static u32 ave_irq_disable_all(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 ret; + + ret = readl(priv->base + AVE_GIMR); + writel(0, priv->base + AVE_GIMR); + + return ret; +} + +static void ave_irq_restore(struct net_device *ndev, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(val, priv->base + AVE_GIMR); +} + +static void ave_irq_enable(struct net_device *ndev, u32 bitflag) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(readl(priv->base + AVE_GIMR) | bitflag, priv->base + AVE_GIMR); + writel(bitflag, priv->base + AVE_GISR); +} + +static void ave_hw_write_macaddr(struct net_device *ndev, + const unsigned char *mac_addr, + int reg1, int reg2) +{ + struct ave_private *priv = netdev_priv(ndev); + + writel(mac_addr[0] | mac_addr[1] << 8 | + mac_addr[2] << 16 | mac_addr[3] << 24, priv->base + reg1); + writel(mac_addr[4] | mac_addr[5] << 8, priv->base + reg2); +} + +static void ave_hw_read_version(struct net_device *ndev, char *buf, int len) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 major, minor, vr; + + vr = readl(priv->base + AVE_VR); + major = (vr & GENMASK(15, 8)) >> 8; + minor = (vr & GENMASK(7, 0)); + snprintf(buf, len, "v%u.%u", major, minor); +} + +static void ave_ethtool_get_drvinfo(struct net_device *ndev, + struct ethtool_drvinfo *info) +{ + struct device *dev = ndev->dev.parent; + + strlcpy(info->driver, dev->driver->name, sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(dev), sizeof(info->bus_info)); + ave_hw_read_version(ndev, info->fw_version, sizeof(info->fw_version)); +} + +static u32 ave_ethtool_get_msglevel(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + + return priv->msg_enable; +} + +static void ave_ethtool_set_msglevel(struct net_device *ndev, u32 val) +{ + struct ave_private *priv = netdev_priv(ndev); + + priv->msg_enable = val; +} + +static void ave_ethtool_get_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + if (ndev->phydev) + phy_ethtool_get_wol(ndev->phydev, wol); +} + +static int ave_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + int ret; + + if (!ndev->phydev || + (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))) + return -EOPNOTSUPP; + + ret = phy_ethtool_set_wol(ndev->phydev, wol); + if (!ret) + device_set_wakeup_enable(&ndev->dev, !!wol->wolopts); + + return ret; +} + +static void ave_ethtool_get_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ave_private *priv = netdev_priv(ndev); + + pause->autoneg = priv->pause_auto; + pause->rx_pause = priv->pause_rx; + pause->tx_pause = priv->pause_tx; +} + +static int ave_ethtool_set_pauseparam(struct net_device *ndev, + struct ethtool_pauseparam *pause) +{ + struct ave_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + + 
if (!phydev) + return -EINVAL; + + priv->pause_auto = pause->autoneg; + priv->pause_rx = pause->rx_pause; + priv->pause_tx = pause->tx_pause; + + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + if (pause->rx_pause) + phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + if (pause->tx_pause) + phydev->advertising ^= ADVERTISED_Asym_Pause; + + if (pause->autoneg) { + if (netif_running(ndev)) + phy_start_aneg(phydev); + } + + return 0; +} + +static const struct ethtool_ops ave_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_drvinfo = ave_ethtool_get_drvinfo, + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_msglevel = ave_ethtool_get_msglevel, + .set_msglevel = ave_ethtool_set_msglevel, + .get_wol = ave_ethtool_get_wol, + .set_wol = ave_ethtool_set_wol, + .get_pauseparam = ave_ethtool_get_pauseparam, + .set_pauseparam = ave_ethtool_set_pauseparam, +}; + +static int ave_mdiobus_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct net_device *ndev = bus->priv; + struct ave_private *priv; + u32 mdioctl, mdiosr; + int ret; + + priv = netdev_priv(ndev); + + /* write address */ + writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); + + /* read request */ + mdioctl = readl(priv->base + AVE_MDIOCTR); + writel((mdioctl | AVE_MDIOCTR_RREQ) & ~AVE_MDIOCTR_WREQ, + priv->base + AVE_MDIOCTR); + + ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, + !(mdiosr & AVE_MDIOSR_STS), 20, 2000); + if (ret) { + netdev_err(ndev, "failed to read (phy:%d reg:%x)\n", + phyid, regnum); + return ret; + } + + return readl(priv->base + AVE_MDIORDR) & GENMASK(15, 0); +} + +static int ave_mdiobus_write(struct mii_bus *bus, int phyid, int regnum, + u16 val) +{ + struct net_device *ndev = bus->priv; + struct ave_private *priv; + u32 mdioctl, mdiosr; + int ret; + + priv = netdev_priv(ndev); + + /* write address */ + writel((phyid << 8) | regnum, priv->base + AVE_MDIOAR); + + /* write data */ + writel(val, priv->base + AVE_MDIOWDR); + + /* write request */ + mdioctl = readl(priv->base + AVE_MDIOCTR); + writel((mdioctl | AVE_MDIOCTR_WREQ) & ~AVE_MDIOCTR_RREQ, + priv->base + AVE_MDIOCTR); + + ret = readl_poll_timeout(priv->base + AVE_MDIOSR, mdiosr, + !(mdiosr & AVE_MDIOSR_STS), 20, 2000); + if (ret) + netdev_err(ndev, "failed to write (phy:%d reg:%x)\n", + phyid, regnum); + + return ret; +} + +static int ave_dma_map(struct net_device *ndev, struct ave_desc *desc, + void *ptr, size_t len, enum dma_data_direction dir, + dma_addr_t *paddr) +{ + dma_addr_t map_addr; + + map_addr = dma_map_single(ndev->dev.parent, ptr, len, dir); + if (unlikely(dma_mapping_error(ndev->dev.parent, map_addr))) + return -ENOMEM; + + desc->skbs_dma = map_addr; + desc->skbs_dmalen = len; + *paddr = map_addr; + + return 0; +} + +static void ave_dma_unmap(struct net_device *ndev, struct ave_desc *desc, + enum dma_data_direction dir) +{ + if (!desc->skbs_dma) + return; + + dma_unmap_single(ndev->dev.parent, + desc->skbs_dma, desc->skbs_dmalen, dir); + desc->skbs_dma = 0; +} + +/* Prepare Rx descriptor and memory */ +static int ave_rxdesc_prepare(struct net_device *ndev, int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + struct sk_buff *skb; + dma_addr_t paddr; + int ret; + + skb = priv->rx.desc[entry].skbs; + if (!skb) { + skb = netdev_alloc_skb_ip_align(ndev, + AVE_MAX_ETHFRAME); + if (!skb) { + netdev_err(ndev, "can't allocate skb for Rx\n"); + return -ENOMEM; + } + } + + /* 
set disable to cmdsts */ + ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, + AVE_STS_INTR | AVE_STS_OWN); + + /* map Rx buffer + * Rx buffer set to the Rx descriptor has two restrictions: + * - Rx buffer address is 4 byte aligned. + * - Rx buffer begins with 2 byte headroom, and data will be put from + * (buffer + 2). + * To satisfy this, specify the address to put back the buffer + * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(), + * and expand the map size by NET_IP_ALIGN. + */ + ret = ave_dma_map(ndev, &priv->rx.desc[entry], + skb->data - NET_IP_ALIGN, + AVE_MAX_ETHFRAME + NET_IP_ALIGN, + DMA_FROM_DEVICE, &paddr); + if (ret) { + netdev_err(ndev, "can't map skb for Rx\n"); + dev_kfree_skb_any(skb); + return ret; + } + priv->rx.desc[entry].skbs = skb; + + /* set buffer pointer */ + ave_desc_write_addr(ndev, AVE_DESCID_RX, entry, paddr); + + /* set enable to cmdsts */ + ave_desc_write_cmdsts(ndev, AVE_DESCID_RX, entry, + AVE_STS_INTR | AVE_MAX_ETHFRAME); + + return ret; +} + +/* Switch state of descriptor */ +static int ave_desc_switch(struct net_device *ndev, enum desc_state state) +{ + struct ave_private *priv = netdev_priv(ndev); + int ret = 0; + u32 val; + + switch (state) { + case AVE_DESC_START: + writel(AVE_DESCC_TD | AVE_DESCC_RD0, priv->base + AVE_DESCC); + break; + + case AVE_DESC_STOP: + writel(0, priv->base + AVE_DESCC); + if (readl_poll_timeout(priv->base + AVE_DESCC, val, !val, + 150, 15000)) { + netdev_err(ndev, "can't stop descriptor\n"); + ret = -EBUSY; + } + break; + + case AVE_DESC_RX_SUSPEND: + val = readl(priv->base + AVE_DESCC); + val |= AVE_DESCC_RDSTP; + val &= ~AVE_DESCC_STATUS_MASK; + writel(val, priv->base + AVE_DESCC); + if (readl_poll_timeout(priv->base + AVE_DESCC, val, + val & (AVE_DESCC_RDSTP << 16), + 150, 150000)) { + netdev_err(ndev, "can't suspend descriptor\n"); + ret = -EBUSY; + } + break; + + case AVE_DESC_RX_PERMIT: + val = readl(priv->base + AVE_DESCC); + val &= ~AVE_DESCC_RDSTP; + val &= ~AVE_DESCC_STATUS_MASK; + writel(val, priv->base + AVE_DESCC); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int ave_tx_complete(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 proc_idx, done_idx, ndesc, cmdsts; + unsigned int nr_freebuf = 0; + unsigned int tx_packets = 0; + unsigned int tx_bytes = 0; + + proc_idx = priv->tx.proc_idx; + done_idx = priv->tx.done_idx; + ndesc = priv->tx.ndesc; + + /* free pre-stored skb from done_idx to proc_idx */ + while (proc_idx != done_idx) { + cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_TX, done_idx); + + /* do nothing if owner is HW (==1 for Tx) */ + if (cmdsts & AVE_STS_OWN) + break; + + /* check Tx status and update statistics */ + if (cmdsts & AVE_STS_OK) { + tx_bytes += cmdsts & AVE_STS_PKTLEN_TX_MASK; + /* success */ + if (cmdsts & AVE_STS_LAST) + tx_packets++; + } else { + /* error */ + if (cmdsts & AVE_STS_LAST) { + priv->stats_tx.errors++; + if (cmdsts & (AVE_STS_OWC | AVE_STS_EC)) + priv->stats_tx.collisions++; + } + } + + /* release skb */ + if (priv->tx.desc[done_idx].skbs) { + ave_dma_unmap(ndev, &priv->tx.desc[done_idx], + DMA_TO_DEVICE); + dev_consume_skb_any(priv->tx.desc[done_idx].skbs); + priv->tx.desc[done_idx].skbs = NULL; + nr_freebuf++; + } + done_idx = (done_idx + 1) % ndesc; + } + + priv->tx.done_idx = done_idx; + + /* update stats */ + u64_stats_update_begin(&priv->stats_tx.syncp); + priv->stats_tx.packets += tx_packets; + priv->stats_tx.bytes += tx_bytes; + u64_stats_update_end(&priv->stats_tx.syncp); + + /*
wake the queue now that buffers have been freed */ + if (unlikely(netif_queue_stopped(ndev)) && nr_freebuf) + netif_wake_queue(ndev); + + return nr_freebuf; +} + +static int ave_rx_receive(struct net_device *ndev, int num) +{ + struct ave_private *priv = netdev_priv(ndev); + unsigned int rx_packets = 0; + unsigned int rx_bytes = 0; + u32 proc_idx, done_idx; + struct sk_buff *skb; + unsigned int pktlen; + int restpkt, npkts; + u32 ndesc, cmdsts; + + proc_idx = priv->rx.proc_idx; + done_idx = priv->rx.done_idx; + ndesc = priv->rx.ndesc; + restpkt = ((proc_idx + ndesc - 1) - done_idx) % ndesc; + + for (npkts = 0; npkts < num; npkts++) { + /* we can't receive more packets, so fill desc quickly */ + if (--restpkt < 0) + break; + + cmdsts = ave_desc_read_cmdsts(ndev, AVE_DESCID_RX, proc_idx); + + /* do nothing if owner is HW (==0 for Rx) */ + if (!(cmdsts & AVE_STS_OWN)) + break; + + if (!(cmdsts & AVE_STS_OK)) { + priv->stats_rx.errors++; + proc_idx = (proc_idx + 1) % ndesc; + continue; + } + + pktlen = cmdsts & AVE_STS_PKTLEN_RX_MASK; + + /* get skbuff for rx */ + skb = priv->rx.desc[proc_idx].skbs; + priv->rx.desc[proc_idx].skbs = NULL; + + ave_dma_unmap(ndev, &priv->rx.desc[proc_idx], DMA_FROM_DEVICE); + + skb->dev = ndev; + skb_put(skb, pktlen); + skb->protocol = eth_type_trans(skb, ndev); + + if ((cmdsts & AVE_STS_CSSV) && (!(cmdsts & AVE_STS_CSER))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + rx_packets++; + rx_bytes += pktlen; + + netif_receive_skb(skb); + + proc_idx = (proc_idx + 1) % ndesc; + } + + priv->rx.proc_idx = proc_idx; + + /* update stats */ + u64_stats_update_begin(&priv->stats_rx.syncp); + priv->stats_rx.packets += rx_packets; + priv->stats_rx.bytes += rx_bytes; + u64_stats_update_end(&priv->stats_rx.syncp); + + /* refill the Rx buffers */ + while (proc_idx != done_idx) { + if (ave_rxdesc_prepare(ndev, done_idx)) + break; + done_idx = (done_idx + 1) % ndesc; + } + + priv->rx.done_idx = done_idx; + + return npkts; +} + +static int ave_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct ave_private *priv; + struct net_device *ndev; + int num; + + priv = container_of(napi, struct ave_private, napi_rx); + ndev = priv->ndev; + + num = ave_rx_receive(ndev, budget); + if (num < budget) { + napi_complete_done(napi, num); + + /* enable Rx interrupt when NAPI finishes */ + ave_irq_enable(ndev, AVE_GI_RXIINT); + } + + return num; +} + +static int ave_napi_poll_tx(struct napi_struct *napi, int budget) +{ + struct ave_private *priv; + struct net_device *ndev; + int num; + + priv = container_of(napi, struct ave_private, napi_tx); + ndev = priv->ndev; + + num = ave_tx_complete(ndev); + napi_complete(napi); + + /* enable Tx interrupt when NAPI finishes */ + ave_irq_enable(ndev, AVE_GI_TX); + + return num; +} + +static void ave_global_reset(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + /* set config register */ + val = AVE_CFGR_FLE | AVE_CFGR_IPFCEN | AVE_CFGR_CHE; + if (!phy_interface_mode_is_rgmii(priv->phy_mode)) + val |= AVE_CFGR_MII; + writel(val, priv->base + AVE_CFGR); + + /* reset RMII register */ + val = readl(priv->base + AVE_RSTCTRL); + val &= ~AVE_RSTCTRL_RMIIRST; + writel(val, priv->base + AVE_RSTCTRL); + + /* assert reset */ + writel(AVE_GRR_GRST | AVE_GRR_PHYRST, priv->base + AVE_GRR); + msleep(20); + + /* 1st, negate PHY reset only */ + writel(AVE_GRR_GRST, priv->base + AVE_GRR); + msleep(40); + + /* negate reset */ + writel(0, priv->base + AVE_GRR); + msleep(40); + + /* negate RMII register */ + val = readl(priv->base + AVE_RSTCTRL); +
val |= AVE_RSTCTRL_RMIIRST; + writel(val, priv->base + AVE_RSTCTRL); + + ave_irq_disable_all(ndev); +} + +static void ave_rxfifo_reset(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 rxcr_org; + + /* save and disable MAC receive op */ + rxcr_org = readl(priv->base + AVE_RXCR); + writel(rxcr_org & (~AVE_RXCR_RXEN), priv->base + AVE_RXCR); + + /* suspend Rx descriptor */ + ave_desc_switch(ndev, AVE_DESC_RX_SUSPEND); + + /* receive all packets before descriptor starts */ + ave_rx_receive(ndev, priv->rx.ndesc); + + /* assert reset */ + writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); + usleep_range(40, 50); + + /* negate reset */ + writel(0, priv->base + AVE_GRR); + usleep_range(10, 20); + + /* negate interrupt status */ + writel(AVE_GI_RXOVF, priv->base + AVE_GISR); + + /* permit descriptor */ + ave_desc_switch(ndev, AVE_DESC_RX_PERMIT); + + /* restore MAC receive op */ + writel(rxcr_org, priv->base + AVE_RXCR); +} + +static irqreturn_t ave_irq_handler(int irq, void *netdev) +{ + struct net_device *ndev = (struct net_device *)netdev; + struct ave_private *priv = netdev_priv(ndev); + u32 gimr_val, gisr_val; + + gimr_val = ave_irq_disable_all(ndev); + + /* get interrupt status */ + gisr_val = readl(priv->base + AVE_GISR); + + /* PHY */ + if (gisr_val & AVE_GI_PHY) + writel(AVE_GI_PHY, priv->base + AVE_GISR); + + /* check exceeding packet */ + if (gisr_val & AVE_GI_RXERR) { + writel(AVE_GI_RXERR, priv->base + AVE_GISR); + netdev_err(ndev, "received a packet exceeding the frame buffer\n"); + } + + gisr_val &= gimr_val; + if (!gisr_val) + goto exit_isr; + + /* RxFIFO overflow */ + if (gisr_val & AVE_GI_RXOVF) { + priv->stats_rx.fifo_errors++; + ave_rxfifo_reset(ndev); + goto exit_isr; + } + + /* Rx drop */ + if (gisr_val & AVE_GI_RXDROP) { + priv->stats_rx.dropped++; + writel(AVE_GI_RXDROP, priv->base + AVE_GISR); + } + + /* Rx interval */ + if (gisr_val & AVE_GI_RXIINT) { + napi_schedule(&priv->napi_rx); + /* still force to disable Rx interrupt until NAPI finishes */ + gimr_val &= ~AVE_GI_RXIINT; + } + + /* Tx completed */ + if (gisr_val & AVE_GI_TX) { + napi_schedule(&priv->napi_tx); + /* still force to disable Tx interrupt until NAPI finishes */ + gimr_val &= ~AVE_GI_TX; + } + +exit_isr: + ave_irq_restore(ndev, gimr_val); + + return IRQ_HANDLED; +} + +static int ave_pfsel_start(struct net_device *ndev, unsigned int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + + val = readl(priv->base + AVE_PFEN); + writel(val | BIT(entry), priv->base + AVE_PFEN); + + return 0; +} + +static int ave_pfsel_stop(struct net_device *ndev, unsigned int entry) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 val; + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + + val = readl(priv->base + AVE_PFEN); + writel(val & ~BIT(entry), priv->base + AVE_PFEN); + + return 0; +} + +static int ave_pfsel_set_macaddr(struct net_device *ndev, + unsigned int entry, + const unsigned char *mac_addr, + unsigned int set_size) +{ + struct ave_private *priv = netdev_priv(ndev); + + if (WARN_ON(entry > AVE_PF_SIZE)) + return -EINVAL; + if (WARN_ON(set_size > 6)) + return -EINVAL; + + ave_pfsel_stop(ndev, entry); + + /* set MAC address for the filter */ + ave_hw_write_macaddr(ndev, mac_addr, + AVE_PKTF(entry), AVE_PKTF(entry) + 4); + + /* set byte mask */ + writel(GENMASK(31, set_size) & AVE_PFMBYTE_MASK0, + priv->base + AVE_PFMBYTE(entry)); + writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); + + /* set bit
mask filter */ + writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); + + /* set selector to ring 0 */ + writel(0, priv->base + AVE_PFSEL(entry)); + + /* restart filter */ + ave_pfsel_start(ndev, entry); + + return 0; +} + +static void ave_pfsel_set_promisc(struct net_device *ndev, + unsigned int entry, u32 rxring) +{ + struct ave_private *priv = netdev_priv(ndev); + + if (WARN_ON(entry > AVE_PF_SIZE)) + return; + + ave_pfsel_stop(ndev, entry); + + /* set byte mask */ + writel(AVE_PFMBYTE_MASK0, priv->base + AVE_PFMBYTE(entry)); + writel(AVE_PFMBYTE_MASK1, priv->base + AVE_PFMBYTE(entry) + 4); + + /* set bit mask filter */ + writel(AVE_PFMBIT_MASK, priv->base + AVE_PFMBIT(entry)); + + /* set selector to rxring */ + writel(rxring, priv->base + AVE_PFSEL(entry)); + + ave_pfsel_start(ndev, entry); +} + +static void ave_pfsel_init(struct net_device *ndev) +{ + unsigned char bcast_mac[ETH_ALEN]; + int i; + + eth_broadcast_addr(bcast_mac); + + for (i = 0; i < AVE_PF_SIZE; i++) + ave_pfsel_stop(ndev, i); + + /* promiscuous entry, select ring 0 */ + ave_pfsel_set_promisc(ndev, AVE_PFNUM_FILTER, 0); + + /* unicast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); + + /* broadcast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_BROADCAST, bcast_mac, 6); +} + +static void ave_phy_adjust_link(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + u32 val, txcr, rxcr, rxcr_org; + u16 rmt_adv = 0, lcl_adv = 0; + u8 cap; + + /* set RGMII speed */ + val = readl(priv->base + AVE_TXCR); + val &= ~(AVE_TXCR_TXSPD_100 | AVE_TXCR_TXSPD_1G); + + if (phy_interface_is_rgmii(phydev) && phydev->speed == SPEED_1000) + val |= AVE_TXCR_TXSPD_1G; + else if (phydev->speed == SPEED_100) + val |= AVE_TXCR_TXSPD_100; + + writel(val, priv->base + AVE_TXCR); + + /* set RMII speed (100M/10M only) */ + if (!phy_interface_is_rgmii(phydev)) { + val = readl(priv->base + AVE_LINKSEL); + if (phydev->speed == SPEED_10) + val &= ~AVE_LINKSEL_100M; + else + val |= AVE_LINKSEL_100M; + writel(val, priv->base + AVE_LINKSEL); + } + + /* check current RXCR/TXCR */ + rxcr = readl(priv->base + AVE_RXCR); + txcr = readl(priv->base + AVE_TXCR); + rxcr_org = rxcr; + + if (phydev->duplex) { + rxcr |= AVE_RXCR_FDUPEN; + + if (phydev->pause) + rmt_adv |= LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + if (phydev->advertising & ADVERTISED_Pause) + lcl_adv |= ADVERTISE_PAUSE_CAP; + if (phydev->advertising & ADVERTISED_Asym_Pause) + lcl_adv |= ADVERTISE_PAUSE_ASYM; + + cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (cap & FLOW_CTRL_TX) + txcr |= AVE_TXCR_FLOCTR; + else + txcr &= ~AVE_TXCR_FLOCTR; + if (cap & FLOW_CTRL_RX) + rxcr |= AVE_RXCR_FLOCTR; + else + rxcr &= ~AVE_RXCR_FLOCTR; + } else { + rxcr &= ~AVE_RXCR_FDUPEN; + rxcr &= ~AVE_RXCR_FLOCTR; + txcr &= ~AVE_TXCR_FLOCTR; + } + + if (rxcr_org != rxcr) { + /* disable Rx mac */ + writel(rxcr & ~AVE_RXCR_RXEN, priv->base + AVE_RXCR); + /* change and enable Tx/Rx mac */ + writel(txcr, priv->base + AVE_TXCR); + writel(rxcr, priv->base + AVE_RXCR); + } + + phy_print_status(phydev); +} + +static void ave_macaddr_init(struct net_device *ndev) +{ + ave_hw_write_macaddr(ndev, ndev->dev_addr, AVE_RXMAC1R, AVE_RXMAC2R); + + /* pfsel unicast entry */ + ave_pfsel_set_macaddr(ndev, AVE_PFNUM_UNICAST, ndev->dev_addr, 6); +} + +static int ave_init(struct net_device *ndev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct ave_private *priv = netdev_priv(ndev); +
struct device *dev = ndev->dev.parent; + struct device_node *np = dev->of_node; + struct device_node *mdio_np; + struct phy_device *phydev; + int ret; + + /* enable clk because of hw access until ndo_open */ + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(dev, "can't enable clock\n"); + return ret; + } + ret = reset_control_deassert(priv->rst); + if (ret) { + dev_err(dev, "can't deassert reset\n"); + goto out_clk_disable; + } + + ave_global_reset(ndev); + + mdio_np = of_get_child_by_name(np, "mdio"); + if (!mdio_np) { + dev_err(dev, "mdio node not found\n"); + ret = -EINVAL; + goto out_reset_assert; + } + ret = of_mdiobus_register(priv->mdio, mdio_np); + of_node_put(mdio_np); + if (ret) { + dev_err(dev, "failed to register mdiobus\n"); + goto out_reset_assert; + } + + phydev = of_phy_get_and_connect(ndev, np, ave_phy_adjust_link); + if (!phydev) { + dev_err(dev, "could not attach to PHY\n"); + ret = -ENODEV; + goto out_mdio_unregister; + } + + priv->phydev = phydev; + + phy_ethtool_get_wol(phydev, &wol); + device_set_wakeup_capable(&ndev->dev, !!wol.supported); + + if (!phy_interface_is_rgmii(phydev)) { + phydev->supported &= ~PHY_GBIT_FEATURES; + phydev->supported |= PHY_BASIC_FEATURES; + } + phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + + phy_attached_info(phydev); + + return 0; + +out_mdio_unregister: + mdiobus_unregister(priv->mdio); +out_reset_assert: + reset_control_assert(priv->rst); +out_clk_disable: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static void ave_uninit(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mdio); + + /* disable clk because of hw access after ndo_stop */ + reset_control_assert(priv->rst); + clk_disable_unprepare(priv->clk); +} + +static int ave_open(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + int entry; + int ret; + u32 val; + + ret = request_irq(priv->irq, ave_irq_handler, IRQF_SHARED, ndev->name, + ndev); + if (ret) + return ret; + + priv->tx.desc = kcalloc(priv->tx.ndesc, sizeof(*priv->tx.desc), + GFP_KERNEL); + if (!priv->tx.desc) { + ret = -ENOMEM; + goto out_free_irq; + } + + priv->rx.desc = kcalloc(priv->rx.ndesc, sizeof(*priv->rx.desc), + GFP_KERNEL); + if (!priv->rx.desc) { + kfree(priv->tx.desc); + ret = -ENOMEM; + goto out_free_irq; + } + + /* initialize Tx work and descriptor */ + priv->tx.proc_idx = 0; + priv->tx.done_idx = 0; + for (entry = 0; entry < priv->tx.ndesc; entry++) { + ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, entry, 0); + ave_desc_write_addr(ndev, AVE_DESCID_TX, entry, 0); + } + writel(AVE_TXDC_ADDR_START | + (((priv->tx.ndesc * priv->desc_size) << 16) & AVE_TXDC_SIZE), + priv->base + AVE_TXDC); + + /* initialize Rx work and descriptor */ + priv->rx.proc_idx = 0; + priv->rx.done_idx = 0; + for (entry = 0; entry < priv->rx.ndesc; entry++) { + if (ave_rxdesc_prepare(ndev, entry)) + break; + } + writel(AVE_RXDC0_ADDR_START | + (((priv->rx.ndesc * priv->desc_size) << 16) & AVE_RXDC0_SIZE), + priv->base + AVE_RXDC0); + + ave_desc_switch(ndev, AVE_DESC_START); + + ave_pfsel_init(ndev); + ave_macaddr_init(ndev); + + /* set Rx configuration */ + /* full duplex, enable pause drop, enable flow control */ + val = AVE_RXCR_RXEN | AVE_RXCR_FDUPEN | AVE_RXCR_DRPEN | + AVE_RXCR_FLOCTR | (AVE_MAX_ETHFRAME & AVE_RXCR_MPSIZ_MASK); + writel(val, priv->base + AVE_RXCR); + + /* set Tx configuration */ + /* enable flow control, disable loopback */ + writel(AVE_TXCR_FLOCTR, priv->base +
AVE_TXCR); + + /* enable timer, clear EN, INTM, and mask interval unit (BSCK) */ + val = readl(priv->base + AVE_IIRQC) & AVE_IIRQC_BSCK; + val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); + writel(val, priv->base + AVE_IIRQC); + + val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX; + ave_irq_restore(ndev, val); + + napi_enable(&priv->napi_rx); + napi_enable(&priv->napi_tx); + + phy_start(ndev->phydev); + phy_start_aneg(ndev->phydev); + netif_start_queue(ndev); + + return 0; + +out_free_irq: + disable_irq(priv->irq); + free_irq(priv->irq, ndev); + + return ret; +} + +static int ave_stop(struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + int entry; + + ave_irq_disable_all(ndev); + disable_irq(priv->irq); + free_irq(priv->irq, ndev); + + netif_tx_disable(ndev); + phy_stop(ndev->phydev); + napi_disable(&priv->napi_tx); + napi_disable(&priv->napi_rx); + + ave_desc_switch(ndev, AVE_DESC_STOP); + + /* free Tx buffers */ + for (entry = 0; entry < priv->tx.ndesc; entry++) { + if (!priv->tx.desc[entry].skbs) + continue; + + ave_dma_unmap(ndev, &priv->tx.desc[entry], DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx.desc[entry].skbs); + priv->tx.desc[entry].skbs = NULL; + } + priv->tx.proc_idx = 0; + priv->tx.done_idx = 0; + + /* free Rx buffers */ + for (entry = 0; entry < priv->rx.ndesc; entry++) { + if (!priv->rx.desc[entry].skbs) + continue; + + ave_dma_unmap(ndev, &priv->rx.desc[entry], DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx.desc[entry].skbs); + priv->rx.desc[entry].skbs = NULL; + } + priv->rx.proc_idx = 0; + priv->rx.done_idx = 0; + + kfree(priv->tx.desc); + kfree(priv->rx.desc); + + return 0; +} + +static int ave_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct ave_private *priv = netdev_priv(ndev); + u32 proc_idx, done_idx, ndesc, cmdsts; + int ret, freepkt; + dma_addr_t paddr; + + proc_idx = priv->tx.proc_idx; + done_idx = priv->tx.done_idx; + ndesc = priv->tx.ndesc; + freepkt = ((done_idx + ndesc - 1) - proc_idx) % ndesc; + + /* stop queue when there are not enough entries */ + if (unlikely(freepkt < 1)) { + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + /* add padding for short packets */ + if (skb_put_padto(skb, ETH_ZLEN)) { + priv->stats_tx.dropped++; + return NETDEV_TX_OK; + } + + /* map Tx buffer + * Tx buffer set to the Tx descriptor doesn't have any restriction.
+ */
+ ret = ave_dma_map(ndev, &priv->tx.desc[proc_idx],
+ skb->data, skb->len, DMA_TO_DEVICE, &paddr);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ priv->stats_tx.dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx.desc[proc_idx].skbs = skb;
+
+ ave_desc_write_addr(ndev, AVE_DESCID_TX, proc_idx, paddr);
+
+ cmdsts = AVE_STS_OWN | AVE_STS_1ST | AVE_STS_LAST |
+ (skb->len & AVE_STS_PKTLEN_TX_MASK);
+
+ /* set interrupt per AVE_FORCE_TXINTCNT or when queue is stopped */
+ if (!(proc_idx % AVE_FORCE_TXINTCNT) || netif_queue_stopped(ndev))
+ cmdsts |= AVE_STS_INTR;
+
+ /* disable hardware checksum calculation when the skb does not need it */
+ if (skb->ip_summed == CHECKSUM_NONE ||
+ skb->ip_summed == CHECKSUM_UNNECESSARY)
+ cmdsts |= AVE_STS_NOCSUM;
+
+ ave_desc_write_cmdsts(ndev, AVE_DESCID_TX, proc_idx, cmdsts);
+
+ priv->tx.proc_idx = (proc_idx + 1) % ndesc;
+
+ return NETDEV_TX_OK;
+}
+
+static int ave_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ return phy_mii_ioctl(ndev->phydev, ifr, cmd);
+}
+
+static const u8 v4multi_macadr[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 v6multi_macadr[] = { 0x33, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+static void ave_set_rx_mode(struct net_device *ndev)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *hw_adr;
+ int count, mc_cnt;
+ u32 val;
+
+ /* disable MAC address filtering in promiscuous mode, or when there
+ * are no multicast entries
+ */
+ mc_cnt = netdev_mc_count(ndev);
+ val = readl(priv->base + AVE_RXCR);
+ if (ndev->flags & IFF_PROMISC || !mc_cnt)
+ val &= ~AVE_RXCR_AFEN;
+ else
+ val |= AVE_RXCR_AFEN;
+ writel(val, priv->base + AVE_RXCR);
+
+ /* accept all multicast addresses */
+ if ((ndev->flags & IFF_ALLMULTI) || mc_cnt > AVE_PF_MULTICAST_SIZE) {
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST,
+ v4multi_macadr, 1);
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + 1,
+ v6multi_macadr, 1);
+ } else {
+ /* stop all multicast filters */
+ for (count = 0; count < AVE_PF_MULTICAST_SIZE; count++)
+ ave_pfsel_stop(ndev, AVE_PFNUM_MULTICAST + count);
+
+ /* set multicast addresses */
+ count = 0;
+ netdev_for_each_mc_addr(hw_adr, ndev) {
+ if (count == mc_cnt)
+ break;
+ ave_pfsel_set_macaddr(ndev, AVE_PFNUM_MULTICAST + count,
+ hw_adr->addr, 6);
+ count++;
+ }
+ }
+}
+
+static void ave_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ave_private *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_rx.syncp);
+ stats->rx_packets = priv->stats_rx.packets;
+ stats->rx_bytes = priv->stats_rx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_rx.syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->stats_tx.syncp);
+ stats->tx_packets = priv->stats_tx.packets;
+ stats->tx_bytes = priv->stats_tx.bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->stats_tx.syncp, start));
+
+ stats->rx_errors = priv->stats_rx.errors;
+ stats->tx_errors = priv->stats_tx.errors;
+ stats->rx_dropped = priv->stats_rx.dropped;
+ stats->tx_dropped = priv->stats_tx.dropped;
+ stats->rx_fifo_errors = priv->stats_rx.fifo_errors;
+ stats->collisions = priv->stats_tx.collisions;
+}
+
+static int ave_set_mac_address(struct net_device *ndev, void *p)
+{
+ int ret = eth_mac_addr(ndev, p);
+
+ if (ret)
+ return ret;
+
+ ave_macaddr_init(ndev);
+
+ return 0;
+}
+
+static const struct net_device_ops ave_netdev_ops = {
+ .ndo_init = ave_init,
+ .ndo_uninit = ave_uninit,
+ .ndo_open = ave_open,
+ .ndo_stop = ave_stop,
+ .ndo_start_xmit = ave_start_xmit,
+ .ndo_do_ioctl = ave_ioctl,
+
.ndo_set_rx_mode = ave_set_rx_mode, + .ndo_get_stats64 = ave_get_stats64, + .ndo_set_mac_address = ave_set_mac_address, +}; + +static int ave_probe(struct platform_device *pdev) +{ + const struct ave_soc_data *data; + struct device *dev = &pdev->dev; + char buf[ETHTOOL_FWVERS_LEN]; + phy_interface_t phy_mode; + struct ave_private *priv; + struct net_device *ndev; + struct device_node *np; + struct resource *res; + const void *mac_addr; + void __iomem *base; + u64 dma_mask; + int irq, ret; + u32 ave_id; + + data = of_device_get_match_data(dev); + if (WARN_ON(!data)) + return -EINVAL; + + np = dev->of_node; + phy_mode = of_get_phy_mode(np); + if (phy_mode < 0) { + dev_err(dev, "phy-mode not found\n"); + return -EINVAL; + } + if ((!phy_interface_mode_is_rgmii(phy_mode)) && + phy_mode != PHY_INTERFACE_MODE_RMII && + phy_mode != PHY_INTERFACE_MODE_MII) { + dev_err(dev, "phy-mode is invalid\n"); + return -EINVAL; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "IRQ not found\n"); + return irq; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + ndev = alloc_etherdev(sizeof(struct ave_private)); + if (!ndev) { + dev_err(dev, "can't allocate ethernet device\n"); + return -ENOMEM; + } + + ndev->netdev_ops = &ave_netdev_ops; + ndev->ethtool_ops = &ave_ethtool_ops; + SET_NETDEV_DEV(ndev, dev); + + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_RXCSUM); + + ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN); + + mac_addr = of_get_mac_address(np); + if (mac_addr) + ether_addr_copy(ndev->dev_addr, mac_addr); + + /* if the mac address is invalid, use random mac address */ + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(dev, "Using random MAC address: %pM\n", + ndev->dev_addr); + } + + priv = netdev_priv(ndev); + priv->base = base; + priv->irq = irq; + priv->ndev = ndev; + priv->msg_enable = netif_msg_init(-1, AVE_DEFAULT_MSG_ENABLE); + priv->phy_mode = phy_mode; + priv->data = data; + + if (IS_DESC_64BIT(priv)) { + priv->desc_size = AVE_DESC_SIZE_64; + priv->tx.daddr = AVE_TXDM_64; + priv->rx.daddr = AVE_RXDM_64; + dma_mask = DMA_BIT_MASK(64); + } else { + priv->desc_size = AVE_DESC_SIZE_32; + priv->tx.daddr = AVE_TXDM_32; + priv->rx.daddr = AVE_RXDM_32; + dma_mask = DMA_BIT_MASK(32); + } + ret = dma_set_mask(dev, dma_mask); + if (ret) + goto out_free_netdev; + + priv->tx.ndesc = AVE_NR_TXDESC; + priv->rx.ndesc = AVE_NR_RXDESC; + + u64_stats_init(&priv->stats_tx.syncp); + u64_stats_init(&priv->stats_rx.syncp); + + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto out_free_netdev; + } + + priv->rst = devm_reset_control_get_optional_shared(dev, NULL); + if (IS_ERR(priv->rst)) { + ret = PTR_ERR(priv->rst); + goto out_free_netdev; + } + + priv->mdio = devm_mdiobus_alloc(dev); + if (!priv->mdio) { + ret = -ENOMEM; + goto out_free_netdev; + } + priv->mdio->priv = ndev; + priv->mdio->parent = dev; + priv->mdio->read = ave_mdiobus_read; + priv->mdio->write = ave_mdiobus_write; + priv->mdio->name = "uniphier-mdio"; + snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + + /* Register as a NAPI supported driver */ + netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc); + netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx, + priv->tx.ndesc); + + platform_set_drvdata(pdev, ndev); + + ret = 
register_netdev(ndev); + if (ret) { + dev_err(dev, "failed to register netdevice\n"); + goto out_del_napi; + } + + /* get ID and version */ + ave_id = readl(priv->base + AVE_IDR); + ave_hw_read_version(ndev, buf, sizeof(buf)); + + dev_info(dev, "Socionext %c%c%c%c Ethernet IP %s (irq=%d, phy=%s)\n", + (ave_id >> 24) & 0xff, (ave_id >> 16) & 0xff, + (ave_id >> 8) & 0xff, (ave_id >> 0) & 0xff, + buf, priv->irq, phy_modes(phy_mode)); + + return 0; + +out_del_napi: + netif_napi_del(&priv->napi_rx); + netif_napi_del(&priv->napi_tx); +out_free_netdev: + free_netdev(ndev); + + return ret; +} + +static int ave_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct ave_private *priv = netdev_priv(ndev); + + unregister_netdev(ndev); + netif_napi_del(&priv->napi_rx); + netif_napi_del(&priv->napi_tx); + free_netdev(ndev); + + return 0; +} + +static const struct ave_soc_data ave_pro4_data = { + .is_desc_64bit = false, +}; + +static const struct ave_soc_data ave_pxs2_data = { + .is_desc_64bit = false, +}; + +static const struct ave_soc_data ave_ld11_data = { + .is_desc_64bit = false, +}; + +static const struct ave_soc_data ave_ld20_data = { + .is_desc_64bit = true, +}; + +static const struct of_device_id of_ave_match[] = { + { + .compatible = "socionext,uniphier-pro4-ave4", + .data = &ave_pro4_data, + }, + { + .compatible = "socionext,uniphier-pxs2-ave4", + .data = &ave_pxs2_data, + }, + { + .compatible = "socionext,uniphier-ld11-ave4", + .data = &ave_ld11_data, + }, + { + .compatible = "socionext,uniphier-ld20-ave4", + .data = &ave_ld20_data, + }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, of_ave_match); + +static struct platform_driver ave_driver = { + .probe = ave_probe, + .remove = ave_remove, + .driver = { + .name = "ave", + .of_match_table = of_ave_match, + }, +}; +module_platform_driver(ave_driver); + +MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 7e089bf906b4..2fd8456999f6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -406,7 +406,7 @@ static void dwmac4_display_ring(void *head, unsigned int size, bool rx) pr_info("%s descriptor ring:\n", rx ? 
"RX" : "TX"); for (i = 0; i < size; i++) { - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(p), le32_to_cpu(p->des0), le32_to_cpu(p->des1), le32_to_cpu(p->des2), le32_to_cpu(p->des3)); diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 2a828a312814..b47cb5c4da51 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -428,7 +428,7 @@ static void enh_desc_display_ring(void *head, unsigned int size, bool rx) u64 x; x = *(u64 *)ep; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), (unsigned int)x, (unsigned int)(x >> 32), ep->basic.des2, ep->basic.des3); diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index db4cee57bb24..ebd9e5e00f16 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -288,7 +288,7 @@ static void ndesc_display_ring(void *head, unsigned int size, bool rx) u64 x; x = *(u64 *)p; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", + pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x", i, (unsigned int)virt_to_phys(p), (unsigned int)x, (unsigned int)(x >> 32), p->des2, p->des3); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0323d672e1c5..f99f14c35063 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t) bool stmmac_eee_init(struct stmmac_priv *priv) { struct net_device *ndev = priv->dev; + int interface = priv->plat->interface; unsigned long flags; bool ret = false; + if ((interface != PHY_INTERFACE_MODE_MII) && + (interface != PHY_INTERFACE_MODE_GMII) && + !phy_interface_mode_is_rgmii(interface)) + goto out; + /* Using PCS we cannot dial with the phy registers at this stage * so we do not support extra feature like EEE. 
*/ @@ -3436,9 +3442,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) if (netif_msg_rx_status(priv)) { netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", p, entry, des); - if (frame_len > ETH_FRAME_LEN) - netdev_dbg(priv->dev, "frame size %d, COE: %d\n", - frame_len, status); + netdev_dbg(priv->dev, "frame size %d, COE: %d\n", + frame_len, status); } /* The zero-copy is always used for all the sizes diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 667c44f68dbc..195e0d0add8d 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(rt)) return PTR_ERR(rt); + if (skb_dst(skb)) { + int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) - + GENEVE_BASE_HLEN - info->options_len - 14; + + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); + } + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); @@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) return PTR_ERR(dst); + if (skb_dst(skb)) { + int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) - + GENEVE_BASE_HLEN - info->options_len - 14; + + skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); + } + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 1d025ab9568f..f522715c6595 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -393,7 +393,12 @@ static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) #define MACSEC_PORT_SCB (0x0000) #define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) -#define DEFAULT_SAK_LEN 16 +#define MACSEC_GCM_AES_128_SAK_LEN 16 +#define MACSEC_GCM_AES_256_SAK_LEN 32 + +#define MAX_SAK_LEN MACSEC_GCM_AES_256_SAK_LEN + +#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN #define DEFAULT_SEND_SCI true #define DEFAULT_ENCRYPT false #define DEFAULT_ENCODING_SA 0 @@ -1600,7 +1605,7 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY, .len = MACSEC_KEYID_LEN, }, [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, - .len = MACSEC_MAX_KEY_LEN, }, + .len = MAX_SAK_LEN, }, }; static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) @@ -2362,15 +2367,26 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) { struct macsec_tx_sc *tx_sc = &secy->tx_sc; struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY); + u64 csid; if (!secy_nest) return 1; + switch (secy->key_len) { + case MACSEC_GCM_AES_128_SAK_LEN: + csid = MACSEC_CIPHER_ID_GCM_AES_128; + break; + case MACSEC_GCM_AES_256_SAK_LEN: + csid = MACSEC_CIPHER_ID_GCM_AES_256; + break; + default: + goto cancel; + } + if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci, MACSEC_SECY_ATTR_PAD) || nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, - MACSEC_DEFAULT_CIPHER_ID, - MACSEC_SECY_ATTR_PAD) || + csid, MACSEC_SECY_ATTR_PAD) || nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || @@ -3015,8 +3031,8 @@ static void macsec_setup(struct net_device *dev) eth_zero_addr(dev->broadcast); } -static void macsec_changelink_common(struct net_device *dev, - 
struct nlattr *data[])
+static int macsec_changelink_common(struct net_device *dev,
+ struct nlattr *data[])
 {
 struct macsec_secy *secy;
 struct macsec_tx_sc *tx_sc;
@@ -3056,6 +3072,22 @@ static void macsec_changelink_common(struct net_device *dev,
 if (data[IFLA_MACSEC_VALIDATION])
 secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
+
+ if (data[IFLA_MACSEC_CIPHER_SUITE]) {
+ switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_DEFAULT_CIPHER_ALT:
+ secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
+ break;
+ case MACSEC_CIPHER_ID_GCM_AES_256:
+ secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
 }
 static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3071,9 +3103,7 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
 data[IFLA_MACSEC_PORT])
 return -EINVAL;
- macsec_changelink_common(dev, data);
-
- return 0;
+ return macsec_changelink_common(dev, data);
 }
 static void macsec_del_dev(struct macsec_dev *macsec)
@@ -3270,8 +3300,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 if (err)
 goto unlink;
- if (data)
- macsec_changelink_common(dev, data);
+ if (data) {
+ err = macsec_changelink_common(dev, data);
+ if (err)
+ goto del_dev;
+ }
 err = register_macsec_dev(real_dev, dev);
 if (err < 0)
@@ -3320,7 +3353,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
 }
 switch (csid) {
- case MACSEC_DEFAULT_CIPHER_ID:
+ case MACSEC_CIPHER_ID_GCM_AES_128:
+ case MACSEC_CIPHER_ID_GCM_AES_256:
 case MACSEC_DEFAULT_CIPHER_ALT:
 if (icv_len < MACSEC_MIN_ICV_LEN ||
 icv_len > MACSEC_STD_ICV_LEN)
@@ -3390,12 +3424,24 @@ static int macsec_fill_info(struct sk_buff *skb,
 {
 struct macsec_secy *secy = &macsec_priv(dev)->secy;
 struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ u64 csid;
+
+ switch (secy->key_len) {
+ case MACSEC_GCM_AES_128_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_128;
+ break;
+ case MACSEC_GCM_AES_256_SAK_LEN:
+ csid = MACSEC_CIPHER_ID_GCM_AES_256;
+ break;
+ default:
+ goto nla_put_failure;
+ }
 if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci, IFLA_MACSEC_PAD) ||
 nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
 nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
+ csid, IFLA_MACSEC_PAD) ||
 nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
 nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
 nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a178c5efd33e..a0f2be81d52e 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1444,9 +1444,14 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 return 0;
 unregister_netdev:
+ /* macvlan_uninit would free the macvlan port */
 unregister_netdevice(dev);
+ return err;
 destroy_macvlan_port:
- if (create)
+ /* the macvlan port may already have been freed by macvlan_uninit if
+ * registration failed, so only destroy the port when it is still valid.
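+ * (macvlan_port_get_rtnl() presumably still returns the port only while
+ * it is attached to the lower device; NULL means it is already gone.)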
+ */ + if (create && macvlan_port_get_rtnl(dev)) macvlan_port_destroy(port->dev); return err; } diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index a243fa7ae02f..5134d5c1306c 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -66,7 +66,7 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) return 0; } -static const struct bpf_ext_analyzer_ops nsim_bpf_analyzer_ops = { +static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { .insn_hook = nsim_bpf_verify_insn, }; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 1e190f3bca63..411cf1072bae 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -215,34 +215,22 @@ static int at803x_suspend(struct phy_device *phydev) int value; int wol_enabled; - mutex_lock(&phydev->lock); - value = phy_read(phydev, AT803X_INTR_ENABLE); wol_enabled = value & AT803X_INTR_ENABLE_WOL; - value = phy_read(phydev, MII_BMCR); - if (wol_enabled) - value |= BMCR_ISOLATE; + value = BMCR_ISOLATE; else - value |= BMCR_PDOWN; + value = BMCR_PDOWN; - phy_write(phydev, MII_BMCR, value); - - mutex_unlock(&phydev->lock); + phy_modify(phydev, MII_BMCR, 0, value); return 0; } static int at803x_resume(struct phy_device *phydev) { - int value; - - value = phy_read(phydev, MII_BMCR); - value &= ~(BMCR_PDOWN | BMCR_ISOLATE); - phy_write(phydev, MII_BMCR, value); - - return 0; + return phy_modify(phydev, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE, 0); } static int at803x_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index eb5167210681..001fe1df7557 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -115,37 +115,6 @@ int fixed_phy_set_link_update(struct phy_device *phydev, } EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); -int fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed) -{ - struct fixed_mdio_bus *fmb = &platform_fmb; - struct fixed_phy *fp; - - if (!phydev || phydev->mdio.bus != fmb->mii_bus) - return -EINVAL; - - list_for_each_entry(fp, &fmb->phys, node) { - if (fp->addr == phydev->mdio.addr) { - write_seqcount_begin(&fp->seqcount); -#define _UPD(x) if (changed->x) \ - fp->status.x = status->x - _UPD(link); - _UPD(speed); - _UPD(duplex); - _UPD(pause); - _UPD(asym_pause); -#undef _UPD - fixed_phy_update(fp); - write_seqcount_end(&fp->seqcount); - return 0; - } - } - - return -ENOENT; -} -EXPORT_SYMBOL(fixed_phy_update_state); - int fixed_phy_add(unsigned int irq, int phy_addr, struct fixed_phy_status *status, int link_gpio) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 342325a89d5f..22d9bc9c33a4 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -83,7 +83,7 @@ #define MII_88E1121_PHY_MSCR_REG 21 #define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) -#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) | BIT(4))) +#define MII_88E1121_PHY_MSCR_DELAY_MASK (BIT(5) | BIT(4)) #define MII_88E1121_MISC_TEST 0x1a #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 @@ -96,6 +96,17 @@ #define MII_88E1510_TEMP_SENSOR 0x1b #define MII_88E1510_TEMP_SENSOR_MASK 0xff +#define MII_88E6390_MISC_TEST 0x1b +#define MII_88E6390_MISC_TEST_SAMPLE_1S 0 +#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14) +#define MII_88E6390_MISC_TEST_SAMPLE_DISABLE BIT(15) +#define MII_88E6390_MISC_TEST_SAMPLE_ENABLE 0 +#define 
MII_88E6390_MISC_TEST_SAMPLE_MASK (0x3 << 14) + +#define MII_88E6390_TEMP_SENSOR 0x1c +#define MII_88E6390_TEMP_SENSOR_MASK 0xff +#define MII_88E6390_TEMP_SENSOR_SAMPLES 10 + #define MII_88E1318S_PHY_MSCR1_REG 16 #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) @@ -177,27 +188,19 @@ struct marvell_priv { struct device *hwmon_dev; }; -static int marvell_get_page(struct phy_device *phydev) +static int marvell_read_page(struct phy_device *phydev) { - return phy_read(phydev, MII_MARVELL_PHY_PAGE); + return __phy_read(phydev, MII_MARVELL_PHY_PAGE); } -static int marvell_set_page(struct phy_device *phydev, int page) +static int marvell_write_page(struct phy_device *phydev, int page) { - return phy_write(phydev, MII_MARVELL_PHY_PAGE, page); + return __phy_write(phydev, MII_MARVELL_PHY_PAGE, page); } -static int marvell_get_set_page(struct phy_device *phydev, int page) +static int marvell_set_page(struct phy_device *phydev, int page) { - int oldpage = marvell_get_page(phydev); - - if (oldpage < 0) - return oldpage; - - if (page != oldpage) - return marvell_set_page(phydev, page); - - return 0; + return phy_write(phydev, MII_MARVELL_PHY_PAGE, page); } static int marvell_ack_interrupt(struct phy_device *phydev) @@ -399,7 +402,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev) static int marvell_of_reg_init(struct phy_device *phydev) { const __be32 *paddr; - int len, i, saved_page, current_page, ret; + int len, i, saved_page, current_page, ret = 0; if (!phydev->mdio.dev.of_node) return 0; @@ -409,12 +412,11 @@ static int marvell_of_reg_init(struct phy_device *phydev) if (!paddr || len < (4 * sizeof(*paddr))) return 0; - saved_page = marvell_get_page(phydev); + saved_page = phy_save_page(phydev); if (saved_page < 0) - return saved_page; + goto err; current_page = saved_page; - ret = 0; len /= sizeof(*paddr); for (i = 0; i < len - 3; i += 4) { u16 page = be32_to_cpup(paddr + i); @@ -425,14 +427,14 @@ static int marvell_of_reg_init(struct phy_device *phydev) if (page != current_page) { current_page = page; - ret = marvell_set_page(phydev, page); + ret = marvell_write_page(phydev, page); if (ret < 0) goto err; } val = 0; if (mask) { - val = phy_read(phydev, reg); + val = __phy_read(phydev, reg); if (val < 0) { ret = val; goto err; @@ -441,17 +443,12 @@ static int marvell_of_reg_init(struct phy_device *phydev) } val |= val_bits; - ret = phy_write(phydev, reg, val); + ret = __phy_write(phydev, reg, val); if (ret < 0) goto err; } err: - if (current_page != saved_page) { - i = marvell_set_page(phydev, saved_page); - if (ret == 0) - ret = i; - } - return ret; + return phy_restore_page(phydev, saved_page, ret); } #else static int marvell_of_reg_init(struct phy_device *phydev) @@ -462,34 +459,21 @@ static int marvell_of_reg_init(struct phy_device *phydev) static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) { - int err, oldpage, mscr; - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (oldpage < 0) - return oldpage; - - mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG); - if (mscr < 0) { - err = mscr; - goto out; - } - - mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK; + int mscr; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) - mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | - MII_88E1121_PHY_MSCR_TX_DELAY); + mscr = MII_88E1121_PHY_MSCR_RX_DELAY | + MII_88E1121_PHY_MSCR_TX_DELAY; else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) - mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; + mscr = MII_88E1121_PHY_MSCR_RX_DELAY; else if (phydev->interface == 
PHY_INTERFACE_MODE_RGMII_TXID) - mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; - - err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); - -out: - marvell_set_page(phydev, oldpage); + mscr = MII_88E1121_PHY_MSCR_TX_DELAY; + else + mscr = 0; - return err; + return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1121_PHY_MSCR_REG, + MII_88E1121_PHY_MSCR_DELAY_MASK, mscr); } static int m88e1121_config_aneg(struct phy_device *phydev) @@ -498,7 +482,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (phy_interface_is_rgmii(phydev)) { err = m88e1121_config_aneg_rgmii_delays(phydev); - if (err) + if (err < 0) return err; } @@ -515,20 +499,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev) static int m88e1318_config_aneg(struct phy_device *phydev) { - int err, oldpage, mscr; - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (oldpage < 0) - return oldpage; - - mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG); - mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD; - - err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr); - if (err < 0) - return err; + int err; - err = marvell_set_page(phydev, oldpage); + err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE, + MII_88E1318S_PHY_MSCR1_REG, + 0, MII_88E1318S_PHY_MSCR1_PAD_ODD); if (err < 0) return err; @@ -700,19 +675,14 @@ static int m88e1116r_config_init(struct phy_device *phydev) static int m88e3016_config_init(struct phy_device *phydev) { - int reg; + int ret; /* Enable Scrambler and Auto-Crossover */ - reg = phy_read(phydev, MII_88E3016_PHY_SPEC_CTRL); - if (reg < 0) - return reg; - - reg &= ~MII_88E3016_DISABLE_SCRAMBLER; - reg |= MII_88E3016_AUTO_MDIX_CROSSOVER; - - reg = phy_write(phydev, MII_88E3016_PHY_SPEC_CTRL, reg); - if (reg < 0) - return reg; + ret = phy_modify(phydev, MII_88E3016_PHY_SPEC_CTRL, + MII_88E3016_DISABLE_SCRAMBLER, + MII_88E3016_AUTO_MDIX_CROSSOVER); + if (ret < 0) + return ret; return marvell_config_init(phydev); } @@ -721,42 +691,33 @@ static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev, u16 mode, int fibre_copper_auto) { - int temp; - - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_AUTO | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= mode; - if (fibre_copper_auto) - temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; + mode |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; - return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + return phy_modify(phydev, MII_M1111_PHY_EXT_SR, + MII_M1111_HWCFG_MODE_MASK | + MII_M1111_HWCFG_FIBER_COPPER_AUTO | + MII_M1111_HWCFG_FIBER_COPPER_RES, + mode); } static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev) { - int temp; - - temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); - if (temp < 0) - return temp; + int delay; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { - temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY); + delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY; } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - temp &= ~MII_M1111_RGMII_TX_DELAY; - temp |= MII_M1111_RGMII_RX_DELAY; + delay = MII_M1111_RGMII_RX_DELAY; } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - temp &= ~MII_M1111_RGMII_RX_DELAY; - temp |= MII_M1111_RGMII_TX_DELAY; + delay = MII_M1111_RGMII_TX_DELAY; + } else { + delay = 0; } - return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); + return phy_modify(phydev, MII_M1111_PHY_EXT_CR, + MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY, 
+ delay); } static int m88e1111_config_init_rgmii(struct phy_device *phydev) @@ -802,7 +763,7 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev) int err; err = m88e1111_config_init_rgmii_delays(phydev); - if (err) + if (err < 0) return err; err = m88e1111_config_init_hwcfg_mode( @@ -829,7 +790,7 @@ static int m88e1111_config_init(struct phy_device *phydev) if (phy_interface_is_rgmii(phydev)) { err = m88e1111_config_init_rgmii(phydev); - if (err) + if (err < 0) return err; } @@ -854,20 +815,15 @@ static int m88e1111_config_init(struct phy_device *phydev) static int m88e1121_config_init(struct phy_device *phydev) { - int err, oldpage; - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_LED_PAGE); - if (oldpage < 0) - return oldpage; + int err; /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */ - err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL, - MII_88E1121_PHY_LED_DEF); + err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, + MII_88E1121_PHY_LED_CTRL, + MII_88E1121_PHY_LED_DEF); if (err < 0) return err; - marvell_set_page(phydev, oldpage); - /* Set marvell,reg-init configuration from device tree */ return marvell_config_init(phydev); } @@ -875,7 +831,6 @@ static int m88e1121_config_init(struct phy_device *phydev) static int m88e1510_config_init(struct phy_device *phydev) { int err; - int temp; /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { @@ -887,16 +842,15 @@ static int m88e1510_config_init(struct phy_device *phydev) return err; /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */ - temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1); - temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK; - temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII; - err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, + MII_88E1510_GEN_CTRL_REG_1_MODE_MASK, + MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII); if (err < 0) return err; /* PHY reset is necessary after changing MODE[2:0] */ - temp |= MII_88E1510_GEN_CTRL_REG_1_RESET; - err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + err = phy_modify(phydev, MII_88E1510_GEN_CTRL_REG_1, 0, + MII_88E1510_GEN_CTRL_REG_1_RESET); if (err < 0) return err; @@ -1002,7 +956,6 @@ static int m88e1149_config_init(struct phy_device *phydev) static int m88e1145_config_init_rgmii(struct phy_device *phydev) { - int temp; int err; err = m88e1111_config_init_rgmii_delays(phydev); @@ -1014,15 +967,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev) if (err < 0) return err; - temp = phy_read(phydev, 0x1e); - if (temp < 0) - return temp; - - temp &= 0xf03f; - temp |= 2 << 9; /* 36 ohm */ - temp |= 2 << 6; /* 39 ohm */ - - err = phy_write(phydev, 0x1e, temp); + err = phy_modify(phydev, 0x1e, 0x0fc0, + 2 << 9 | /* 36 ohm */ + 2 << 6); /* 39 ohm */ if (err < 0) return err; @@ -1398,100 +1345,98 @@ static int m88e1121_did_interrupt(struct phy_device *phydev) static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { + int oldpage, ret = 0; + wol->supported = WAKE_MAGIC; wol->wolopts = 0; - if (marvell_set_page(phydev, MII_MARVELL_WOL_PAGE) < 0) - return; + oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE); + if (oldpage < 0) + goto error; - if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) & - MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE) + ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL); + if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE) wol->wolopts |= WAKE_MAGIC; - if 
(marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE) < 0) - return; +error: + phy_restore_page(phydev, oldpage, ret); } static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { - int err, oldpage, temp; + int err = 0, oldpage; - oldpage = marvell_get_page(phydev); + oldpage = phy_save_page(phydev); + if (oldpage < 0) + goto error; if (wol->wolopts & WAKE_MAGIC) { /* Explicitly switch to page 0x00, just to be sure */ - err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_COPPER_PAGE); if (err < 0) - return err; + goto error; /* Enable the WOL interrupt */ - temp = phy_read(phydev, MII_88E1318S_PHY_CSIER); - temp |= MII_88E1318S_PHY_CSIER_WOL_EIE; - err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp); + err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, + MII_88E1318S_PHY_CSIER_WOL_EIE); if (err < 0) - return err; + goto error; - err = marvell_set_page(phydev, MII_MARVELL_LED_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_LED_PAGE); if (err < 0) - return err; + goto error; /* Setup LED[2] as interrupt pin (active low) */ - temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR); - temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT; - temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE; - temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW; - err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp); + err = __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR, + MII_88E1318S_PHY_LED_TCR_FORCE_INT, + MII_88E1318S_PHY_LED_TCR_INTn_ENABLE | + MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW); if (err < 0) - return err; + goto error; - err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE); if (err < 0) - return err; + goto error; /* Store the device address for the magic packet */ - err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2, + err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2, ((phydev->attached_dev->dev_addr[5] << 8) | phydev->attached_dev->dev_addr[4])); if (err < 0) - return err; - err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1, + goto error; + err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1, ((phydev->attached_dev->dev_addr[3] << 8) | phydev->attached_dev->dev_addr[2])); if (err < 0) - return err; - err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0, + goto error; + err = __phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0, ((phydev->attached_dev->dev_addr[1] << 8) | phydev->attached_dev->dev_addr[0])); if (err < 0) - return err; + goto error; /* Clear WOL status and enable magic packet matching */ - temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL); - temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS; - temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE; - err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp); + err = __phy_modify(phydev, MII_88E1318S_PHY_WOL_CTRL, 0, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS | + MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE); if (err < 0) - return err; + goto error; } else { - err = marvell_set_page(phydev, MII_MARVELL_WOL_PAGE); + err = marvell_write_page(phydev, MII_MARVELL_WOL_PAGE); if (err < 0) - return err; + goto error; /* Clear WOL status and disable magic packet matching */ - temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL); - temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS; - temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE; - err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp); + err = __phy_modify(phydev, 
MII_88E1318S_PHY_WOL_CTRL, + MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS); if (err < 0) - return err; + goto error; } - err = marvell_set_page(phydev, oldpage); - if (err < 0) - return err; - - return 0; +error: + return phy_restore_page(phydev, oldpage, err); } static int marvell_get_sset_count(struct phy_device *phydev) @@ -1519,14 +1464,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) { struct marvell_hw_stat stat = marvell_hw_stats[i]; struct marvell_priv *priv = phydev->priv; - int oldpage, val; + int val; u64 ret; - oldpage = marvell_get_set_page(phydev, stat.page); - if (oldpage < 0) - return UINT64_MAX; - - val = phy_read(phydev, stat.reg); + val = phy_read_paged(phydev, stat.page, stat.reg); if (val < 0) { ret = UINT64_MAX; } else { @@ -1535,8 +1476,6 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) ret = priv->stats[i]; } - marvell_set_page(phydev, oldpage); - return ret; } @@ -1553,51 +1492,44 @@ static void marvell_get_stats(struct phy_device *phydev, static int m88e1121_get_temp(struct phy_device *phydev, long *temp) { int oldpage; - int ret; + int ret = 0; int val; *temp = 0; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } + oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE); + if (oldpage < 0) + goto error; /* Enable temperature sensor */ - ret = phy_read(phydev, MII_88E1121_MISC_TEST); + ret = __phy_read(phydev, MII_88E1121_MISC_TEST); if (ret < 0) goto error; - ret = phy_write(phydev, MII_88E1121_MISC_TEST, - ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + ret = __phy_write(phydev, MII_88E1121_MISC_TEST, + ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); if (ret < 0) goto error; /* Wait for temperature to stabilize */ usleep_range(10000, 12000); - val = phy_read(phydev, MII_88E1121_MISC_TEST); + val = __phy_read(phydev, MII_88E1121_MISC_TEST); if (val < 0) { ret = val; goto error; } /* Disable temperature sensor */ - ret = phy_write(phydev, MII_88E1121_MISC_TEST, - ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); + ret = __phy_write(phydev, MII_88E1121_MISC_TEST, + ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN); if (ret < 0) goto error; *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000; error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - - return ret; + return phy_restore_page(phydev, oldpage, ret); } static int m88e1121_hwmon_read(struct device *dev, @@ -1671,118 +1603,64 @@ static const struct hwmon_chip_info m88e1121_hwmon_chip_info = { static int m88e1510_get_temp(struct phy_device *phydev, long *temp) { - int oldpage; int ret; *temp = 0; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } - - ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR); + ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1510_TEMP_SENSOR); if (ret < 0) - goto error; + return ret; *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000; -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - - return ret; + return 0; } static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp) { - int oldpage; int ret; *temp = 0; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return 
oldpage; - } - - ret = phy_read(phydev, MII_88E1121_MISC_TEST); + ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1121_MISC_TEST); if (ret < 0) - goto error; + return ret; *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >> MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25; /* convert to mC */ *temp *= 1000; -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - - return ret; + return 0; } static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp) { - int oldpage; - int ret; - - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } - - ret = phy_read(phydev, MII_88E1121_MISC_TEST); - if (ret < 0) - goto error; - temp = temp / 1000; temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); - ret = phy_write(phydev, MII_88E1121_MISC_TEST, - (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) | - (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT)); -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); - - return ret; + return phy_modify_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1121_MISC_TEST, + MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK, + temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT); } static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm) { - int oldpage; int ret; *alarm = false; - mutex_lock(&phydev->lock); - - oldpage = marvell_get_set_page(phydev, MII_MARVELL_MISC_TEST_PAGE); - if (oldpage < 0) { - mutex_unlock(&phydev->lock); - return oldpage; - } - - ret = phy_read(phydev, MII_88E1121_MISC_TEST); + ret = phy_read_paged(phydev, MII_MARVELL_MISC_TEST_PAGE, + MII_88E1121_MISC_TEST); if (ret < 0) - goto error; - *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ); + return ret; -error: - marvell_set_page(phydev, oldpage); - mutex_unlock(&phydev->lock); + *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ); - return ret; + return 0; } static int m88e1510_hwmon_read(struct device *dev, @@ -1871,6 +1749,123 @@ static const struct hwmon_chip_info m88e1510_hwmon_chip_info = { .info = m88e1510_hwmon_info, }; +static int m88e6390_get_temp(struct phy_device *phydev, long *temp) +{ + int sum = 0; + int oldpage; + int ret = 0; + int i; + + *temp = 0; + + oldpage = phy_select_page(phydev, MII_MARVELL_MISC_TEST_PAGE); + if (oldpage < 0) + goto error; + + /* Enable temperature sensor */ + ret = __phy_read(phydev, MII_88E6390_MISC_TEST); + if (ret < 0) + goto error; + + ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK; + ret |= MII_88E6390_MISC_TEST_SAMPLE_ENABLE | + MII_88E6390_MISC_TEST_SAMPLE_1S; + + ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + /* Reading the temperature sense has an errata. You need to read + * a number of times and take an average. 
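+ * Ten samples (MII_88E6390_TEMP_SENSOR_SAMPLES) are averaged in the
+ * loop below, with each raw reading masked to its low byte first.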
+ */ + for (i = 0; i < MII_88E6390_TEMP_SENSOR_SAMPLES; i++) { + ret = __phy_read(phydev, MII_88E6390_TEMP_SENSOR); + if (ret < 0) + goto error; + sum += ret & MII_88E6390_TEMP_SENSOR_MASK; + } + + sum /= MII_88E6390_TEMP_SENSOR_SAMPLES; + *temp = (sum - 75) * 1000; + + /* Disable temperature sensor */ + ret = __phy_read(phydev, MII_88E6390_MISC_TEST); + if (ret < 0) + goto error; + + ret = ret & ~MII_88E6390_MISC_TEST_SAMPLE_MASK; + ret |= MII_88E6390_MISC_TEST_SAMPLE_DISABLE; + + ret = __phy_write(phydev, MII_88E6390_MISC_TEST, ret); + +error: + phy_restore_page(phydev, oldpage, ret); + + return ret; +} + +static int m88e6390_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + struct phy_device *phydev = dev_get_drvdata(dev); + int err; + + switch (attr) { + case hwmon_temp_input: + err = m88e6390_get_temp(phydev, temp); + break; + default: + return -EOPNOTSUPP; + } + + return err; +} + +static umode_t m88e6390_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + default: + return 0; + } +} + +static u32 m88e6390_hwmon_temp_config[] = { + HWMON_T_INPUT, + 0 +}; + +static const struct hwmon_channel_info m88e6390_hwmon_temp = { + .type = hwmon_temp, + .config = m88e6390_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *m88e6390_hwmon_info[] = { + &m88e1121_hwmon_chip, + &m88e6390_hwmon_temp, + NULL +}; + +static const struct hwmon_ops m88e6390_hwmon_hwmon_ops = { + .is_visible = m88e6390_hwmon_is_visible, + .read = m88e6390_hwmon_read, +}; + +static const struct hwmon_chip_info m88e6390_hwmon_chip_info = { + .ops = &m88e6390_hwmon_hwmon_ops, + .info = m88e6390_hwmon_info, +}; + static int marvell_hwmon_name(struct phy_device *phydev) { struct marvell_priv *priv = phydev->priv; @@ -1917,6 +1912,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev) { return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info); } + +static int m88e6390_hwmon_probe(struct phy_device *phydev) +{ + return marvell_hwmon_probe(phydev, &m88e6390_hwmon_chip_info); +} #else static int m88e1121_hwmon_probe(struct phy_device *phydev) { @@ -1927,6 +1927,11 @@ static int m88e1510_hwmon_probe(struct phy_device *phydev) { return 0; } + +static int m88e6390_hwmon_probe(struct phy_device *phydev) +{ + return 0; +} #endif static int marvell_probe(struct phy_device *phydev) @@ -1964,6 +1969,17 @@ static int m88e1510_probe(struct phy_device *phydev) return m88e1510_hwmon_probe(phydev); } +static int m88e6390_probe(struct phy_device *phydev) +{ + int err; + + err = marvell_probe(phydev); + if (err) + return err; + + return m88e6390_hwmon_probe(phydev); +} + static struct phy_driver marvell_drivers[] = { { .phy_id = MARVELL_PHY_ID_88E1101, @@ -1978,6 +1994,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -1995,6 +2013,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ 
-2013,6 +2033,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2030,6 +2052,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2049,6 +2073,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2070,6 +2096,8 @@ static struct phy_driver marvell_drivers[] = { .set_wol = &m88e1318_set_wol, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2088,6 +2116,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2105,6 +2135,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2122,6 +2154,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2138,6 +2172,8 @@ static struct phy_driver marvell_drivers[] = { .config_intr = &marvell_config_intr, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2159,6 +2195,8 @@ static struct phy_driver marvell_drivers[] = { .set_wol = &m88e1318_set_wol, .resume = &marvell_resume, .suspend = &marvell_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2179,6 +2217,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = &genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, @@ -2198,6 +2238,8 @@ static struct phy_driver marvell_drivers[] = { .did_interrupt = &m88e1121_did_interrupt, .resume = &genphy_resume, .suspend = 
&genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
 .get_sset_count = marvell_get_sset_count,
 .get_strings = marvell_get_strings,
 .get_stats = marvell_get_stats,
@@ -2217,6 +2259,8 @@ static struct phy_driver marvell_drivers[] = {
 .did_interrupt = &m88e1121_did_interrupt,
 .resume = &genphy_resume,
 .suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
 .get_sset_count = marvell_get_sset_count,
 .get_strings = marvell_get_strings,
 .get_stats = marvell_get_stats,
@@ -2227,7 +2271,7 @@ static struct phy_driver marvell_drivers[] = {
 .name = "Marvell 88E6390",
 .features = PHY_GBIT_FEATURES,
 .flags = PHY_HAS_INTERRUPT,
- .probe = m88e1510_probe,
+ .probe = m88e6390_probe,
 .config_init = &marvell_config_init,
 .config_aneg = &m88e1510_config_aneg,
 .read_status = &marvell_read_status,
@@ -2236,6 +2280,8 @@ static struct phy_driver marvell_drivers[] = {
 .did_interrupt = &m88e1121_did_interrupt,
 .resume = &genphy_resume,
 .suspend = &genphy_suspend,
+ .read_page = marvell_read_page,
+ .write_page = marvell_write_page,
 .get_sset_count = marvell_get_sset_count,
 .get_strings = marvell_get_strings,
 .get_stats = marvell_get_stats,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index f0cfba4e758b..8a0bd98fdec7 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -6,12 +6,18 @@
 *
 * There appear to be several different data paths through the PHY which
 * are automatically managed by the PHY. The following has been determined
- * via observation and experimentation:
+ * via observation and experimentation for a setup using single-lane Serdes:
 *
 * SGMII PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for <= 1G)
 * 10GBASE-KR PHYXS -- BASE-T PCS -- 10G PMA -- AN -- Copper (for 10G)
 * 10GBASE-KR PHYXS -- BASE-R PCS -- Fiber
 *
+ * With XAUI, observation shows:
+ *
+ * XAUI PHYXS -- <appropriate PCS as above>
+ *
+ * and no switching of the host interface mode occurs.
+ *
 * If both the fiber and copper ports are connected, the first to gain
 * link takes priority and the other port is completely locked out.
 */
@@ -23,19 +29,17 @@
 enum {
 MV_PCS_BASE_R = 0x1000,
 MV_PCS_1000BASEX = 0x2000,
+ MV_PCS_PAIRSWAP = 0x8182,
+ MV_PCS_PAIRSWAP_MASK = 0x0003,
+ MV_PCS_PAIRSWAP_AB = 0x0002,
+ MV_PCS_PAIRSWAP_NONE = 0x0003,
+
 /* These registers appear at 0x800X and 0xa00X - the 0xa00X control
 * registers appear to set themselves to the 0x800X when AN is
 * restarted, but status registers appear readable from either.
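 * (After this change only the 0x800X copies are used: the MV_AN_RESULT
 * register at 0xa016 is dropped in favour of phy_resolve_aneg_linkmode().)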
*/ MV_AN_CTRL1000 = 0x8000, /* 1000base-T control register */ MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */ - - /* This register appears to reflect the copper status */ - MV_AN_RESULT = 0xa016, - MV_AN_RESULT_SPD_10 = BIT(12), - MV_AN_RESULT_SPD_100 = BIT(13), - MV_AN_RESULT_SPD_1000 = BIT(14), - MV_AN_RESULT_SPD_10000 = BIT(15), }; static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, @@ -149,12 +153,18 @@ static int mv3310_config_init(struct phy_device *phydev) if (val & MDIO_PMA_EXTABLE_1000BKX) __set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, supported); - if (val & MDIO_PMA_EXTABLE_100BTX) + if (val & MDIO_PMA_EXTABLE_100BTX) { __set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, supported); - if (val & MDIO_PMA_EXTABLE_10BT) + __set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + supported); + } + if (val & MDIO_PMA_EXTABLE_10BT) { __set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported); + __set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + supported); + } } if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported)) @@ -174,6 +184,9 @@ static int mv3310_config_aneg(struct phy_device *phydev) u32 advertising; int ret; + /* We don't support manual MDI control */ + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + if (phydev->autoneg == AUTONEG_DISABLE) { ret = genphy_c45_pma_setup_forced(phydev); if (ret < 0) @@ -232,6 +245,24 @@ static int mv3310_aneg_done(struct phy_device *phydev) return genphy_c45_aneg_done(phydev); } +static void mv3310_update_interface(struct phy_device *phydev) +{ + if ((phydev->interface == PHY_INTERFACE_MODE_SGMII || + phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) { + /* The PHY automatically switches its serdes interface (and + * active PHYXS instance) between Cisco SGMII and 10GBase-KR + * modes according to the speed. Florian suggests setting + * phydev->interface to communicate this to the MAC. Only do + * this if we are already in either SGMII or 10GBase-KR mode. + */ + if (phydev->speed == SPEED_10000) + phydev->interface = PHY_INTERFACE_MODE_10GKR; + else if (phydev->speed >= SPEED_10 && + phydev->speed < SPEED_10000) + phydev->interface = PHY_INTERFACE_MODE_SGMII; + } +} + /* 10GBASE-ER,LR,LRM,SR do not support autonegotiation. 
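 * This path therefore forces SPEED_10000/DUPLEX_FULL and only refreshes
 * the host interface mode via mv3310_update_interface().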
*/ static int mv3310_read_10gbr_status(struct phy_device *phydev) { @@ -239,8 +270,7 @@ static int mv3310_read_10gbr_status(struct phy_device *phydev) phydev->speed = SPEED_10000; phydev->duplex = DUPLEX_FULL; - if (phydev->interface == PHY_INTERFACE_MODE_SGMII) - phydev->interface = PHY_INTERFACE_MODE_10GKR; + mv3310_update_interface(phydev); return 0; } @@ -263,6 +293,7 @@ static int mv3310_read_status(struct phy_device *phydev) phydev->link = 0; phydev->pause = 0; phydev->asym_pause = 0; + phydev->mdix = 0; val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_BASE_R + MDIO_STAT1); if (val < 0) @@ -293,22 +324,8 @@ static int mv3310_read_status(struct phy_device *phydev) phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val); - if (phydev->autoneg == AUTONEG_ENABLE) { - val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_RESULT); - if (val < 0) - return val; - - if (val & MV_AN_RESULT_SPD_10000) - phydev->speed = SPEED_10000; - else if (val & MV_AN_RESULT_SPD_1000) - phydev->speed = SPEED_1000; - else if (val & MV_AN_RESULT_SPD_100) - phydev->speed = SPEED_100; - else if (val & MV_AN_RESULT_SPD_10) - phydev->speed = SPEED_10; - - phydev->duplex = DUPLEX_FULL; - } + if (phydev->autoneg == AUTONEG_ENABLE) + phy_resolve_aneg_linkmode(phydev); } if (phydev->autoneg != AUTONEG_ENABLE) { @@ -317,21 +334,30 @@ static int mv3310_read_status(struct phy_device *phydev) return val; } - if ((phydev->interface == PHY_INTERFACE_MODE_SGMII || - phydev->interface == PHY_INTERFACE_MODE_10GKR) && phydev->link) { - /* The PHY automatically switches its serdes interface (and - * active PHYXS instance) between Cisco SGMII and 10GBase-KR - * modes according to the speed. Florian suggests setting - * phydev->interface to communicate this to the MAC. Only do - * this if we are already in either SGMII or 10GBase-KR mode. 
- */ - if (phydev->speed == SPEED_10000) - phydev->interface = PHY_INTERFACE_MODE_10GKR; - else if (phydev->speed >= SPEED_10 && - phydev->speed < SPEED_10000) - phydev->interface = PHY_INTERFACE_MODE_SGMII; + if (phydev->speed == SPEED_10000) { + val = genphy_c45_read_mdix(phydev); + if (val < 0) + return val; + } else { + val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PAIRSWAP); + if (val < 0) + return val; + + switch (val & MV_PCS_PAIRSWAP_MASK) { + case MV_PCS_PAIRSWAP_AB: + phydev->mdix = ETH_TP_MDI_X; + break; + case MV_PCS_PAIRSWAP_NONE: + phydev->mdix = ETH_TP_MDI; + break; + default: + phydev->mdix = ETH_TP_MDI_INVALID; + break; + } } + mv3310_update_interface(phydev); + return 0; } @@ -341,7 +367,9 @@ static struct phy_driver mv3310_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "mv88x3310", .features = SUPPORTED_10baseT_Full | + SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 08e0647b85e2..8d370667fa1b 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -205,6 +205,8 @@ static int unimac_mdio_probe(struct platform_device *pdev) return -ENOMEM; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) + return -EINVAL; /* Just ioremap, as this MDIO block is usually integrated into an * Ethernet MAC controller register range diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c index 135296508a7e..6425ce04d3f9 100644 --- a/drivers/net/phy/mdio-sun4i.c +++ b/drivers/net/phy/mdio-sun4i.c @@ -118,8 +118,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev) data->regulator = devm_regulator_get(&pdev->dev, "phy"); if (IS_ERR(data->regulator)) { - if (PTR_ERR(data->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(data->regulator) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_out_free_mdiobus; + } dev_info(&pdev->dev, "no regulator found\n"); data->regulator = NULL; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index a0f34c385cad..88272b3ac2e2 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -47,13 +47,10 @@ #include "mdio-boardinfo.h" -int mdiobus_register_device(struct mdio_device *mdiodev) +static int mdiobus_register_gpiod(struct mdio_device *mdiodev) { struct gpio_desc *gpiod = NULL; - if (mdiodev->bus->mdio_map[mdiodev->addr]) - return -EBUSY; - /* Deassert the optional reset signal */ if (mdiodev->dev.of_node) gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode, @@ -69,6 +66,22 @@ int mdiobus_register_device(struct mdio_device *mdiodev) /* Assert the reset signal again */ mdio_device_reset(mdiodev, 1); + return 0; +} + +int mdiobus_register_device(struct mdio_device *mdiodev) +{ + int err; + + if (mdiodev->bus->mdio_map[mdiodev->addr]) + return -EBUSY; + + if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) { + err = mdiobus_register_gpiod(mdiodev); + if (err) + return err; + } + mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev; return 0; @@ -515,6 +528,55 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) EXPORT_SYMBOL(mdiobus_scan); /** + * __mdiobus_read - Unlocked version of the mdiobus_read function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to read + * + * Read a MDIO bus register. Caller must hold the mdio bus lock. + * + * NOTE: MUST NOT be called from interrupt context. 
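+ *
+ * A typical caller is a paged-access helper that takes the bus lock once
+ * around a page switch plus the register access. A minimal sketch, assuming
+ * the phy_select_page()/phy_restore_page() helpers from this series:
+ *
+ *	int oldpage, val = 0;
+ *
+ *	oldpage = phy_select_page(phydev, page);
+ *	if (oldpage >= 0)
+ *		val = __phy_read(phydev, reg);
+ *	return phy_restore_page(phydev, oldpage, val);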
+ */ +int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) +{ + int retval; + + WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock)); + + retval = bus->read(bus, addr, regnum); + + trace_mdio_access(bus, 1, addr, regnum, retval, retval); + + return retval; +} +EXPORT_SYMBOL(__mdiobus_read); + +/** + * __mdiobus_write - Unlocked version of the mdiobus_write function + * @bus: the mii_bus struct + * @addr: the phy address + * @regnum: register number to write + * @val: value to write to @regnum + * + * Write a MDIO bus register. Caller must hold the mdio bus lock. + * + * NOTE: MUST NOT be called from interrupt context. + */ +int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) +{ + int err; + + WARN_ON_ONCE(!mutex_is_locked(&bus->mdio_lock)); + + err = bus->write(bus, addr, regnum, val); + + trace_mdio_access(bus, 0, addr, regnum, val, err); + + return err; +} +EXPORT_SYMBOL(__mdiobus_write); + +/** * mdiobus_read_nested - Nested version of the mdiobus_read function * @bus: the mii_bus struct * @addr: the phy address @@ -534,11 +596,9 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum) BUG_ON(in_interrupt()); mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - retval = bus->read(bus, addr, regnum); + retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 1, addr, regnum, retval, retval); - return retval; } EXPORT_SYMBOL(mdiobus_read_nested); @@ -560,11 +620,9 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); - retval = bus->read(bus, addr, regnum); + retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 1, addr, regnum, retval, retval); - return retval; } EXPORT_SYMBOL(mdiobus_read); @@ -590,11 +648,9 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val) BUG_ON(in_interrupt()); mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); - err = bus->write(bus, addr, regnum, val); + err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 0, addr, regnum, val, err); - return err; } EXPORT_SYMBOL(mdiobus_write_nested); @@ -617,11 +673,9 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); - err = bus->write(bus, addr, regnum, val); + err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); - trace_mdio_access(bus, 0, addr, regnum, val, err); - return err; } EXPORT_SYMBOL(mdiobus_write); diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index 843c1dde93e4..c924700cf37b 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -126,7 +126,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value) gpiod_set_value(mdiodev->reset, value); - d = value ? mdiodev->reset_delay : mdiodev->reset_post_delay; + d = value ? 
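+		  /* A non-zero @value asserts the reset line, so the
+		   * "reset-assert-us" delay applies; zero deasserts it and
+		   * the "reset-deassert-us" delay applies instead.
+		   */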
mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay; if (d) usleep_range(d, d + max_t(unsigned int, d / 10, 100)); } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index fd500b18e77f..0f45310300f6 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev) phydev->link = 0; if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev)) phydev->drv->config_intr(phydev); + return genphy_config_aneg(phydev); } return 0; diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index dada819c6b78..a4576859afae 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -233,6 +233,39 @@ int genphy_c45_read_pma(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(genphy_c45_read_pma); +/** + * genphy_c45_read_mdix - read mdix status from PMA + * @phydev: target phy_device struct + */ +int genphy_c45_read_mdix(struct phy_device *phydev) +{ + int val; + + if (phydev->speed == SPEED_10000) { + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, + MDIO_PMA_10GBT_SWAPPOL); + if (val < 0) + return val; + + switch (val) { + case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX: + phydev->mdix = ETH_TP_MDI; + break; + + case 0: + phydev->mdix = ETH_TP_MDI_X; + break; + + default: + phydev->mdix = ETH_TP_MDI_INVALID; + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(genphy_c45_read_mdix); + /* The gen10g_* functions are the old Clause 45 stub */ static int gen10g_config_aneg(struct phy_device *phydev) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 21f75ae244b3..e75989ce8850 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -189,17 +189,61 @@ size_t phy_speeds(unsigned int *speeds, size_t size, return count; } +/** + * phy_resolve_aneg_linkmode - resolve the advertisments into phy settings + * @phydev: The phy_device struct + * + * Resolve our and the link partner advertisments into their corresponding + * speed and duplex. If full duplex was negotiated, extract the pause mode + * from the link partner mask. 
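+ *
+ * Example (a sketch of the resolution order below): if both ends
+ * advertise 1000baseT_Full and 100baseT_Full, the common mask resolves
+ * to SPEED_1000 / DUPLEX_FULL, since the highest common mode wins.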
+ */ +void phy_resolve_aneg_linkmode(struct phy_device *phydev) +{ + u32 common = phydev->lp_advertising & phydev->advertising; + + if (common & ADVERTISED_10000baseT_Full) { + phydev->speed = SPEED_10000; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_1000baseT_Full) { + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_1000baseT_Half) { + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_HALF; + } else if (common & ADVERTISED_100baseT_Full) { + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_100baseT_Half) { + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_HALF; + } else if (common & ADVERTISED_10baseT_Full) { + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_FULL; + } else if (common & ADVERTISED_10baseT_Half) { + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_HALF; + } + + if (phydev->duplex == DUPLEX_FULL) { + phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause); + phydev->asym_pause = !!(phydev->lp_advertising & + ADVERTISED_Asym_Pause); + } +} +EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode); + static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, u16 regnum) { /* Write the desired MMD Devad */ - bus->write(bus, phy_addr, MII_MMD_CTRL, devad); + __mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devad); /* Write the desired MMD register address */ - bus->write(bus, phy_addr, MII_MMD_DATA, regnum); + __mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum); /* Select the Function : DATA with no post increment */ - bus->write(bus, phy_addr, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR); + __mdiobus_write(bus, phy_addr, MII_MMD_CTRL, + devad | MII_MMD_CTRL_NOINCR); } /** @@ -232,7 +276,7 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum) mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Read the content of the MMD's selected register */ - val = bus->read(bus, phy_addr, MII_MMD_DATA); + val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA); mutex_unlock(&bus->mdio_lock); } return val; @@ -271,7 +315,7 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) mmd_phy_indirect(bus, phy_addr, devad, regnum); /* Write the data into MMD's selected register */ - bus->write(bus, phy_addr, MII_MMD_DATA, val); + __mdiobus_write(bus, phy_addr, MII_MMD_DATA, val); mutex_unlock(&bus->mdio_lock); ret = 0; @@ -279,3 +323,208 @@ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val) return ret; } EXPORT_SYMBOL(phy_write_mmd); + +/** + * __phy_modify() - Convenience function for modifying a PHY register + * @phydev: a pointer to a &struct phy_device + * @regnum: register number + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * Unlocked helper function which allows a PHY register to be modified as + * new register value = (old register value & ~mask) | set + */ +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret, res; + + ret = __phy_read(phydev, regnum); + if (ret >= 0) { + res = __phy_write(phydev, regnum, (ret & ~mask) | set); + if (res < 0) + ret = res; + } + + return ret; +} +EXPORT_SYMBOL_GPL(__phy_modify); + +/** + * phy_modify - Convenience function for modifying a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @mask: bit mask of bits to clear + * @set: new value of bits set in mask to write to @regnum + * + * NOTE: MUST NOT be called from interrupt context, + * because the bus read/write 
functions may wait for an interrupt + * to conclude the operation. + */ +int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set) +{ + int ret; + + mutex_lock(&phydev->mdio.bus->mdio_lock); + ret = __phy_modify(phydev, regnum, mask, set); + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_modify); + +static int __phy_read_page(struct phy_device *phydev) +{ + return phydev->drv->read_page(phydev); +} + +static int __phy_write_page(struct phy_device *phydev, int page) +{ + return phydev->drv->write_page(phydev, page); +} + +/** + * phy_save_page() - take the bus lock and save the current page + * @phydev: a pointer to a &struct phy_device + * + * Take the MDIO bus lock, and return the current page number. On error, + * returns a negative errno. phy_restore_page() must always be called + * after this, irrespective of success or failure of this call. + */ +int phy_save_page(struct phy_device *phydev) +{ + mutex_lock(&phydev->mdio.bus->mdio_lock); + return __phy_read_page(phydev); +} +EXPORT_SYMBOL_GPL(phy_save_page); + +/** + * phy_select_page() - take the bus lock, save the current page, and set a page + * @phydev: a pointer to a &struct phy_device + * @page: desired page + * + * Take the MDIO bus lock to protect against concurrent access, save the + * current PHY page, and set the current page. On error, returns a + * negative errno, otherwise returns the previous page number. + * phy_restore_page() must always be called after this, irrespective + * of success or failure of this call. + */ +int phy_select_page(struct phy_device *phydev, int page) +{ + int ret, oldpage; + + oldpage = ret = phy_save_page(phydev); + if (ret < 0) + return ret; + + if (oldpage != page) { + ret = __phy_write_page(phydev, page); + if (ret < 0) + return ret; + } + + return oldpage; +} +EXPORT_SYMBOL_GPL(phy_select_page); + +/** + * phy_restore_page() - restore the page register and release the bus lock + * @phydev: a pointer to a &struct phy_device + * @oldpage: the old page, return value from phy_save_page() or phy_select_page() + * @ret: operation's return code + * + * Release the MDIO bus lock, restoring @oldpage if it is a valid page. + * This function propagates the earliest error code from the group of + * operations. + * + * Returns: + * @oldpage if it was a negative value, otherwise + * @ret if it was a negative errno value, otherwise + * phy_write_page()'s negative value if it were in error, otherwise + * @ret. + */ +int phy_restore_page(struct phy_device *phydev, int oldpage, int ret) +{ + int r; + + if (oldpage >= 0) { + r = __phy_write_page(phydev, oldpage); + + /* Propagate the operation return code if the page write + * was successful. + */ + if (ret >= 0 && r < 0) + ret = r; + } else { + /* Propagate the phy page selection error code */ + ret = oldpage; + } + + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(phy_restore_page); + +/** + * phy_read_paged() - Convenience function for reading a paged register + * @phydev: a pointer to a &struct phy_device + * @page: the page for the phy + * @regnum: register number + * + * Same rules as for phy_read(). 
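+ *
+ * A minimal usage sketch (the page and register numbers are
+ * hypothetical):
+ *
+ *	val = phy_read_paged(phydev, 2, 16);
+ *	if (val < 0)
+ *		return val;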
+ */ +int phy_read_paged(struct phy_device *phydev, int page, u32 regnum) +{ + int ret = 0, oldpage; + + oldpage = phy_select_page(phydev, page); + if (oldpage >= 0) + ret = __phy_read(phydev, regnum); + + return phy_restore_page(phydev, oldpage, ret); +} +EXPORT_SYMBOL(phy_read_paged); + +/** + * phy_write_paged() - Convenience function for writing a paged register + * @phydev: a pointer to a &struct phy_device + * @page: the page for the phy + * @regnum: register number + * @val: value to write + * + * Same rules as for phy_write(). + */ +int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val) +{ + int ret = 0, oldpage; + + oldpage = phy_select_page(phydev, page); + if (oldpage >= 0) + ret = __phy_write(phydev, regnum, val); + + return phy_restore_page(phydev, oldpage, ret); +} +EXPORT_SYMBOL(phy_write_paged); + +/** + * phy_modify_paged() - Convenience function for modifying a paged register + * @phydev: a pointer to a &struct phy_device + * @page: the page for the phy + * @regnum: register number + * @mask: bit mask of bits to clear + * @set: bit mask of bits to set + * + * Same rules as for phy_read() and phy_write(). + */ +int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set) +{ + int ret = 0, oldpage; + + oldpage = phy_select_page(phydev, page); + if (oldpage >= 0) + ret = __phy_modify(phydev, regnum, mask, set); + + return phy_restore_page(phydev, oldpage, ret); +} +EXPORT_SYMBOL(phy_modify_paged); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index be13b5d6a8bf..6bd11a070ec8 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1368,9 +1368,8 @@ static int genphy_config_eee_advert(struct phy_device *phydev) */ int genphy_setup_forced(struct phy_device *phydev) { - int ctl = phy_read(phydev, MII_BMCR); + u16 ctl = 0; - ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN; phydev->pause = 0; phydev->asym_pause = 0; @@ -1382,7 +1381,8 @@ int genphy_setup_forced(struct phy_device *phydev) if (DUPLEX_FULL == phydev->duplex) ctl |= BMCR_FULLDPLX; - return phy_write(phydev, MII_BMCR, ctl); + return phy_modify(phydev, MII_BMCR, + BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl); } EXPORT_SYMBOL(genphy_setup_forced); @@ -1392,17 +1392,9 @@ EXPORT_SYMBOL(genphy_setup_forced); */ int genphy_restart_aneg(struct phy_device *phydev) { - int ctl = phy_read(phydev, MII_BMCR); - - if (ctl < 0) - return ctl; - - ctl |= BMCR_ANENABLE | BMCR_ANRESTART; - /* Don't isolate the PHY if we're negotiating */ - ctl &= ~BMCR_ISOLATE; - - return phy_write(phydev, MII_BMCR, ctl); + return phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, + BMCR_ANENABLE | BMCR_ANRESTART); } EXPORT_SYMBOL(genphy_restart_aneg); @@ -1668,44 +1660,20 @@ EXPORT_SYMBOL(genphy_config_init); int genphy_suspend(struct phy_device *phydev) { - int value; - - mutex_lock(&phydev->lock); - - value = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, value | BMCR_PDOWN); - - mutex_unlock(&phydev->lock); - - return 0; + return phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN); } EXPORT_SYMBOL(genphy_suspend); int genphy_resume(struct phy_device *phydev) { - int value; - - value = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); - - return 0; + return phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0); } EXPORT_SYMBOL(genphy_resume); int genphy_loopback(struct phy_device *phydev, bool enable) { - int value; - - value = phy_read(phydev, MII_BMCR); - if (value < 0) - return value; - - if (enable) - value |= BMCR_LOOPBACK; 
- else - value &= ~BMCR_LOOPBACK; - - return phy_write(phydev, MII_BMCR, value); + return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, + enable ? BMCR_LOOPBACK : 0); } EXPORT_SYMBOL(genphy_loopback); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 2ec140ec7923..6ac8b29b2dc3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -567,6 +567,7 @@ struct phylink *phylink_create(struct net_device *ndev, pl->link_config.pause = MLO_PAUSE_AN; pl->link_config.speed = SPEED_UNKNOWN; pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->link_config.an_enabled = true; pl->ops = ops; __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); @@ -1139,6 +1140,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, mutex_lock(&pl->state_mutex); /* Configure the MAC to match the new settings */ linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); + pl->link_config.interface = config.interface; pl->link_config.speed = our_kset.base.speed; pl->link_config.duplex = our_kset.base.duplex; pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; @@ -1523,6 +1525,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii->phy_id = pl->phydev->mdio.addr; + /* fall through */ case SIOCGMIIREG: ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); @@ -1545,6 +1548,7 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCGMIIPHY: mii->phy_id = 0; + /* fall through */ case SIOCGMIIREG: ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); @@ -1576,7 +1580,7 @@ static int phylink_sfp_module_insert(void *upstream, __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; struct phylink_link_state config; phy_interface_t iface; - int mode, ret = 0; + int ret = 0; bool changed; u8 port; @@ -1591,7 +1595,6 @@ static int phylink_sfp_module_insert(void *upstream, case PHY_INTERFACE_MODE_1000BASEX: case PHY_INTERFACE_MODE_2500BASEX: case PHY_INTERFACE_MODE_10GKR: - mode = MLO_AN_INBAND; break; default: return -EINVAL; @@ -1609,13 +1612,15 @@ static int phylink_sfp_module_insert(void *upstream, ret = phylink_validate(pl, support, &config); if (ret) { netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n", - phylink_an_mode_str(mode), phy_modes(config.interface), + phylink_an_mode_str(MLO_AN_INBAND), + phy_modes(config.interface), __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); return ret; } netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n", - phylink_an_mode_str(mode), phy_modes(config.interface), + phylink_an_mode_str(MLO_AN_INBAND), + phy_modes(config.interface), __ETHTOOL_LINK_MODE_MASK_NBITS, support); if (phy_interface_mode_is_8023z(iface) && pl->phydev) @@ -1628,15 +1633,15 @@ static int phylink_sfp_module_insert(void *upstream, linkmode_copy(pl->link_config.advertising, config.advertising); } - if (pl->link_an_mode != mode || + if (pl->link_an_mode != MLO_AN_INBAND || pl->link_config.interface != config.interface) { pl->link_config.interface = config.interface; - pl->link_an_mode = mode; + pl->link_an_mode = MLO_AN_INBAND; changed = true; netdev_info(pl->netdev, "switched to %s/%s link mode\n", - phylink_an_mode_str(mode), + phylink_an_mode_str(MLO_AN_INBAND), phy_modes(config.interface)); } @@ -1656,9 +1661,8 @@ static void phylink_sfp_link_down(void *upstream) ASSERT_RTNL(); set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + queue_work(system_power_efficient_wq, &pl->resolve); 
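+	/* Kick the resolver and wait for it, so the link-down state is
+	 * fully processed before returning.
+	 */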
flush_work(&pl->resolve); - - netif_carrier_off(pl->netdev); } static void phylink_sfp_link_up(void *upstream) diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index 1356dba0d9d3..bdc4bb3c8288 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -57,21 +57,19 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, case SFP_CONNECTOR_MT_RJ: case SFP_CONNECTOR_MU: case SFP_CONNECTOR_OPTICAL_PIGTAIL: - if (support) - phylink_set(support, FIBRE); port = PORT_FIBRE; break; case SFP_CONNECTOR_RJ45: - if (support) - phylink_set(support, TP); port = PORT_TP; break; + case SFP_CONNECTOR_COPPER_PIGTAIL: + port = PORT_DA; + break; + case SFP_CONNECTOR_UNSPEC: if (id->base.e1000_base_t) { - if (support) - phylink_set(support, TP); port = PORT_TP; break; } @@ -80,7 +78,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, case SFP_CONNECTOR_MPO_1X12: case SFP_CONNECTOR_MPO_2X16: case SFP_CONNECTOR_HSSDC_II: - case SFP_CONNECTOR_COPPER_PIGTAIL: case SFP_CONNECTOR_NOSEPARATE: case SFP_CONNECTOR_MXC_2X16: port = PORT_OTHER; @@ -92,6 +89,18 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, break; } + if (support) { + switch (port) { + case PORT_FIBRE: + phylink_set(support, FIBRE); + break; + + case PORT_TP: + phylink_set(support, TP); + break; + } + } + return port; } EXPORT_SYMBOL_GPL(sfp_parse_port); @@ -143,6 +152,11 @@ phy_interface_t sfp_parse_interface(struct sfp_bus *bus, break; default: + if (id->base.e1000_base_cx) { + iface = PHY_INTERFACE_MODE_1000BASEX; + break; + } + iface = PHY_INTERFACE_MODE_NA; dev_err(bus->sfp_dev, "SFP module encoding does not support 8b10b nor 64b66b\n"); @@ -165,10 +179,26 @@ EXPORT_SYMBOL_GPL(sfp_parse_interface); void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support) { + unsigned int br_min, br_nom, br_max; + phylink_set(support, Autoneg); phylink_set(support, Pause); phylink_set(support, Asym_Pause); + /* Decode the bitrate information to MBd */ + br_min = br_nom = br_max = 0; + if (id->base.br_nominal) { + if (id->base.br_nominal != 255) { + br_nom = id->base.br_nominal * 100; + br_min = br_nom + id->base.br_nominal * id->ext.br_min; + br_max = br_nom + id->base.br_nominal * id->ext.br_max; + } else if (id->ext.br_max) { + br_nom = 250 * id->ext.br_max; + br_max = br_nom + br_nom * id->ext.br_min / 100; + br_min = br_nom - br_nom * id->ext.br_min / 100; + } + } + /* Set ethtool support from the compliance fields. */ if (id->base.e10g_base_sr) phylink_set(support, 10000baseSR_Full); @@ -187,6 +217,34 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, phylink_set(support, 1000baseT_Full); } + /* 1000Base-PX or 1000Base-BX10 */ + if ((id->base.e_base_px || id->base.e_base_bx10) && + br_min <= 1300 && br_max >= 1200) + phylink_set(support, 1000baseX_Full); + + /* For active or passive cables, select the link modes + * based on the bit rates and the cable compliance bytes. 
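+	 * For example (a sketch of the decode above): br_nominal = 103
+	 * with zero ext.br_min/ext.br_max gives br_nom = br_min = br_max =
+	 * 10300 MBd, which passes the 10GBase-CR test below
+	 * (br_min <= 12000 && br_max >= 10300).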
+ */ + if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) { + /* This may look odd, but some manufacturers use 12000MBd */ + if (br_min <= 12000 && br_max >= 10300) + phylink_set(support, 10000baseCR_Full); + if (br_min <= 3200 && br_max >= 3100) + phylink_set(support, 2500baseX_Full); + if (br_min <= 1300 && br_max >= 1200) + phylink_set(support, 1000baseX_Full); + } + if (id->base.sfp_ct_passive) { + if (id->base.passive.sff8431_app_e) + phylink_set(support, 10000baseCR_Full); + } + if (id->base.sfp_ct_active) { + if (id->base.active.sff8431_app_e || + id->base.active.sff8431_lim) { + phylink_set(support, 10000baseCR_Full); + } + } + switch (id->base.extended_cc) { case 0x00: /* Unspecified */ break; @@ -220,35 +278,6 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, if (id->base.br_nominal >= 12) phylink_set(support, 1000baseX_Full); } - - switch (id->base.connector) { - case SFP_CONNECTOR_SC: - case SFP_CONNECTOR_FIBERJACK: - case SFP_CONNECTOR_LC: - case SFP_CONNECTOR_MT_RJ: - case SFP_CONNECTOR_MU: - case SFP_CONNECTOR_OPTICAL_PIGTAIL: - break; - - case SFP_CONNECTOR_UNSPEC: - if (id->base.e1000_base_t) - break; - - case SFP_CONNECTOR_SG: /* guess */ - case SFP_CONNECTOR_MPO_1X12: - case SFP_CONNECTOR_MPO_2X16: - case SFP_CONNECTOR_HSSDC_II: - case SFP_CONNECTOR_COPPER_PIGTAIL: - case SFP_CONNECTOR_NOSEPARATE: - case SFP_CONNECTOR_MXC_2X16: - default: - /* a guess at the supported link modes */ - dev_warn(bus->sfp_dev, - "Guessing link modes, please report...\n"); - phylink_set(support, 1000baseT_Half); - phylink_set(support, 1000baseT_Full); - break; - } } EXPORT_SYMBOL_GPL(sfp_parse_support); @@ -460,7 +489,8 @@ EXPORT_SYMBOL_GPL(sfp_register_upstream); void sfp_unregister_upstream(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->sfp) + sfp_unregister_bus(bus); bus->upstream = NULL; bus->netdev = NULL; rtnl_unlock(); @@ -563,7 +593,8 @@ EXPORT_SYMBOL_GPL(sfp_register_socket); void sfp_unregister_socket(struct sfp_bus *bus) { rtnl_lock(); - sfp_unregister_bus(bus); + if (bus->netdev) + sfp_unregister_bus(bus); bus->sfp_dev = NULL; bus->sfp = NULL; bus->socket_ops = NULL; diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index ee6b2e041171..6c7d9289078d 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -466,11 +466,6 @@ static int sfp_sm_mod_probe(struct sfp *sfp) { /* SFP module inserted - read I2C data */ struct sfp_eeprom_id id; - char vendor[17]; - char part[17]; - char sn[17]; - char date[9]; - char rev[5]; u8 check; int err; @@ -506,19 +501,12 @@ static int sfp_sm_mod_probe(struct sfp *sfp) sfp->id = id; - memcpy(vendor, sfp->id.base.vendor_name, 16); - vendor[16] = '\0'; - memcpy(part, sfp->id.base.vendor_pn, 16); - part[16] = '\0'; - memcpy(rev, sfp->id.base.vendor_rev, 4); - rev[4] = '\0'; - memcpy(sn, sfp->id.ext.vendor_sn, 16); - sn[16] = '\0'; - memcpy(date, sfp->id.ext.datecode, 8); - date[8] = '\0'; - - dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n", - vendor, part, rev, sn, date); + dev_info(sfp->dev, "module %.*s %.*s rev %.*s sn %.*s dc %.*s\n", + (int)sizeof(id.base.vendor_name), id.base.vendor_name, + (int)sizeof(id.base.vendor_pn), id.base.vendor_pn, + (int)sizeof(id.base.vendor_rev), id.base.vendor_rev, + (int)sizeof(id.ext.vendor_sn), id.ext.vendor_sn, + (int)sizeof(id.ext.datecode), id.ext.datecode); /* Check whether we support this module */ if (!sfp->type->module_supported(&sfp->id)) { diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 
0a886fda0129..7c38659b2a76 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -330,7 +330,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) if (!q) return RX_HANDLER_PASS; - if (__skb_array_full(&q->skb_array)) + if (__ptr_ring_full(&q->ring)) goto drop; skb_push(skb, ETH_HLEN); @@ -348,7 +348,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) goto drop; if (!segs) { - if (skb_array_produce(&q->skb_array, skb)) + if (ptr_ring_produce(&q->ring, skb)) goto drop; goto wake_up; } @@ -358,7 +358,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) struct sk_buff *nskb = segs->next; segs->next = NULL; - if (skb_array_produce(&q->skb_array, segs)) { + if (ptr_ring_produce(&q->ring, segs)) { kfree_skb(segs); kfree_skb_list(nskb); break; @@ -375,7 +375,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb) !(features & NETIF_F_CSUM_MASK) && skb_checksum_help(skb)) goto drop; - if (skb_array_produce(&q->skb_array, skb)) + if (ptr_ring_produce(&q->ring, skb)) goto drop; } @@ -497,7 +497,7 @@ static void tap_sock_destruct(struct sock *sk) { struct tap_queue *q = container_of(sk, struct tap_queue, sk); - skb_array_cleanup(&q->skb_array); + ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb); } static int tap_open(struct inode *inode, struct file *file) @@ -517,7 +517,7 @@ static int tap_open(struct inode *inode, struct file *file) &tap_proto, 0); if (!q) goto err; - if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) { + if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) { sk_free(&q->sk); goto err; } @@ -546,7 +546,7 @@ static int tap_open(struct inode *inode, struct file *file) err = tap_set_queue(tap, file, q); if (err) { - /* tap_sock_destruct() will take care of freeing skb_array */ + /* tap_sock_destruct() will take care of freeing ptr_ring */ goto err_put; } @@ -583,7 +583,7 @@ static unsigned int tap_poll(struct file *file, poll_table *wait) mask = 0; poll_wait(file, &q->wq.wait, wait); - if (!skb_array_empty(&q->skb_array)) + if (!ptr_ring_empty(&q->ring)) mask |= POLLIN | POLLRDNORM; if (sock_writeable(&q->sk) || @@ -844,7 +844,7 @@ static ssize_t tap_do_read(struct tap_queue *q, TASK_INTERRUPTIBLE); /* Read frames from the queue */ - skb = skb_array_consume(&q->skb_array); + skb = ptr_ring_consume(&q->ring); if (skb) break; if (noblock) { @@ -1176,7 +1176,7 @@ static int tap_peek_len(struct socket *sock) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); - return skb_array_peek_len(&q->skb_array); + return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag); } /* Ops structure to mimic raw sockets with tun */ @@ -1202,7 +1202,7 @@ struct socket *tap_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tap_get_socket); -struct skb_array *tap_get_skb_array(struct file *file) +struct ptr_ring *tap_get_ptr_ring(struct file *file) { struct tap_queue *q; @@ -1211,29 +1211,30 @@ struct skb_array *tap_get_skb_array(struct file *file) q = file->private_data; if (!q) return ERR_PTR(-EBADFD); - return &q->skb_array; + return &q->ring; } -EXPORT_SYMBOL_GPL(tap_get_skb_array); +EXPORT_SYMBOL_GPL(tap_get_ptr_ring); int tap_queue_resize(struct tap_dev *tap) { struct net_device *dev = tap->dev; struct tap_queue *q; - struct skb_array **arrays; + struct ptr_ring **rings; int n = tap->numqueues; int ret, i = 0; - arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); - if (!arrays) + rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); + if (!rings) return -ENOMEM; list_for_each_entry(q, 
&tap->queue_list, next) - arrays[i++] = &q->skb_array; + rings[i++] = &q->ring; - ret = skb_array_resize_multiple(arrays, n, - dev->tx_queue_len, GFP_KERNEL); + ret = ptr_ring_resize_multiple(rings, n, + dev->tx_queue_len, GFP_KERNEL, + __skb_array_destroy_skb); - kfree(arrays); + kfree(rings); return ret; } EXPORT_SYMBOL_GPL(tap_queue_resize); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index e367d6310353..2fba3be5719e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -179,7 +179,8 @@ struct tun_file { struct mutex napi_mutex; /* Protects access to the above napi */ struct list_head next; struct tun_struct *detached; - struct skb_array tx_array; + struct ptr_ring tx_ring; + struct xdp_rxq_info xdp_rxq; }; struct tun_flow_entry { @@ -240,6 +241,24 @@ struct tun_struct { struct tun_steering_prog __rcu *steering_prog; }; +bool tun_is_xdp_buff(void *ptr) +{ + return (unsigned long)ptr & TUN_XDP_FLAG; +} +EXPORT_SYMBOL(tun_is_xdp_buff); + +void *tun_xdp_to_ptr(void *ptr) +{ + return (void *)((unsigned long)ptr | TUN_XDP_FLAG); +} +EXPORT_SYMBOL(tun_xdp_to_ptr); + +void *tun_ptr_to_xdp(void *ptr) +{ + return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG); +} +EXPORT_SYMBOL(tun_ptr_to_xdp); + static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@ -630,12 +649,25 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile) return tun; } +static void tun_ptr_free(void *ptr) +{ + if (!ptr) + return; + if (tun_is_xdp_buff(ptr)) { + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + + put_page(virt_to_head_page(xdp->data)); + } else { + __skb_array_destroy_skb(ptr); + } +} + static void tun_queue_purge(struct tun_file *tfile) { - struct sk_buff *skb; + void *ptr; - while ((skb = skb_array_consume(&tfile->tx_array)) != NULL) - kfree_skb(skb); + while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL) + tun_ptr_free(ptr); skb_queue_purge(&tfile->sk.sk_write_queue); skb_queue_purge(&tfile->sk.sk_error_queue); @@ -687,8 +719,10 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } - if (tun) - skb_array_cleanup(&tfile->tx_array); + if (tun) { + ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); + xdp_rxq_info_unreg(&tfile->xdp_rxq); + } sock_put(&tfile->sk); } } @@ -728,11 +762,13 @@ static void tun_detach_all(struct net_device *dev) tun_napi_del(tun, tfile); /* Drop read queue */ tun_queue_purge(tfile); + xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); tun_queue_purge(tfile); + xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); } BUG_ON(tun->numdisabled != 0); @@ -777,13 +813,29 @@ static int tun_attach(struct tun_struct *tun, struct file *file, } if (!tfile->detached && - skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) { + ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) { err = -ENOMEM; goto out; } tfile->queue_index = tun->numqueues; tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; + + if (tfile->detached) { + /* Re-attach detached tfile, updating XDP queue_index */ + WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq)); + + if (tfile->xdp_rxq.queue_index != tfile->queue_index) + tfile->xdp_rxq.queue_index = tfile->queue_index; + } else { + /* Setup XDP RX-queue info, for new tfile getting attached */ + err = xdp_rxq_info_reg(&tfile->xdp_rxq, + tun->dev, tfile->queue_index); + if (err < 0) 
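+			/* a queue cannot be attached without valid
+			 * XDP RX-queue info
+			 */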
+ goto out; + err = 0; + } + rcu_assign_pointer(tfile->tun, tun); rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); tun->numqueues++; @@ -1027,7 +1079,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset(skb); - if (skb_array_produce(&tfile->tx_array, skb)) + if (ptr_ring_produce(&tfile->tx_ring, skb)) goto drop; /* Notify and wake up reader process */ @@ -1200,6 +1252,67 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, }; +static int tun_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +{ + struct tun_struct *tun = netdev_priv(dev); + struct xdp_buff *buff = xdp->data_hard_start; + int headroom = xdp->data - xdp->data_hard_start; + struct tun_file *tfile; + u32 numqueues; + int ret = 0; + + /* Assure headroom is available and buff is properly aligned */ + if (unlikely(headroom < sizeof(*xdp) || tun_is_xdp_buff(xdp))) + return -ENOSPC; + + *buff = *xdp; + + rcu_read_lock(); + + numqueues = READ_ONCE(tun->numqueues); + if (!numqueues) { + ret = -ENOSPC; + goto out; + } + + tfile = rcu_dereference(tun->tfiles[smp_processor_id() % + numqueues]); + /* Encode the XDP flag into lowest bit for consumer to differ + * XDP buffer from sk_buff. + */ + if (ptr_ring_produce(&tfile->tx_ring, tun_xdp_to_ptr(buff))) { + this_cpu_inc(tun->pcpu_stats->tx_dropped); + ret = -ENOSPC; + } + +out: + rcu_read_unlock(); + return ret; +} + +static void tun_xdp_flush(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct tun_file *tfile; + u32 numqueues; + + rcu_read_lock(); + + numqueues = READ_ONCE(tun->numqueues); + if (!numqueues) + goto out; + + tfile = rcu_dereference(tun->tfiles[smp_processor_id() % + numqueues]); + /* Notify and wake up reader process */ + if (tfile->flags & TUN_FASYNC) + kill_fasync(&tfile->fasync, SIGIO, POLL_IN); + tfile->socket.sk->sk_data_ready(tfile->socket.sk); + +out: + rcu_read_unlock(); +} + static const struct net_device_ops tap_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, @@ -1217,6 +1330,8 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, .ndo_bpf = tun_xdp, + .ndo_xdp_xmit = tun_xdp_xmit, + .ndo_xdp_flush = tun_xdp_flush, }; static void tun_flow_init(struct tun_struct *tun) @@ -1295,7 +1410,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) poll_wait(file, sk_sleep(sk), wait); - if (!skb_array_empty(&tfile->tx_array)) + if (!ptr_ring_empty(&tfile->tx_ring)) mask |= POLLIN | POLLRDNORM; if (tun->dev->flags & IFF_UP && @@ -1508,6 +1623,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, xdp.data = buf + pad; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.rxq = &tfile->xdp_rxq; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -1840,6 +1956,40 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) return result; } +static ssize_t tun_put_user_xdp(struct tun_struct *tun, + struct tun_file *tfile, + struct xdp_buff *xdp, + struct iov_iter *iter) +{ + int vnet_hdr_sz = 0; + size_t size = xdp->data_end - xdp->data; + struct tun_pcpu_stats *stats; + size_t ret; + + if (tun->flags & IFF_VNET_HDR) { + struct virtio_net_hdr gso = { 0 }; + + vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz); + if (unlikely(iov_iter_count(iter) < vnet_hdr_sz)) + return -EINVAL; + if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) != + sizeof(gso))) + return -EFAULT; + 
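+		/* Skip the remainder of the configured vnet header; only
+		 * the zeroed gso struct was actually written.
+		 */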
iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso)); + } + + ret = copy_to_iter(xdp->data, size, iter) + vnet_hdr_sz; + + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += ret; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(tun->pcpu_stats); + + return ret; +} + /* Put packet to the user space buffer */ static ssize_t tun_put_user(struct tun_struct *tun, struct tun_file *tfile, @@ -1937,15 +2087,14 @@ done: return total; } -static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, - int *err) +static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err) { DECLARE_WAITQUEUE(wait, current); - struct sk_buff *skb = NULL; + void *ptr = NULL; int error = 0; - skb = skb_array_consume(&tfile->tx_array); - if (skb) + ptr = ptr_ring_consume(&tfile->tx_ring); + if (ptr) goto out; if (noblock) { error = -EAGAIN; @@ -1956,8 +2105,8 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, current->state = TASK_INTERRUPTIBLE; while (1) { - skb = skb_array_consume(&tfile->tx_array); - if (skb) + ptr = ptr_ring_consume(&tfile->tx_ring); + if (ptr) break; if (signal_pending(current)) { error = -ERESTARTSYS; @@ -1976,12 +2125,12 @@ static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, out: *err = error; - return skb; + return ptr; } static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, - int noblock, struct sk_buff *skb) + int noblock, void *ptr) { ssize_t ret; int err; @@ -1989,23 +2138,31 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, tun_debug(KERN_INFO, tun, "tun_do_read\n"); if (!iov_iter_count(to)) { - if (skb) - kfree_skb(skb); + tun_ptr_free(ptr); return 0; } - if (!skb) { + if (!ptr) { /* Read frames from ring */ - skb = tun_ring_recv(tfile, noblock, &err); - if (!skb) + ptr = tun_ring_recv(tfile, noblock, &err); + if (!ptr) return err; } - ret = tun_put_user(tun, tfile, skb, to); - if (unlikely(ret < 0)) - kfree_skb(skb); - else - consume_skb(skb); + if (tun_is_xdp_buff(ptr)) { + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + + ret = tun_put_user_xdp(tun, tfile, xdp, to); + put_page(virt_to_head_page(xdp->data)); + } else { + struct sk_buff *skb = ptr; + + ret = tun_put_user(tun, tfile, skb, to); + if (unlikely(ret < 0)) + kfree_skb(skb); + else + consume_skb(skb); + } return ret; } @@ -2142,12 +2299,12 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, { struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); - struct sk_buff *skb = m->msg_control; + void *ptr = m->msg_control; int ret; if (!tun) { ret = -EBADFD; - goto out_free_skb; + goto out_free; } if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) { @@ -2159,7 +2316,7 @@ static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len, SOL_PACKET, TUN_TX_TIMESTAMP); goto out; } - ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, skb); + ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr); if (ret > (ssize_t)total_len) { m->msg_flags |= MSG_TRUNC; ret = flags & MSG_TRUNC ? 
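+			/* with MSG_TRUNC the full frame length is
+			 * reported even though only total_len bytes
+			 * were copied
+			 */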
ret : total_len; @@ -2170,12 +2327,25 @@ out: out_put_tun: tun_put(tun); -out_free_skb: - if (skb) - kfree_skb(skb); +out_free: + tun_ptr_free(ptr); return ret; } +static int tun_ptr_peek_len(void *ptr) +{ + if (likely(ptr)) { + if (tun_is_xdp_buff(ptr)) { + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + + return xdp->data_end - xdp->data; + } + return __skb_array_len_with_tag(ptr); + } else { + return 0; + } +} + static int tun_peek_len(struct socket *sock) { struct tun_file *tfile = container_of(sock, struct tun_file, socket); @@ -2186,7 +2356,7 @@ static int tun_peek_len(struct socket *sock) if (!tun) return 0; - ret = skb_array_peek_len(&tfile->tx_array); + ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len); tun_put(tun); return ret; @@ -3092,25 +3262,26 @@ static int tun_queue_resize(struct tun_struct *tun) { struct net_device *dev = tun->dev; struct tun_file *tfile; - struct skb_array **arrays; + struct ptr_ring **rings; int n = tun->numqueues + tun->numdisabled; int ret, i; - arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); - if (!arrays) + rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL); + if (!rings) return -ENOMEM; for (i = 0; i < tun->numqueues; i++) { tfile = rtnl_dereference(tun->tfiles[i]); - arrays[i] = &tfile->tx_array; + rings[i] = &tfile->tx_ring; } list_for_each_entry(tfile, &tun->disabled, next) - arrays[i++] = &tfile->tx_array; + rings[i++] = &tfile->tx_ring; - ret = skb_array_resize_multiple(arrays, n, - dev->tx_queue_len, GFP_KERNEL); + ret = ptr_ring_resize_multiple(rings, n, + dev->tx_queue_len, GFP_KERNEL, + tun_ptr_free); - kfree(arrays); + kfree(rings); return ret; } @@ -3196,7 +3367,7 @@ struct socket *tun_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(tun_get_socket); -struct skb_array *tun_get_skb_array(struct file *file) +struct ptr_ring *tun_get_tx_ring(struct file *file) { struct tun_file *tfile; @@ -3205,9 +3376,9 @@ struct skb_array *tun_get_skb_array(struct file *file) tfile = file->private_data; if (!tfile) return ERR_PTR(-EBADFD); - return &tfile->tx_array; + return &tfile->tx_ring; } -EXPORT_SYMBOL_GPL(tun_get_skb_array); +EXPORT_SYMBOL_GPL(tun_get_tx_ring); module_init(tun_init); module_exit(tun_cleanup); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index cfaa07f230e5..ae0580b577b8 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1100,6 +1100,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x9084, 4)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 0)}, {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, + {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6fb7b658a6cc..12dfc5fee58e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -31,6 +31,7 @@ #include <linux/average.h> #include <linux/filter.h> #include <net/route.h> +#include <net/xdp.h> static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -115,6 +116,8 @@ struct receive_queue { /* Name of this receive queue: input.$index */ char name[40]; + + struct xdp_rxq_info xdp_rxq; }; struct virtnet_info { @@ -559,6 +562,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.data = xdp.data_hard_start + xdp_headroom; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; + xdp.rxq = &rq->xdp_rxq; orig_data = xdp.data; act = 
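+		/* xdp.rxq was filled in above, so the program and its
+		 * helpers can identify the receive queue
+		 */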
bpf_prog_run_xdp(xdp_prog, &xdp); @@ -692,6 +696,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp.data = data + vi->hdr_len; xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + (len - vi->hdr_len); + xdp.rxq = &rq->xdp_rxq; + act = bpf_prog_run_xdp(xdp_prog, &xdp); if (act != XDP_PASS) @@ -1225,13 +1231,18 @@ static int virtnet_poll(struct napi_struct *napi, int budget) static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); - int i; + int i, err; for (i = 0; i < vi->max_queue_pairs; i++) { if (i < vi->curr_queue_pairs) /* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); + + err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i); + if (err < 0) + return err; + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi); } @@ -1560,6 +1571,7 @@ static int virtnet_close(struct net_device *dev) cancel_delayed_work_sync(&vi->refill); for (i = 0; i < vi->max_queue_pairs; i++) { + xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq); napi_disable(&vi->rq[i].napi); virtnet_napi_tx_disable(&vi->sq[i].napi); } @@ -1894,6 +1906,24 @@ static void virtnet_init_settings(struct net_device *dev) vi->duplex = DUPLEX_UNKNOWN; } +static void virtnet_update_settings(struct virtnet_info *vi) +{ + u32 speed; + u8 duplex; + + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) + return; + + speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, + speed)); + if (ethtool_validate_speed(speed)) + vi->speed = speed; + duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, + duplex)); + if (ethtool_validate_duplex(duplex)) + vi->duplex = duplex; +} + static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, @@ -2147,6 +2177,7 @@ static void virtnet_config_changed_work(struct work_struct *work) vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { + virtnet_update_settings(vi); netif_carrier_on(vi->dev); netif_tx_wake_all_queues(vi->dev); } else { @@ -2695,6 +2726,7 @@ static int virtnet_probe(struct virtio_device *vdev) schedule_work(&vi->config_work); } else { vi->status = VIRTIO_NET_S_LINK_UP; + virtnet_update_settings(vi); netif_carrier_on(dev); } @@ -2796,7 +2828,8 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ - VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS + VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \ + VIRTIO_NET_F_SPEED_DUPLEX static unsigned int features[] = { VIRTNET_FEATURES, diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index e8189c07b41f..78367373185f 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -728,16 +728,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val) val != PS_MANUAL_POLL) return -EINVAL; - old_ps = data->ps; - data->ps = val; - - local_bh_disable(); if (val == PS_MANUAL_POLL) { + if (data->ps != PS_ENABLED) + return -EINVAL; + local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); - data->ps_poll_pending = true; - } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + local_bh_enable(); + return 0; + } + old_ps = data->ps; + data->ps = val; + + local_bh_disable(); + if (old_ps == 
PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index c5a34671abda..9bd7ddeeb6a5 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1326,6 +1326,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) netif_carrier_off(netdev); + xenbus_switch_state(dev, XenbusStateInitialising); return netdev; exit: diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index e949e3302af4..c586bcdb5190 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -211,12 +211,12 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, return ret; } -static int btt_log_read_pair(struct arena_info *arena, u32 lane, - struct log_entry *ent) +static int btt_log_group_read(struct arena_info *arena, u32 lane, + struct log_group *log) { return arena_read_bytes(arena, - arena->logoff + (2 * lane * LOG_ENT_SIZE), ent, - 2 * LOG_ENT_SIZE, 0); + arena->logoff + (lane * LOG_GRP_SIZE), log, + LOG_GRP_SIZE, 0); } static struct dentry *debugfs_root; @@ -256,6 +256,8 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent, debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff); debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off); debugfs_create_x32("flags", S_IRUGO, d, &a->flags); + debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]); + debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]); } static void btt_debugfs_init(struct btt *btt) @@ -274,6 +276,11 @@ static void btt_debugfs_init(struct btt *btt) } } +static u32 log_seq(struct log_group *log, int log_idx) +{ + return le32_to_cpu(log->ent[log_idx].seq); +} + /* * This function accepts two log entries, and uses the * sequence number to find the 'older' entry. @@ -283,8 +290,10 @@ static void btt_debugfs_init(struct btt *btt) * * TODO The logic feels a bit kludge-y. make it better.. 
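 * For example: sequence numbers cycle 1 -> 2 -> 3 -> 1, so seqs of
 * (1, 2) make the first slot the older one, while (3, 1) means the
 * counter wrapped and the first slot (seq 3) is still the older one.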
*/ -static int btt_log_get_old(struct log_entry *ent) +static int btt_log_get_old(struct arena_info *a, struct log_group *log) { + int idx0 = a->log_index[0]; + int idx1 = a->log_index[1]; int old; /* @@ -292,23 +301,23 @@ static int btt_log_get_old(struct log_entry *ent) * the next time, the following logic works out to put this * (next) entry into [1] */ - if (ent[0].seq == 0) { - ent[0].seq = cpu_to_le32(1); + if (log_seq(log, idx0) == 0) { + log->ent[idx0].seq = cpu_to_le32(1); return 0; } - if (ent[0].seq == ent[1].seq) + if (log_seq(log, idx0) == log_seq(log, idx1)) return -EINVAL; - if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5) + if (log_seq(log, idx0) + log_seq(log, idx1) > 5) return -EINVAL; - if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) { - if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1) + if (log_seq(log, idx0) < log_seq(log, idx1)) { + if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1) old = 0; else old = 1; } else { - if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1) + if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1) old = 1; else old = 0; @@ -328,17 +337,18 @@ static int btt_log_read(struct arena_info *arena, u32 lane, { int ret; int old_ent, ret_ent; - struct log_entry log[2]; + struct log_group log; - ret = btt_log_read_pair(arena, lane, log); + ret = btt_log_group_read(arena, lane, &log); if (ret) return -EIO; - old_ent = btt_log_get_old(log); + old_ent = btt_log_get_old(arena, &log); if (old_ent < 0 || old_ent > 1) { dev_err(to_dev(arena), "log corruption (%d): lane %d seq [%d, %d]\n", - old_ent, lane, log[0].seq, log[1].seq); + old_ent, lane, log.ent[arena->log_index[0]].seq, + log.ent[arena->log_index[1]].seq); /* TODO set error state? */ return -EIO; } @@ -346,7 +356,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane, ret_ent = (old_flag ? old_ent : (1 - old_ent)); if (ent != NULL) - memcpy(ent, &log[ret_ent], LOG_ENT_SIZE); + memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); return ret_ent; } @@ -360,17 +370,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane, u32 sub, struct log_entry *ent, unsigned long flags) { int ret; - /* - * Ignore the padding in log_entry for calculating log_half. - * The entry is 'committed' when we write the sequence number, - * and we want to ensure that that is the last thing written. 
- * We don't bother writing the padding as that would be extra - * media wear and write amplification - */ - unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2; - u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE); + u32 group_slot = arena->log_index[sub]; + unsigned int log_half = LOG_ENT_SIZE / 2; void *src = ent; + u64 ns_off; + ns_off = arena->logoff + (lane * LOG_GRP_SIZE) + + (group_slot * LOG_ENT_SIZE); /* split the 16B write into atomic, durable halves */ ret = arena_write_bytes(arena, ns_off, src, log_half, flags); if (ret) @@ -453,7 +459,7 @@ static int btt_log_init(struct arena_info *arena) { size_t logsize = arena->info2off - arena->logoff; size_t chunk_size = SZ_4K, offset = 0; - struct log_entry log; + struct log_entry ent; void *zerobuf; int ret; u32 i; @@ -485,11 +491,11 @@ static int btt_log_init(struct arena_info *arena) } for (i = 0; i < arena->nfree; i++) { - log.lba = cpu_to_le32(i); - log.old_map = cpu_to_le32(arena->external_nlba + i); - log.new_map = cpu_to_le32(arena->external_nlba + i); - log.seq = cpu_to_le32(LOG_SEQ_INIT); - ret = __btt_log_write(arena, i, 0, &log, 0); + ent.lba = cpu_to_le32(i); + ent.old_map = cpu_to_le32(arena->external_nlba + i); + ent.new_map = cpu_to_le32(arena->external_nlba + i); + ent.seq = cpu_to_le32(LOG_SEQ_INIT); + ret = __btt_log_write(arena, i, 0, &ent, 0); if (ret) goto free; } @@ -594,6 +600,123 @@ static int btt_freelist_init(struct arena_info *arena) return 0; } +static bool ent_is_padding(struct log_entry *ent) +{ + return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0) + && (ent->seq == 0); +} + +/* + * Detecting valid log indices: We read a log group (see the comments in btt.h + * for a description of a 'log_group' and its 'slots'), and iterate over its + * four slots. We expect that a padding slot will be all-zeroes, and use this + * to detect a padding slot vs. an actual entry. + * + * If a log_group is in the initial state, i.e. hasn't been used since the + * creation of this BTT layout, it will have three of the four slots with + * zeroes. We skip over these log_groups for the detection of log_index. If + * all log_groups are in the initial state (i.e. the BTT has never been + * written to), it is safe to assume the 'new format' of log entries in slots + * (0, 1). + */ +static int log_set_indices(struct arena_info *arena) +{ + bool idx_set = false, initial_state = true; + int ret, log_index[2] = {-1, -1}; + u32 i, j, next_idx = 0; + struct log_group log; + u32 pad_count = 0; + + for (i = 0; i < arena->nfree; i++) { + ret = btt_log_group_read(arena, i, &log); + if (ret < 0) + return ret; + + for (j = 0; j < 4; j++) { + if (!idx_set) { + if (ent_is_padding(&log.ent[j])) { + pad_count++; + continue; + } else { + /* Skip if index has been recorded */ + if ((next_idx == 1) && + (j == log_index[0])) + continue; + /* valid entry, record index */ + log_index[next_idx] = j; + next_idx++; + } + if (next_idx == 2) { + /* two valid entries found */ + idx_set = true; + } else if (next_idx > 2) { + /* too many valid indices */ + return -ENXIO; + } + } else { + /* + * once the indices have been set, just verify + * that all subsequent log groups are either in + * their initial state or follow the same + * indices. 
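+				 * For example, once (0, 2) is recorded as
+				 * the valid pair, every later group must
+				 * keep its entries in slots 0 and 2 (with
+				 * 1 and 3 as padding) or still be in its
+				 * initial state.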
+ */ + if (j == log_index[0]) { + /* entry must be 'valid' */ + if (ent_is_padding(&log.ent[j])) + return -ENXIO; + } else if (j == log_index[1]) { + ; + /* + * log_index[1] can be padding if the + * lane never got used and it is still + * in the initial state (three 'padding' + * entries) + */ + } else { + /* entry must be invalid (padding) */ + if (!ent_is_padding(&log.ent[j])) + return -ENXIO; + } + } + } + /* + * If any of the log_groups have more than one valid, + * non-padding entry, then the we are no longer in the + * initial_state + */ + if (pad_count < 3) + initial_state = false; + pad_count = 0; + } + + if (!initial_state && !idx_set) + return -ENXIO; + + /* + * If all the entries in the log were in the initial state, + * assume new padding scheme + */ + if (initial_state) + log_index[1] = 1; + + /* + * Only allow the known permutations of log/padding indices, + * i.e. (0, 1), and (0, 2) + */ + if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2))) + ; /* known index possibilities */ + else { + dev_err(to_dev(arena), "Found an unknown padding scheme\n"); + return -ENXIO; + } + + arena->log_index[0] = log_index[0]; + arena->log_index[1] = log_index[1]; + dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]); + dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]); + return 0; +} + static int btt_rtt_init(struct arena_info *arena) { arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); @@ -650,8 +773,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, available -= 2 * BTT_PG_SIZE; /* The log takes a fixed amount of space based on nfree */ - logsize = roundup(2 * arena->nfree * sizeof(struct log_entry), - BTT_PG_SIZE); + logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE); available -= logsize; /* Calculate optimal split between map and data area */ @@ -668,6 +790,10 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size, arena->mapoff = arena->dataoff + datasize; arena->logoff = arena->mapoff + mapsize; arena->info2off = arena->logoff + logsize; + + /* Default log indices are (0,1) */ + arena->log_index[0] = 0; + arena->log_index[1] = 1; return arena; } @@ -758,6 +884,13 @@ static int discover_arenas(struct btt *btt) arena->external_lba_start = cur_nlba; parse_arena_meta(arena, super, cur_off); + ret = log_set_indices(arena); + if (ret) { + dev_err(to_dev(arena), + "Unable to deduce log/padding indices\n"); + goto out; + } + mutex_init(&arena->err_lock); ret = btt_freelist_init(arena); if (ret) diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h index 578c2057524d..db3cb6d4d0d4 100644 --- a/drivers/nvdimm/btt.h +++ b/drivers/nvdimm/btt.h @@ -27,6 +27,7 @@ #define MAP_ERR_MASK (1 << MAP_ERR_SHIFT) #define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT))) #define MAP_ENT_NORMAL 0xC0000000 +#define LOG_GRP_SIZE sizeof(struct log_group) #define LOG_ENT_SIZE sizeof(struct log_entry) #define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */ #define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */ @@ -50,12 +51,52 @@ enum btt_init_state { INIT_READY }; +/* + * A log group represents one log 'lane', and consists of four log entries. + * Two of the four entries are valid entries, and the remaining two are + * padding. Due to an old bug in the padding location, we need to perform a + * test to determine the padding scheme being used, and use that scheme + * thereafter. 
+ * + * In kernels prior to 4.15, 'log group' would have actual log entries at + * indices (0, 2) and padding at indices (1, 3), whereas the correct/updated + * format has log entries at indices (0, 1) and padding at indices (2, 3). + * + * Old (pre 4.15) format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | lba/old/new/seq | pad | + * +-----------------+-----------------+ + * + * New format: + * +-----------------+-----------------+ + * | ent[0] | ent[1] | + * | 16B | 16B | + * | lba/old/new/seq | lba/old/new/seq | + * +-----------------------------------+ + * | ent[2] | ent[3] | + * | 16B | 16B | + * | pad | pad | + * +-----------------+-----------------+ + * + * We detect during start-up which format is in use, and set + * arena->log_index[(0, 1)] with the detected format. + */ + struct log_entry { __le32 lba; __le32 old_map; __le32 new_map; __le32 seq; - __le64 padding[2]; +}; + +struct log_group { + struct log_entry ent[4]; }; struct btt_sb { @@ -125,6 +166,8 @@ struct aligned_lock { * @list: List head for list of arenas * @debugfs_dir: Debugfs dentry * @flags: Arena flags - may signify error states. + * @err_lock: Mutex for synchronizing error clearing. + * @log_index: Indices of the valid log entries in a log_group + * * arena_info is a per-arena handle. Once an arena is narrowed down for an * IO, this struct is passed around for the duration of the IO. @@ -157,6 +200,7 @@ struct arena_info { /* Arena flags */ u32 flags; struct mutex err_lock; + int log_index[2]; }; /** @@ -176,6 +220,7 @@ struct arena_info { * @init_lock: Mutex used for the BTT initialization * @init_state: Flag describing the initialization state for the BTT * @num_arenas: Number of arenas in the BTT instance + * @phys_bb: Pointer to the namespace's badblocks structure */ struct btt { struct gendisk *btt_disk; diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 65cc171c721d..2adada1a5855 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -364,9 +364,9 @@ struct device *nd_pfn_create(struct nd_region *nd_region) int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) { u64 checksum, offset; - unsigned long align; enum nd_pfn_mode mode; struct nd_namespace_io *nsio; + unsigned long align, start_pad; struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; struct nd_namespace_common *ndns = nd_pfn->ndns; const u8 *parent_uuid = nd_dev_to_uuid(&ndns->dev); @@ -410,6 +410,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) align = le32_to_cpu(pfn_sb->align); offset = le64_to_cpu(pfn_sb->dataoff); + start_pad = le32_to_cpu(pfn_sb->start_pad); if (align == 0) align = 1UL << ilog2(offset); mode = le32_to_cpu(pfn_sb->mode); @@ -468,7 +469,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) return -EBUSY; } - if ((align && !IS_ALIGNED(offset, align)) + if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) || !IS_ALIGNED(offset, PAGE_SIZE)) { dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled align: %#lx\n", @@ -582,6 +583,12 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn, return altmap; } +static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys) +{ + return min_t(u64, PHYS_SECTION_ALIGN_DOWN(phys), + ALIGN_DOWN(phys, nd_pfn->align)); +} + static int nd_pfn_init(struct nd_pfn *nd_pfn) { u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ?
SZ_128K : 0; @@ -637,13 +644,16 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) start = nsio->res.start; size = PHYS_SECTION_ALIGN_UP(start + size) - start; if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM, - IORES_DESC_NONE) == REGION_MIXED) { + IORES_DESC_NONE) == REGION_MIXED + || !IS_ALIGNED(start + resource_size(&nsio->res), + nd_pfn->align)) { size = resource_size(&nsio->res); - end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size); + end_trunc = start + size - phys_pmem_align_down(nd_pfn, + start + size); } if (start_pad + end_trunc) - dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n", + dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n", dev_name(&ndns->dev), start_pad + end_trunc); /* diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c index a346b4923550..41d3a3c1104e 100644 --- a/drivers/nvmem/meson-mx-efuse.c +++ b/drivers/nvmem/meson-mx-efuse.c @@ -156,8 +156,8 @@ static int meson_mx_efuse_read(void *context, unsigned int offset, MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE, MESON_MX_EFUSE_CNTL1_AUTO_RD_ENABLE); - for (i = offset; i < offset + bytes; i += efuse->config.word_size) { - addr = i / efuse->config.word_size; + for (i = 0; i < bytes; i += efuse->config.word_size) { + addr = (offset + i) / efuse->config.word_size; err = meson_mx_efuse_read_addr(efuse, addr, &tmp); if (err) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index f4c73292b304..1b9ef35cf0d9 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -77,9 +77,10 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, if (of_property_read_bool(child, "broken-turn-around")) mdio->phy_ignore_ta_mask |= 1 << addr; - of_property_read_u32(child, "reset-delay-us", &phy->mdio.reset_delay); - of_property_read_u32(child, "reset-post-delay-us", - &phy->mdio.reset_post_delay); + of_property_read_u32(child, "reset-assert-us", + &phy->mdio.reset_assert_delay); + of_property_read_u32(child, "reset-deassert-us", + &phy->mdio.reset_deassert_delay); /* Associate the OF node with the device structure so it * can be looked up later */ diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 0b3fb99d9b89..7390fb8ca9d1 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -303,7 +303,7 @@ static void dino_mask_irq(struct irq_data *d) struct dino_device *dino_dev = irq_data_get_irq_chip_data(d); int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); - DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); + DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq); /* Clear the matching bit in the IMR register */ dino_dev->imr &= ~(DINO_MASK_IRQ(local_irq)); @@ -316,7 +316,7 @@ static void dino_unmask_irq(struct irq_data *d) int local_irq = gsc_find_local_irq(d->irq, dino_dev->global_irq, DINO_LOCAL_IRQS); u32 tmp; - DBG(KERN_WARNING "%s(0x%p, %d)\n", __func__, dino_dev, d->irq); + DBG(KERN_WARNING "%s(0x%px, %d)\n", __func__, dino_dev, d->irq); /* ** clear pending IRQ bits @@ -396,7 +396,7 @@ ilr_again: if (mask) { if (--ilr_loop > 0) goto ilr_again; - printk(KERN_ERR "Dino 0x%p: stuck interrupt %d\n", + printk(KERN_ERR "Dino 0x%px: stuck interrupt %d\n", dino_dev->hba.base_addr, mask); return IRQ_NONE; } @@ -553,7 +553,7 @@ dino_fixup_bus(struct pci_bus *bus) struct pci_dev *dev; struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge)); - DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n", + DBG(KERN_WARNING "%s(0x%px) bus %d platform_data 0x%px\n", __func__, 
bus, bus->busn_res.start, bus->bridge->platform_data); @@ -854,7 +854,7 @@ static int __init dino_common_init(struct parisc_device *dev, res->flags = IORESOURCE_IO; /* do not mark it busy ! */ if (request_resource(&ioport_resource, res) < 0) { printk(KERN_ERR "%s: request I/O Port region failed " - "0x%lx/%lx (hpa 0x%p)\n", + "0x%lx/%lx (hpa 0x%px)\n", name, (unsigned long)res->start, (unsigned long)res->end, dino_dev->hba.base_addr); return 1; diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c index 4dd9b1308128..99a80da6fd2e 100644 --- a/drivers/parisc/eisa_eeprom.c +++ b/drivers/parisc/eisa_eeprom.c @@ -106,7 +106,7 @@ static int __init eisa_eeprom_init(void) return retval; } - printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr); + printk(KERN_INFO "EISA EEPROM at 0x%px\n", eisa_eeprom_addr); return 0; } diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 04dac6a42c9f..6b8d060d07de 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c @@ -985,9 +985,7 @@ static u32 hv_compose_msi_req_v1( int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = - (apic->irq_delivery_mode == dest_LowestPrio) ? - dest_LowestPrio : dest_Fixed; + int_pkt->int_desc.delivery_mode = dest_Fixed; /* * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in @@ -1008,9 +1006,7 @@ static u32 hv_compose_msi_req_v2( int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.vector_count = 1; - int_pkt->int_desc.delivery_mode = - (apic->irq_delivery_mode == dest_LowestPrio) ? - dest_LowestPrio : dest_Fixed; + int_pkt->int_desc.delivery_mode = dest_Fixed; /* * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index accaaaccb662..6601ad0dfb3a 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -310,7 +310,7 @@ static int cpcap_usb_init_irq(struct platform_device *pdev, int irq, error; irq = platform_get_irq_byname(pdev, name); - if (!irq) + if (irq < 0) return -ENODEV; error = devm_request_threaded_irq(ddata->dev, irq, NULL, diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig index cb09245e9b4c..c845facacb06 100644 --- a/drivers/phy/renesas/Kconfig +++ b/drivers/phy/renesas/Kconfig @@ -12,7 +12,9 @@ config PHY_RCAR_GEN3_USB2 tristate "Renesas R-Car generation 3 USB 2.0 PHY driver" depends on ARCH_RENESAS depends on EXTCON + depends on USB_SUPPORT select GENERIC_PHY + select USB_COMMON help Support for USB 2.0 PHY found on Renesas R-Car generation 3 SoCs. 
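The cpcap-usb fix a few hunks up is worth pausing on: platform_get_irq_byname() reports failure as a negative errno, so the old `if (!irq)` test let every real error through as if it were a valid interrupt number. A minimal sketch of the corrected probe-time pattern; the device and resource names (demo, "wakeup") are invented for illustration:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t demo_irq_thread(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_init_irq(struct platform_device *pdev)
{
        int irq;

        irq = platform_get_irq_byname(pdev, "wakeup");
        if (irq < 0)                    /* failure is a negative errno... */
                return -ENODEV;         /* ...never a bare 0, as the fix notes */

        return devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                         demo_irq_thread, IRQF_ONESHOT,
                                         "demo-wakeup", pdev);
}
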
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c index ee85fa0ca4b0..7492c8978217 100644 --- a/drivers/phy/rockchip/phy-rockchip-typec.c +++ b/drivers/phy/rockchip/phy-rockchip-typec.c @@ -1137,6 +1137,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev) if (IS_ERR(phy)) { dev_err(dev, "failed to create phy: %s\n", child_np->name); + pm_runtime_disable(dev); return PTR_ERR(phy); } @@ -1146,6 +1147,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev) phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) { dev_err(dev, "Failed to register phy provider\n"); + pm_runtime_disable(dev); return PTR_ERR(phy_provider); } diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 4307bf0013e1..63e916d4d069 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -75,14 +75,14 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match); static struct device_node * tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *pads, *np; + + pads = of_get_child_by_name(padctl->dev->of_node, "pads"); + if (!pads) + return NULL; - np = of_find_node_by_name(np, "pads"); - if (np) - np = of_find_node_by_name(np, name); + np = of_get_child_by_name(pads, name); + of_node_put(pads); return np; } @@ -90,16 +90,16 @@ tegra_xusb_find_pad_node(struct tegra_xusb_padctl *padctl, const char *name) static struct device_node * tegra_xusb_pad_find_phy_node(struct tegra_xusb_pad *pad, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. - */ - struct device_node *np = of_node_get(pad->dev.of_node); + struct device_node *np, *lanes; - np = of_find_node_by_name(np, "lanes"); - if (!np) + lanes = of_get_child_by_name(pad->dev.of_node, "lanes"); + if (!lanes) return NULL; - return of_find_node_by_name(np, pad->soc->lanes[index].name); + np = of_get_child_by_name(lanes, pad->soc->lanes[index].name); + of_node_put(lanes); + + return np; } static int @@ -195,7 +195,7 @@ int tegra_xusb_pad_register(struct tegra_xusb_pad *pad, unsigned int i; int err; - children = of_find_node_by_name(pad->dev.of_node, "lanes"); + children = of_get_child_by_name(pad->dev.of_node, "lanes"); if (!children) return -ENODEV; @@ -444,21 +444,21 @@ static struct device_node * tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, unsigned int index) { - /* - * of_find_node_by_name() drops a reference, so make sure to grab one. 
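The comment being deleted in the tegra-xusb hunks below was a workaround note: of_find_node_by_name() searches the whole tree from the starting node and drops the caller's reference to it, so every call site had to re-take a reference first. of_get_child_by_name() matches only direct children and leaves the parent's refcount alone, which is what these lookups actually want. A sketch of the resulting pattern, following the shape of the converted tegra_xusb_find_pad_node():

#include <linux/of.h>

/* Find /<parent>/pads/<name> without disturbing the parent's refcount;
 * only the returned node (if any) carries a new reference. Sketch only. */
static struct device_node *demo_find_pad(struct device_node *parent,
                                         const char *name)
{
        struct device_node *pads, *np;

        pads = of_get_child_by_name(parent, "pads");
        if (!pads)
                return NULL;

        np = of_get_child_by_name(pads, name);
        of_node_put(pads);      /* done with the intermediate node */

        return np;              /* caller does of_node_put() when finished */
}
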
- */ - struct device_node *np = of_node_get(padctl->dev->of_node); + struct device_node *ports, *np; + char *name; - np = of_find_node_by_name(np, "ports"); - if (np) { - char *name; + ports = of_get_child_by_name(padctl->dev->of_node, "ports"); + if (!ports) + return NULL; - name = kasprintf(GFP_KERNEL, "%s-%u", type, index); - if (!name) - return ERR_PTR(-ENOMEM); - np = of_find_node_by_name(np, name); - kfree(name); + name = kasprintf(GFP_KERNEL, "%s-%u", type, index); + if (!name) { + of_node_put(ports); + return ERR_PTR(-ENOMEM); } + np = of_get_child_by_name(ports, name); + kfree(name); + of_node_put(ports); return np; } @@ -847,7 +847,7 @@ static void tegra_xusb_remove_ports(struct tegra_xusb_padctl *padctl) static int tegra_xusb_padctl_probe(struct platform_device *pdev) { - struct device_node *np = of_node_get(pdev->dev.of_node); + struct device_node *np = pdev->dev.of_node; const struct tegra_xusb_padctl_soc *soc; struct tegra_xusb_padctl *padctl; const struct of_device_id *match; @@ -855,7 +855,7 @@ static int tegra_xusb_padctl_probe(struct platform_device *pdev) int err; /* for backwards compatibility with old device trees */ - np = of_find_node_by_name(np, "pads"); + np = of_get_child_by_name(np, "pads"); if (!np) { dev_warn(&pdev->dev, "deprecated DT, using legacy driver\n"); return tegra_xusb_padctl_legacy_probe(pdev); diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index bdedb6325c72..4471fd94e1fe 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1620,6 +1620,22 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) clear_bit(i, chip->irq.valid_mask); } + /* + * The same set of machines in chv_no_valid_mask[] have incorrectly + * configured GPIOs that generate spurious interrupts so we use + * this same list to apply another quirk for them. + * + * See also https://bugzilla.kernel.org/show_bug.cgi?id=197953. + */ + if (!need_valid_mask) { + /* + * Mask all interrupts the community is able to generate + * but leave the ones that can only generate GPEs unmasked. + */ + chv_writel(GENMASK(31, pctrl->community->nirqs), + pctrl->regs + CHV_INTMASK); + } + /* Clear all interrupts */ chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index e6cd8de793e2..3501491e5bfc 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -222,6 +222,9 @@ static enum pin_config_param pcs_bias[] = { */ static struct lock_class_key pcs_lock_class; +/* Class for the IRQ request mutex */ +static struct lock_class_key pcs_request_class; + /* * REVISIT: Reads and writes could eventually use regmap or something * generic. 
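pinctrl-single gains a second lock_class_key just above because irq_set_lockdep_class(), used in the hunk below, now takes two keys: one for the descriptor's internal lock and one for the per-IRQ request mutex, so lockdep can tell the two acquisition contexts apart. A sketch of a mapping callback in this style; the demo_* names and the empty irq_chip are assumptions standing in for driver specifics:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip demo_irq_chip;   /* assumed, filled in elsewhere */

/* One static key per lock that lockdep should classify separately. */
static struct lock_class_key demo_lock_class;    /* irq_desc->lock */
static struct lock_class_key demo_request_class; /* request mutex */

static int demo_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
{
        irq_set_chip_data(irq, d->host_data);
        irq_set_chip_and_handler(irq, &demo_irq_chip, handle_level_irq);
        /* Two keys: the second classifies the per-IRQ request mutex. */
        irq_set_lockdep_class(irq, &demo_lock_class, &demo_request_class);
        irq_set_noprobe(irq);
        return 0;
}
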
But at least on omaps, some mux registers are performance @@ -1486,7 +1489,7 @@ static int pcs_irqdomain_map(struct irq_domain *d, unsigned int irq, irq_set_chip_data(irq, pcs_soc); irq_set_chip_and_handler(irq, &pcs->chip, handle_level_irq); - irq_set_lockdep_class(irq, &pcs_lock_class); + irq_set_lockdep_class(irq, &pcs_lock_class, &pcs_request_class); irq_set_noprobe(irq); return 0; diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index a276c61be217..e62ab087bfd8 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -290,7 +290,7 @@ static int stm32_gpio_domain_translate(struct irq_domain *d, } static int stm32_gpio_domain_activate(struct irq_domain *d, - struct irq_data *irq_data, bool early) + struct irq_data *irq_data, bool reserve) { struct stm32_gpio_bank *bank = d->host_data; struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 791449a2370f..daa68acbc900 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -1458,5 +1458,5 @@ static void __exit acpi_wmi_exit(void) class_unregister(&wmi_bus_class); } -subsys_initcall(acpi_wmi_init); +subsys_initcall_sync(acpi_wmi_init); module_exit(acpi_wmi_exit); diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index c94b606e0df8..ee14d8e45c97 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -2803,6 +2803,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) erp = dasd_3990_erp_handle_match_erp(cqr, erp); } + + /* + * For path verification work we need to stick with the path that was + * originally chosen so that the per path configuration data is + * assigned correctly. 
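In dasd terms, cqr->lpm is the logical path mask the original channel program was started with, one bit per channel path. The added check below copies it into the ERP request so a path-verification retry stays on the path whose configuration data is being validated, instead of fanning out to any operational path. A toy illustration of the masking idea, with standalone types assumed:

#include <stdint.h>

typedef uint8_t lpm_t;  /* 8-bit logical path mask, one bit per path */

/* Mirrors the dasd change: only override for path verification, and
 * only when the original request actually recorded a path. */
static lpm_t demo_erp_lpm(lpm_t erp_lpm, lpm_t orig_lpm, int verify_path)
{
        return (verify_path && orig_lpm) ? orig_lpm : erp_lpm;
}
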
+ */ + if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) { + erp->lpm = cqr->lpm; + } + if (device->features & DASD_FEATURE_ERPLOG) { /* print current erp_chain */ dev_err(&device->cdev->dev, diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile index 05ac6ba15a53..614b44e70a28 100644 --- a/drivers/s390/char/Makefile +++ b/drivers/s390/char/Makefile @@ -17,6 +17,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH) CFLAGS_sclp_early_core.o += -march=z900 endif +CFLAGS_sclp_early_core.o += -D__NO_FORTIFY + obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \ sclp_early.o sclp_early_core.o diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bdc28330800e..6abd3bc285e4 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -36,6 +36,7 @@ #include <asm/diag.h> #include <asm/cio.h> #include <asm/ccwdev.h> +#include <asm/cpcmd.h> #include "qeth_core.h" @@ -1715,23 +1716,87 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd) (prcd[0x11] == _ascebc['M'])); } +static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) +{ + enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; + struct diag26c_vnic_resp *response = NULL; + struct diag26c_vnic_req *request = NULL; + struct ccw_dev_id id; + char userid[80]; + int rc = 0; + + QETH_DBF_TEXT(SETUP, 2, "vmlayer"); + + cpcmd("QUERY USERID", userid, sizeof(userid), &rc); + if (rc) + goto out; + + request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); + response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); + if (!request || !response) { + rc = -ENOMEM; + goto out; + } + + ccw_device_get_id(CARD_RDEV(card), &id); + request->resp_buf_len = sizeof(*response); + request->resp_version = DIAG26C_VERSION6_VM65918; + request->req_format = DIAG26C_VNIC_INFO; + ASCEBC(userid, 8); + memcpy(&request->sys_name, userid, 8); + request->devno = id.devno; + + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + rc = diag26c(request, response, DIAG26C_PORT_VNIC); + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + if (rc) + goto out; + QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); + + if (request->resp_buf_len < sizeof(*response) || + response->version != request->resp_version) { + rc = -EIO; + goto out; + } + + if (response->protocol == VNIC_INFO_PROT_L2) + disc = QETH_DISCIPLINE_LAYER2; + else if (response->protocol == VNIC_INFO_PROT_L3) + disc = QETH_DISCIPLINE_LAYER3; + +out: + kfree(response); + kfree(request); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "err%x", rc); + return disc; +} + /* Determine whether the device requires a specific layer discipline */ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) { + enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; + if (card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSN) { + card->info.type == QETH_CARD_TYPE_OSN) + disc = QETH_DISCIPLINE_LAYER2; + else if (card->info.guestlan) + disc = (card->info.type == QETH_CARD_TYPE_IQD) ? 
+ QETH_DISCIPLINE_LAYER3 : + qeth_vm_detect_layer(card); + + switch (disc) { + case QETH_DISCIPLINE_LAYER2: QETH_DBF_TEXT(SETUP, 3, "force l2"); - return QETH_DISCIPLINE_LAYER2; - } - - /* virtual HiperSocket is L3 only: */ - if (card->info.guestlan && card->info.type == QETH_CARD_TYPE_IQD) { + break; + case QETH_DISCIPLINE_LAYER3: QETH_DBF_TEXT(SETUP, 3, "force l3"); - return QETH_DISCIPLINE_LAYER3; + break; + default: + QETH_DBF_TEXT(SETUP, 3, "force no"); } - QETH_DBF_TEXT(SETUP, 3, "force no"); - return QETH_DISCIPLINE_UNDETERMINED; + return disc; } static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd) @@ -4786,9 +4851,12 @@ int qeth_vm_request_mac(struct qeth_card *card) request->op_code = DIAG26C_GET_MAC; request->devno = id.devno; + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); rc = diag26c(request, response, DIAG26C_MAC_SERVICES); + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); if (rc) goto out; + QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); if (request->resp_buf_len < sizeof(*response) || response->version != request->resp_version) { diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 49f92ebbc5ad..bdd45f4dcace 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h @@ -74,13 +74,15 @@ void qeth_l3_remove_device_attributes(struct device *); int qeth_l3_setrouting_v4(struct qeth_card *); int qeth_l3_setrouting_v6(struct qeth_card *); int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *); -void qeth_l3_del_ipato_entry(struct qeth_card *, enum qeth_prot_versions, - u8 *, int); +int qeth_l3_del_ipato_entry(struct qeth_card *card, + enum qeth_prot_versions proto, u8 *addr, + int mask_bits); int qeth_l3_add_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); -void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); +int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr); int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); -void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, - const u8 *); +int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr); void qeth_l3_update_ipato(struct qeth_card *card); struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 92bcb02671bc..b0c888e86cd4 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -588,10 +588,12 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, return rc; } -void qeth_l3_del_ipato_entry(struct qeth_card *card, - enum qeth_prot_versions proto, u8 *addr, int mask_bits) +int qeth_l3_del_ipato_entry(struct qeth_card *card, + enum qeth_prot_versions proto, u8 *addr, + int mask_bits) { struct qeth_ipato_entry *ipatoe, *tmp; + int rc = -ENOENT; QETH_CARD_TEXT(card, 2, "delipato"); @@ -606,10 +608,12 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, list_del(&ipatoe->entry); qeth_l3_update_ipato(card); kfree(ipatoe); + rc = 0; } } spin_unlock_bh(&card->ip_lock); + return rc; } /* @@ -619,7 +623,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, const u8 *addr) { struct qeth_ipaddr *ipaddr; - int rc = 0; + int rc; ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { @@ -643,7 +647,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, if 
(qeth_l3_ip_from_hash(card, ipaddr)) rc = -EEXIST; else - qeth_l3_add_ip(card, ipaddr); + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); @@ -652,10 +656,11 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, return rc; } -void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, - const u8 *addr) +int qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr) { struct qeth_ipaddr *ipaddr; + int rc; ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { @@ -670,13 +675,14 @@ void qeth_l3_del_vipa(struct qeth_card *card, enum qeth_prot_versions proto, } ipaddr->type = QETH_IP_TYPE_VIPA; } else - return; + return -ENOMEM; spin_lock_bh(&card->ip_lock); - qeth_l3_delete_ip(card, ipaddr); + rc = qeth_l3_delete_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); + return rc; } /* @@ -686,7 +692,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, const u8 *addr) { struct qeth_ipaddr *ipaddr; - int rc = 0; + int rc; ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { @@ -711,7 +717,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, if (qeth_l3_ip_from_hash(card, ipaddr)) rc = -EEXIST; else - qeth_l3_add_ip(card, ipaddr); + rc = qeth_l3_add_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); @@ -720,10 +726,11 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, return rc; } -void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, - const u8 *addr) +int qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, + const u8 *addr) { struct qeth_ipaddr *ipaddr; + int rc; ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { @@ -738,13 +745,14 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, } ipaddr->type = QETH_IP_TYPE_RXIP; } else - return; + return -ENOMEM; spin_lock_bh(&card->ip_lock); - qeth_l3_delete_ip(card, ipaddr); + rc = qeth_l3_delete_ip(card, ipaddr); spin_unlock_bh(&card->ip_lock); kfree(ipaddr); + return rc; } static int qeth_l3_register_addr_entry(struct qeth_card *card, diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 00a10b66c01f..a645cfe66ddf 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -274,7 +274,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, struct qeth_card *card = dev_get_drvdata(dev); struct qeth_ipaddr *addr; char *tmp; - int i; + int rc, i; if (!card) return -EINVAL; @@ -343,11 +343,11 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, return -ENOMEM; spin_lock_bh(&card->ip_lock); - qeth_l3_add_ip(card, addr); + rc = qeth_l3_add_ip(card, addr); spin_unlock_bh(&card->ip_lock); kfree(addr); - return count; + return rc ? rc : count; } static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show, @@ -585,7 +585,7 @@ static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count, mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); if (!rc) - qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); + rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); mutex_unlock(&card->conf_mutex); return rc ? 
rc : count; } @@ -705,22 +705,25 @@ static const struct attribute_group qeth_device_ipato_group = { .attrs = qeth_ipato_device_attrs, }; -static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, - enum qeth_prot_versions proto) +static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, + enum qeth_prot_versions proto, + enum qeth_ip_types type) { + struct qeth_card *card = dev_get_drvdata(dev); struct qeth_ipaddr *ipaddr; char addr_str[40]; int str_len = 0; int entry_len; /* length of 1 entry string, differs between v4 and v6 */ int i; + if (!card) + return -EINVAL; + entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; entry_len += 2; /* \n + terminator */ spin_lock_bh(&card->ip_lock); hash_for_each(card->ip_htable, i, ipaddr, hnode) { - if (ipaddr->proto != proto) - continue; - if (ipaddr->type != QETH_IP_TYPE_VIPA) + if (ipaddr->proto != proto || ipaddr->type != type) continue; /* String must not be longer than PAGE_SIZE. So we check if * string length gets near PAGE_SIZE. Then we can savely display @@ -739,14 +742,11 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card, } static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4); + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4, + QETH_IP_TYPE_VIPA); } static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto, @@ -796,7 +796,7 @@ static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count, mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_vipae(buf, proto, addr); if (!rc) - qeth_l3_del_vipa(card, proto, addr); + rc = qeth_l3_del_vipa(card, proto, addr); mutex_unlock(&card->conf_mutex); return rc ? rc : count; } @@ -816,14 +816,11 @@ static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL, qeth_l3_dev_vipa_del4_store); static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6); + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6, + QETH_IP_TYPE_VIPA); } static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev, @@ -868,48 +865,12 @@ static const struct attribute_group qeth_device_vipa_group = { .attrs = qeth_vipa_device_attrs, }; -static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card, - enum qeth_prot_versions proto) -{ - struct qeth_ipaddr *ipaddr; - char addr_str[40]; - int str_len = 0; - int entry_len; /* length of 1 entry string, differs between v4 and v6 */ - int i; - - entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; - entry_len += 2; /* \n + terminator */ - spin_lock_bh(&card->ip_lock); - hash_for_each(card->ip_htable, i, ipaddr, hnode) { - if (ipaddr->proto != proto) - continue; - if (ipaddr->type != QETH_IP_TYPE_RXIP) - continue; - /* String must not be longer than PAGE_SIZE. So we check if - * string length gets near PAGE_SIZE. 
Then we can savely display - * the next IPv6 address (worst case, compared to IPv4) */ - if ((PAGE_SIZE - str_len) <= entry_len) - break; - qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, - addr_str); - str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", - addr_str); - } - spin_unlock_bh(&card->ip_lock); - str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); - - return str_len; -} - static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4); + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4, + QETH_IP_TYPE_RXIP); } static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, @@ -976,7 +937,7 @@ static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count, mutex_lock(&card->conf_mutex); rc = qeth_l3_parse_rxipe(buf, proto, addr); if (!rc) - qeth_l3_del_rxip(card, proto, addr); + rc = qeth_l3_del_rxip(card, proto, addr); mutex_unlock(&card->conf_mutex); return rc ? rc : count; } @@ -996,14 +957,11 @@ static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL, qeth_l3_dev_rxip_del4_store); static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { - struct qeth_card *card = dev_get_drvdata(dev); - - if (!card) - return -EINVAL; - - return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6); + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6, + QETH_IP_TYPE_RXIP); } static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev, diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c index 7d91e53562f8..a980ef756a67 100644 --- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c @@ -25,15 +25,17 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, u32 task_retry_id, u8 fcp_cmd_payload[32]) { - struct fcoe_task_context *ctx = task_params->context; + struct e4_fcoe_task_context *ctx = task_params->context; + const u8 val_byte = ctx->ystorm_ag_context.byte0; + struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct ystorm_fcoe_task_st_ctx *y_st_ctx; struct tstorm_fcoe_task_st_ctx *t_st_ctx; - struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct mstorm_fcoe_task_st_ctx *m_st_ctx; u32 io_size, val; bool slow_sgl; memset(ctx, 0, sizeof(*(ctx))); + ctx->ystorm_ag_context.byte0 = val_byte; slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges, sgl_task_params->small_mid_sge); io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ? @@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, y_st_ctx = &ctx->ystorm_st_context; y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size); y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id); - y_st_ctx->task_type = task_params->task_type; + y_st_ctx->task_type = (u8)task_params->task_type; memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload, fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload)); /* Tstorm ctx */ t_st_ctx = &ctx->tstorm_st_context; - t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ? - FCOE_TASK_DEV_TYPE_TAPE : - FCOE_TASK_DEV_TYPE_DISK); + t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ? 
+ FCOE_TASK_DEV_TYPE_TAPE : + FCOE_TASK_DEV_TYPE_DISK); t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); val = cpu_to_le32(task_params->cq_rss_number); t_st_ctx->read_only.glbl_q_num = val; t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size); - t_st_ctx->read_only.task_type = task_params->task_type; + t_st_ctx->read_only.task_type = (u8)task_params->task_type; SET_FIELD(t_st_ctx->read_write.flags, FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); @@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, SET_FIELD(m_st_ctx->flags, MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL)); + m_st_ctx->sgl_params.sgl_num_sges = + cpu_to_le16(sgl_task_params->num_sges); } else { /* Tstorm ctx */ SET_FIELD(t_st_ctx->read_write.flags, @@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params, sgl_task_params); } + /* Init Sqe */ init_common_sqe(task_params, SEND_FCOE_CMD); + return 0; } @@ -112,14 +118,16 @@ int init_initiator_midpath_unsolicited_fcoe_task( struct scsi_sgl_task_params *rx_sgl_task_params, u8 fw_to_place_fc_header) { - struct fcoe_task_context *ctx = task_params->context; + struct e4_fcoe_task_context *ctx = task_params->context; + const u8 val_byte = ctx->ystorm_ag_context.byte0; + struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct ystorm_fcoe_task_st_ctx *y_st_ctx; struct tstorm_fcoe_task_st_ctx *t_st_ctx; - struct ustorm_fcoe_task_ag_ctx *u_ag_ctx; struct mstorm_fcoe_task_st_ctx *m_st_ctx; u32 val; memset(ctx, 0, sizeof(*(ctx))); + ctx->ystorm_ag_context.byte0 = val_byte; /* Init Ystorm */ y_st_ctx = &ctx->ystorm_st_context; @@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task( SET_FIELD(y_st_ctx->sgl_mode, YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL); y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size); - y_st_ctx->task_type = task_params->task_type; + y_st_ctx->task_type = (u8)task_params->task_type; memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path, mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params)); @@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task( t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid); val = cpu_to_le32(task_params->cq_rss_number); t_st_ctx->read_only.glbl_q_num = val; - t_st_ctx->read_only.task_type = task_params->task_type; + t_st_ctx->read_only.task_type = (u8)task_params->task_type; SET_FIELD(t_st_ctx->read_write.flags, FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1); t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID); @@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params) } int init_initiator_sequence_recovery_fcoe_task( - struct fcoe_task_params *task_params, u32 off) + struct fcoe_task_params *task_params, u32 desired_offset) { init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY); - task_params->sqe->additional_info_union.seq_rec_updated_offset = off; + task_params->sqe->additional_info_union.seq_rec_updated_offset = + desired_offset; return 0; } diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h index f9c50faa748e..b5c236efd465 100644 --- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h +++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h @@ -13,7 +13,7 @@ struct fcoe_task_params { /* Output parameter [set/filled by the HSI function] */ - struct fcoe_task_context *context; + struct e4_fcoe_task_context *context; /* Output 
parameter [set/filled by the HSI function] */ struct fcoe_wqe *sqe; diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 9bf7b227e69a..c105a2e48ac1 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -129,7 +129,7 @@ struct qedf_ioreq { struct delayed_work timeout_work; struct completion tm_done; struct completion abts_done; - struct fcoe_task_context *task; + struct e4_fcoe_task_context *task; struct fcoe_task_params *task_params; struct scsi_sgl_task_params *sgl_task_params; int idx; @@ -465,7 +465,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, unsigned int timer_msec); extern int qedf_init_mp_req(struct qedf_ioreq *io_req); extern void qedf_init_mp_task(struct qedf_ioreq *io_req, - struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe); + struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe); extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport); extern void qedf_ring_doorbell(struct qedf_rport *fcport); extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index 59c18ca4cda9..aa22b11436ba 100644 --- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -19,7 +19,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op, struct qedf_ioreq *els_req; struct qedf_mp_req *mp_req; struct fc_frame_header *fc_hdr; - struct fcoe_task_context *task; + struct e4_fcoe_task_context *task; int rc = 0; uint32_t did, sid; uint16_t xid; diff --git a/drivers/scsi/qedf/qedf_hsi.h b/drivers/scsi/qedf/qedf_hsi.h index 7faef80c5f7a..503c1ae3ccd0 100644 --- a/drivers/scsi/qedf/qedf_hsi.h +++ b/drivers/scsi/qedf/qedf_hsi.h @@ -225,19 +225,6 @@ enum fcoe_cqe_type { MAX_FCOE_CQE_TYPE }; - -/* - * FCoE device type - */ -enum fcoe_device_type { - FCOE_TASK_DEV_TYPE_DISK, - FCOE_TASK_DEV_TYPE_TAPE, - MAX_FCOE_DEVICE_TYPE -}; - - - - /* * FCoE fast path error codes */ @@ -332,31 +319,6 @@ enum fcoe_sp_error_code { MAX_FCOE_SP_ERROR_CODE }; - -/* - * FCoE SQE request type - */ -enum fcoe_sqe_request_type { - SEND_FCOE_CMD, - SEND_FCOE_MIDPATH, - SEND_FCOE_ABTS_REQUEST, - FCOE_EXCHANGE_CLEANUP, - FCOE_SEQUENCE_RECOVERY, - SEND_FCOE_XFER_RDY, - SEND_FCOE_RSP, - SEND_FCOE_RSP_WITH_SENSE_DATA, - SEND_FCOE_TARGET_DATA, - SEND_FCOE_INITIATOR_DATA, - /* - * Xfer Continuation (==1) ready to be sent. Previous XFERs data - * received successfully. - */ - SEND_FCOE_XFER_CONTINUATION_RDY, - SEND_FCOE_TARGET_ABTS_RSP, - MAX_FCOE_SQE_REQUEST_TYPE -}; - - /* * FCoE task TX state */ @@ -389,34 +351,4 @@ enum fcoe_task_tx_state { MAX_FCOE_TASK_TX_STATE }; - -/* - * FCoE task type - */ -enum fcoe_task_type { - FCOE_TASK_TYPE_WRITE_INITIATOR, - FCOE_TASK_TYPE_READ_INITIATOR, - FCOE_TASK_TYPE_MIDPATH, - FCOE_TASK_TYPE_UNSOLICITED, - FCOE_TASK_TYPE_ABTS, - FCOE_TASK_TYPE_EXCHANGE_CLEANUP, - FCOE_TASK_TYPE_SEQUENCE_CLEANUP, - FCOE_TASK_TYPE_WRITE_TARGET, - FCOE_TASK_TYPE_READ_TARGET, - FCOE_TASK_TYPE_RSP, - FCOE_TASK_TYPE_RSP_SENSE_DATA, - FCOE_TASK_TYPE_ABTS_TARGET, - FCOE_TASK_TYPE_ENUM_SIZE, - MAX_FCOE_TASK_TYPE -}; - -struct scsi_glbl_queue_entry { - /* Start physical address for the RQ (receive queue) PBL. */ - struct regpair rq_pbl_addr; - /* Start physical address for the CQ (completion queue) PBL. */ - struct regpair cq_pbl_addr; - /* Start physical address for the CMDQ (command queue) PBL. 
*/ - struct regpair cmdq_pbl_addr; -}; - #endif /* __QEDF_HSI__ */ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index ded386036c27..b15e69586a36 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -579,7 +579,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req, } static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, - struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx, + struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) { enum fcoe_task_type task_type; @@ -597,7 +597,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, /* Note init_initiator_rw_fcoe_task memsets the task context */ io_req->task = task_ctx; - memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context)); memset(io_req->task_params, 0, sizeof(struct fcoe_task_params)); memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); @@ -673,7 +673,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport, } void qedf_init_mp_task(struct qedf_ioreq *io_req, - struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) + struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe) { struct qedf_mp_req *mp_req = &(io_req->mp_req); struct qedf_rport *fcport = io_req->fcport; @@ -691,7 +691,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req, memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params)); - memset(task_ctx, 0, sizeof(struct fcoe_task_context)); + memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context)); memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params)); /* Setup the task from io_req for easy reference */ @@ -844,7 +844,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req) struct Scsi_Host *host = sc_cmd->device->host; struct fc_lport *lport = shost_priv(host); struct qedf_ctx *qedf = lport_priv(lport); - struct fcoe_task_context *task_ctx; + struct e4_fcoe_task_context *task_ctx; u16 xid; enum fcoe_task_type req_type = 0; struct fcoe_wqe *sqe; @@ -1065,7 +1065,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, struct qedf_ioreq *io_req) { u16 xid, rval; - struct fcoe_task_context *task_ctx; + struct e4_fcoe_task_context *task_ctx; struct scsi_cmnd *sc_cmd; struct fcoe_cqe_rsp_info *fcp_rsp; struct qedf_rport *fcport; @@ -1722,7 +1722,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req, struct qedf_rport *fcport; struct qedf_ctx *qedf; uint16_t xid; - struct fcoe_task_context *task; + struct e4_fcoe_task_context *task; int tmo = 0; int rc = SUCCESS; unsigned long flags; @@ -1835,7 +1835,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, uint8_t tm_flags) { struct qedf_ioreq *io_req; - struct fcoe_task_context *task; + struct e4_fcoe_task_context *task; struct qedf_ctx *qedf = fcport->qedf; struct fc_lport *lport = qedf->lport; int rc = 0; @@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, struct qedf_io_work *io_work; u32 bdq_idx; void *bdq_addr; + struct scsi_bd *p_bd_info; + p_bd_info = &cqe->cqe_info.unsolic_info.bd_info; QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL, - "address.hi=%x address.lo=%x opaque_data.hi=%x " - "opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n", - le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi), - 
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo), - le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi), - le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo), - qedf->bdq_prod_idx, pktlen); - - bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo); + "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n", + le32_to_cpu(p_bd_info->address.hi), + le32_to_cpu(p_bd_info->address.lo), + le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi), + le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo), + qedf->bdq_prod_idx, pktlen); + + bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo); if (bdq_idx >= QEDF_BDQ_SIZE) { QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n", bdq_idx); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 7c0064500cc5..40800dd17d2f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1860,7 +1860,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp) struct qedf_ctx *qedf = fp->qedf; struct global_queue *que; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block *sb = sb_info->sb_virt; + struct status_block_e4 *sb = sb_info->sb_virt; u16 prod_idx; /* Get the pointer to the global CQ this completion is on */ @@ -1887,7 +1887,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp) { struct qedf_ctx *qedf = fp->qedf; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block *sb = sb_info->sb_virt; + struct status_block_e4 *sb = sb_info->sb_virt; struct global_queue *que; u16 prod_idx; struct fcoe_cqe *cqe; @@ -2352,12 +2352,12 @@ void qedf_fp_io_handler(struct work_struct *work) static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block *sb_virt; + struct status_block_e4 *sb_virt; dma_addr_t sb_phys; int ret; sb_virt = dma_alloc_coherent(&qedf->pdev->dev, - sizeof(struct status_block), &sb_phys, GFP_KERNEL); + sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL); if (!sb_virt) { QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed " @@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf) for (i = 0; i < QEDF_BDQ_SIZE; i++) { pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma)); pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma)); - pbl->opaque.hi = 0; + pbl->opaque.fcoe_opaque.hi = 0; /* Opaque lo data is an index into the BDQ array */ - pbl->opaque.lo = cpu_to_le32(i); + pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i); pbl++; } diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h index 397b3b8ee51a..c2478056356a 100644 --- a/drivers/scsi/qedf/qedf_version.h +++ b/drivers/scsi/qedf/qedf_version.h @@ -7,9 +7,9 @@ * this source tree. 
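The status-block retyping above (struct status_block to struct status_block_e4) does not change the polling contract: the driver compares the firmware-written producer index in the status block's PI array against its own consumer index, as qedf_fp_has_work() and qedf_process_completions() do. A reduced, self-contained sketch with toy types standing in for the qed HSI structures:

#include <stdbool.h>
#include <stdint.h>

/* Toy stand-ins for the qed status block and driver queue state. */
struct demo_status_block { uint16_t pi_array[12]; };
struct demo_global_queue { uint16_t cq_cons_idx; };

#define DEMO_CQ_PROD_IDX 2      /* assumed PI slot for the FCoE CQ */

/* Work is pending while the consumer index lags the producer index. */
static bool demo_fp_has_work(const struct demo_status_block *sb,
                             const struct demo_global_queue *que)
{
        return que->cq_cons_idx != sb->pi_array[DEMO_CQ_PROD_IDX];
}
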
*/ -#define QEDF_VERSION "8.20.5.0" +#define QEDF_VERSION "8.33.0.20" #define QEDF_DRIVER_MAJOR_VER 8 -#define QEDF_DRIVER_MINOR_VER 20 -#define QEDF_DRIVER_REV_VER 5 -#define QEDF_DRIVER_ENG_VER 0 +#define QEDF_DRIVER_MINOR_VER 33 +#define QEDF_DRIVER_REV_VER 0 +#define QEDF_DRIVER_ENG_VER 20 diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c index 39d77818a677..fd8a1eea3163 100644 --- a/drivers/scsi/qedi/qedi_debugfs.c +++ b/drivers/scsi/qedi/qedi_debugfs.c @@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused) { struct qedi_fastpath *fp = NULL; struct qed_sb_info *sb_info = NULL; - struct status_block *sb = NULL; + struct status_block_e4 *sb = NULL; struct global_queue *que = NULL; int id; u16 prod_idx; @@ -168,7 +168,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused) sb_info = fp->sb_info; sb = sb_info->sb_virt; prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] & - STATUS_BLOCK_PROD_INDEX_MASK); + STATUS_BLOCK_E4_PROD_INDEX_MASK); seq_printf(s, "SB PROD IDX: %d\n", prod_idx); que = qedi->global_queues[fp->sb_id]; seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx); diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index bd302d3cb9af..092e8f9a6020 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -87,7 +87,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi, { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; - struct iscsi_task_context *task_ctx; + struct e4_iscsi_task_context *task_ctx; struct iscsi_text_rsp *resp_hdr_ptr; struct iscsi_text_response_hdr *cqe_text_response; struct qedi_cmd *cmd; @@ -260,7 +260,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi, { struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_session *session = conn->session; - struct iscsi_task_context *task_ctx; + struct e4_iscsi_task_context *task_ctx; struct iscsi_login_rsp *resp_hdr_ptr; struct iscsi_login_response_hdr *cqe_login_response; struct qedi_cmd *cmd; @@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi, (qedi->bdq_prod_idx % qedi->rq_num_entries)); /* Obtain buffer address from rqe_opaque */ - idx = cqe->rqe_opaque.lo; + idx = cqe->rqe_opaque; if (idx > (QEDI_BDQ_NUM - 1)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "wrong idx %d returned by FW, dropping the unsolicited pkt\n", @@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi, } QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, - "rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n", - cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx); + "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx); QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "unsol_cqe_type = %d\n", cqe->unsol_cqe_type); @@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi, struct scsi_bd *pbl; /* Obtain buffer address from rqe_opaque */ - idx = cqe->rqe_opaque.lo; + idx = cqe->rqe_opaque; if (idx > (QEDI_BDQ_NUM - 1)) { QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "wrong idx %d returned by FW, dropping the unsolicited pkt\n", @@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi, QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n", pbl, pbl->address.hi, pbl->address.lo, idx); - pbl->opaque.hi = 0; - pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx)); + pbl->opaque.iscsi_opaque.reserved_zero[0] = 0; + pbl->opaque.iscsi_opaque.reserved_zero[1] = 0; + 
pbl->opaque.iscsi_opaque.reserved_zero[2] = 0; + pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx); /* Increment producer to let f/w know we've handled the frame */ qedi->bdq_prod_idx += count; @@ -1017,7 +1018,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; - struct iscsi_task_context *fw_task_ctx; + struct e4_iscsi_task_context *fw_task_ctx; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_login_req *login_hdr; struct scsi_sge *resp_sge = NULL; @@ -1037,8 +1038,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1119,7 +1121,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; - struct iscsi_task_context *fw_task_ctx; + struct e4_iscsi_task_context *fw_task_ctx; struct iscsi_logout *logout_hdr = NULL; struct qedi_ctx *qedi = qedi_conn->qedi; struct qedi_cmd *qedi_cmd; @@ -1137,8 +1139,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1467,7 +1470,7 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_tmf_request_hdr tmf_pdu_header; struct iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; - struct iscsi_task_context *fw_task_ctx; + struct e4_iscsi_task_context *fw_task_ctx; struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data; struct iscsi_task *ctask; struct iscsi_tm *tmf_hdr; @@ -1490,8 +1493,9 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1605,7 +1609,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params tx_sgl_task_params; struct scsi_sgl_task_params rx_sgl_task_params; struct iscsi_task_params task_params; - struct iscsi_task_context *fw_task_ctx; + struct e4_iscsi_task_context *fw_task_ctx; struct qedi_ctx *qedi = qedi_conn->qedi; struct iscsi_text *text_hdr; struct scsi_sge *req_sge = NULL; @@ -1627,8 +1631,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); qedi_cmd->task_id = tid; @@ -1705,7 +1710,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, struct scsi_sgl_task_params rx_sgl_task_params; struct 
iscsi_task_params task_params; struct qedi_ctx *qedi = qedi_conn->qedi; - struct iscsi_task_context *fw_task_ctx; + struct e4_iscsi_task_context *fw_task_ctx; struct iscsi_nopout *nopout_hdr; struct scsi_sge *resp_sge = NULL; struct qedi_cmd *qedi_cmd; @@ -1725,8 +1730,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn, return -ENOMEM; fw_task_ctx = - (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); qedi_cmd->task_id = tid; @@ -2046,7 +2052,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) struct iscsi_task_params task_params; struct iscsi_conn_params conn_params; struct scsi_initiator_cmd_params cmd_params; - struct iscsi_task_context *fw_task_ctx; + struct e4_iscsi_task_context *fw_task_ctx; struct iscsi_cls_conn *cls_conn; struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE; @@ -2069,8 +2075,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task) return -ENOMEM; fw_task_ctx = - (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid); - memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context)); + (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, + tid); + memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context)); cmd->task_id = tid; diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c index 7df32a68bd54..a269da1a6c75 100644 --- a/drivers/scsi/qedi/qedi_fw_api.c +++ b/drivers/scsi/qedi/qedi_fw_api.c @@ -203,12 +203,15 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params, struct data_hdr *pdu_header, enum iscsi_task_type task_type) { - struct iscsi_task_context *context; - u16 index; + struct e4_iscsi_task_context *context; u32 val; + u16 index; + u8 val_byte; context = task_params->context; + val_byte = context->mstorm_ag_context.cdu_validation; memset(context, 0, sizeof(*context)); + context->mstorm_ag_context.cdu_validation = val_byte; for (index = 0; index < ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data); @@ -222,7 +225,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params, cpu_to_le16(task_params->conn_icid); SET_FIELD(context->ustorm_ag_context.flags1, - USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); + E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1); context->ustorm_st_context.task_type = task_type; context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; @@ -252,10 +255,9 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc, static void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, - struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, - u32 remaining_recv_len, - u32 expected_data_transfer_len, - u8 num_sges, bool tx_dif_conn_err_en) + struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt, + u32 remaining_recv_len, u32 expected_data_transfer_len, + u8 num_sges, bool tx_dif_conn_err_en) { u32 val; @@ -265,12 +267,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt, ustorm_st_cxt->exp_data_transfer_len = val; SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges); SET_FIELD(ustorm_ag_cxt->flags2, - USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, + E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN, tx_dif_conn_err_en ? 
1 : 0); } static -void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context, +void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context, struct iscsi_conn_params *conn_params, enum iscsi_task_type task_type, u32 task_size, @@ -342,56 +344,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, cpu_to_le16(dif_task_params->application_tag_mask); SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED, dif_task_params->crc_seed ? 1 : 0); - SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE, + SET_FIELD(rdif_context->flags0, + RDIF_TASK_CONTEXT_HOST_GUARD_TYPE, dif_task_params->host_guard_type); SET_FIELD(rdif_context->flags0, - RDIF_TASK_CONTEXT_PROTECTIONTYPE, + RDIF_TASK_CONTEXT_PROTECTION_TYPE, dif_task_params->protection_type); SET_FIELD(rdif_context->flags0, - RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1); + RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1); SET_FIELD(rdif_context->flags0, - RDIF_TASK_CONTEXT_KEEPREFTAGCONST, + RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST, dif_task_params->keep_ref_tag_const ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_VALIDATEAPPTAG, + RDIF_TASK_CONTEXT_VALIDATE_APP_TAG, (dif_task_params->validate_app_tag && dif_task_params->dif_on_network) ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_VALIDATEGUARD, + RDIF_TASK_CONTEXT_VALIDATE_GUARD, (dif_task_params->validate_guard && dif_task_params->dif_on_network) ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_VALIDATEREFTAG, + RDIF_TASK_CONTEXT_VALIDATE_REF_TAG, (dif_task_params->validate_ref_tag && dif_task_params->dif_on_network) ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_HOSTINTERFACE, + RDIF_TASK_CONTEXT_HOST_INTERFACE, dif_task_params->dif_on_host ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_NETWORKINTERFACE, + RDIF_TASK_CONTEXT_NETWORK_INTERFACE, dif_task_params->dif_on_network ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_FORWARDGUARD, + RDIF_TASK_CONTEXT_FORWARD_GUARD, dif_task_params->forward_guard ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_FORWARDAPPTAG, + RDIF_TASK_CONTEXT_FORWARD_APP_TAG, dif_task_params->forward_app_tag ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_FORWARDREFTAG, + RDIF_TASK_CONTEXT_FORWARD_REF_TAG, dif_task_params->forward_ref_tag ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK, + RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK, dif_task_params->forward_app_tag_with_mask ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK, + RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK, dif_task_params->forward_ref_tag_with_mask ? 1 : 0); SET_FIELD(rdif_context->flags1, - RDIF_TASK_CONTEXT_INTERVALSIZE, + RDIF_TASK_CONTEXT_INTERVAL_SIZE, dif_task_params->dif_block_size_log - 9); SET_FIELD(rdif_context->state, - RDIF_TASK_CONTEXT_REFTAGMASK, + RDIF_TASK_CONTEXT_REF_TAG_MASK, dif_task_params->ref_tag_mask); - SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG, + SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG, dif_task_params->ignore_app_tag); } @@ -399,7 +402,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) { tdif_context->app_tag_value = cpu_to_le16(dif_task_params->application_tag); - tdif_context->partial_crc_valueB = + tdif_context->partial_crc_value_b = cpu_to_le16(dif_task_params->crc_seed ? 
0xffff : 0x0000); tdif_context->partial_crc_value_a = cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000); @@ -407,64 +410,68 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context, dif_task_params->crc_seed ? 1 : 0); SET_FIELD(tdif_context->flags0, - TDIF_TASK_CONTEXT_SETERRORWITHEOP, + TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP, dif_task_params->tx_dif_conn_err_en ? 1 : 0); - SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD, + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD, dif_task_params->forward_guard ? 1 : 0); - SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG, + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARD_APP_TAG, dif_task_params->forward_app_tag ? 1 : 0); - SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG, + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_FORWARD_REF_TAG, dif_task_params->forward_ref_tag ? 1 : 0); - SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE, + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE, dif_task_params->dif_block_size_log - 9); - SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE, + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_HOST_INTERFACE, dif_task_params->dif_on_host ? 1 : 0); SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_NETWORKINTERFACE, + TDIF_TASK_CONTEXT_NETWORK_INTERFACE, dif_task_params->dif_on_network ? 1 : 0); val = cpu_to_le32(dif_task_params->initial_ref_tag); tdif_context->initial_ref_tag = val; tdif_context->app_tag_mask = cpu_to_le16(dif_task_params->application_tag_mask); SET_FIELD(tdif_context->flags0, - TDIF_TASK_CONTEXT_HOSTGUARDTYPE, + TDIF_TASK_CONTEXT_HOST_GUARD_TYPE, dif_task_params->host_guard_type); SET_FIELD(tdif_context->flags0, - TDIF_TASK_CONTEXT_PROTECTIONTYPE, + TDIF_TASK_CONTEXT_PROTECTION_TYPE, dif_task_params->protection_type); SET_FIELD(tdif_context->flags0, - TDIF_TASK_CONTEXT_INITIALREFTAGVALID, + TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, dif_task_params->initial_ref_tag_is_valid ? 1 : 0); SET_FIELD(tdif_context->flags0, - TDIF_TASK_CONTEXT_KEEPREFTAGCONST, + TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST, dif_task_params->keep_ref_tag_const ? 1 : 0); - SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD, + SET_FIELD(tdif_context->flags1, + TDIF_TASK_CONTEXT_VALIDATE_GUARD, (dif_task_params->validate_guard && dif_task_params->dif_on_host) ? 1 : 0); SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_VALIDATEAPPTAG, + TDIF_TASK_CONTEXT_VALIDATE_APP_TAG, (dif_task_params->validate_app_tag && dif_task_params->dif_on_host) ? 1 : 0); SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_VALIDATEREFTAG, + TDIF_TASK_CONTEXT_VALIDATE_REF_TAG, (dif_task_params->validate_ref_tag && dif_task_params->dif_on_host) ? 1 : 0); SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK, + TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK, dif_task_params->forward_app_tag_with_mask ? 1 : 0); SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK, + TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK, dif_task_params->forward_ref_tag_with_mask ? 1 : 0); SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_REFTAGMASK, + TDIF_TASK_CONTEXT_REF_TAG_MASK, dif_task_params->ref_tag_mask); SET_FIELD(tdif_context->flags0, - TDIF_TASK_CONTEXT_IGNOREAPPTAG, + TDIF_TASK_CONTEXT_IGNORE_APP_TAG, dif_task_params->ignore_app_tag ? 
1 : 0); } } -static void set_local_completion_context(struct iscsi_task_context *context) +static void set_local_completion_context(struct e4_iscsi_task_context *context) { SET_FIELD(context->ystorm_st_context.state.flags, YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1); @@ -481,7 +488,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params, struct scsi_dif_task_params *dif_task_params) { u32 exp_data_transfer_len = conn_params->max_burst_length; - struct iscsi_task_context *cxt; + struct e4_iscsi_task_context *cxt; bool slow_io = false; u32 task_size, val; u8 num_sges = 0; @@ -494,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params, cxt = task_params->context; - val = cpu_to_le32(task_size); - cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val; - init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context, - cmd_params); - val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo); - cxt->mstorm_st_context.sense_db.lo = val; - val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi); - cxt->mstorm_st_context.sense_db.hi = val; + if (task_type == ISCSI_TASK_TYPE_TARGET_READ) { + set_local_completion_context(cxt); + } else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) { + val = cpu_to_le32(task_size + + ((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset); + cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val; + cxt->mstorm_st_context.expected_itt = + cpu_to_le32(pdu_header->itt); + } else { + val = cpu_to_le32(task_size); + cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = + val; + init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context, + cmd_params); + val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo); + cxt->mstorm_st_context.sense_db.lo = val; + + val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi); + cxt->mstorm_st_context.sense_db.hi = val; + } if (task_params->tx_io_size) { init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags, dif_task_params); + init_dif_context_flags(&cxt->ustorm_st_context.dif_flags, + dif_task_params); init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params, &cxt->ystorm_st_context.state.data_desc, sgl_task_params); @@ -595,7 +616,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { - struct iscsi_task_context *cxt; + struct e4_iscsi_task_context *cxt; cxt = task_params->context; @@ -637,7 +658,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_sgl_task_params, struct scsi_sgl_task_params *rx_sgl_task_params) { - struct iscsi_task_context *cxt; + struct e4_iscsi_task_context *cxt; cxt = task_params->context; @@ -683,7 +704,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { - struct iscsi_task_context *cxt; + struct e4_iscsi_task_context *cxt; cxt = task_params->context; @@ -738,7 +759,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params, struct scsi_sgl_task_params *tx_params, struct scsi_sgl_task_params *rx_params) { - struct iscsi_task_context *cxt; + struct e4_iscsi_task_context *cxt; cxt = task_params->context; diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h index b6f24f91849d..c3deb77ac388 100644 --- a/drivers/scsi/qedi/qedi_fw_iscsi.h +++ b/drivers/scsi/qedi/qedi_fw_iscsi.h @@ -13,7 +13,7 @@ 
#include "qedi_fw_scsi.h" struct iscsi_task_params { - struct iscsi_task_context *context; + struct e4_iscsi_task_context *context; struct iscsi_wqe *sqe; u32 tx_io_size; u32 rx_io_size; diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h index 63d793f46064..f5b5a31999aa 100644 --- a/drivers/scsi/qedi/qedi_gbl.h +++ b/drivers/scsi/qedi/qedi_gbl.h @@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt, void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt); void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid); void qedi_process_iscsi_error(struct qedi_endpoint *ep, - struct async_data *data); + struct iscsi_eqe_data *data); void qedi_start_conn_recovery(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn); struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid); -void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data); +void qedi_process_tcp_error(struct qedi_endpoint *ep, + struct iscsi_eqe_data *data); void qedi_mark_device_missing(struct iscsi_cls_session *cls_session); void qedi_mark_device_available(struct iscsi_cls_session *cls_session); void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu); diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index a02b34ea5cab..7ec7f6e00fb8 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c @@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep) conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT; conn_info->dup_ack_theshold = 3; conn_info->rcv_wnd = 65535; - conn_info->cwnd = DEF_MAX_CWND; conn_info->ss_thresh = 65535; conn_info->srtt = 300; @@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep) (qedi_ep->ip_type == TCP_IPV6), 1, (qedi_ep->vlan_id != 0)); + conn_info->cwnd = DEF_MAX_CWND * conn_info->mss; conn_info->rcv_wnd_scale = 4; - conn_info->ts_ticks_per_second = 1000; conn_info->da_timeout_value = 200; conn_info->ack_frequency = 2; @@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code) return msg; } -void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data) +void qedi_process_iscsi_error(struct qedi_endpoint *ep, + struct iscsi_eqe_data *data) { struct qedi_conn *qedi_conn; struct qedi_ctx *qedi; @@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data) qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn); } -void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data) +void qedi_process_tcp_error(struct qedi_endpoint *ep, + struct iscsi_eqe_data *data) { struct qedi_conn *qedi_conn; diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h index 3247287cb0e7..ea1315189922 100644 --- a/drivers/scsi/qedi/qedi_iscsi.h +++ b/drivers/scsi/qedi/qedi_iscsi.h @@ -182,7 +182,7 @@ struct qedi_cmd { struct scsi_cmnd *scsi_cmd; struct scatterlist *sg; struct qedi_io_bdt io_tbl; - struct iscsi_task_context request; + struct e4_iscsi_task_context request; unsigned char *sense_buffer; dma_addr_t sense_buffer_dma; u16 task_id; diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cccc34adc0e0..a0002232a83f 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) { struct qedi_ctx *qedi; struct qedi_endpoint *qedi_ep; - struct 
async_data *data; + struct iscsi_eqe_data *data; int rval = 0; if (!context || !fw_handle) { @@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle); - data = (struct async_data *)fw_handle; + data = (struct iscsi_eqe_data *)fw_handle; QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, - "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n", - data->cid, data->itid, data->error_code, - data->fw_debug_param); + "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n", + data->icid, data->conn_id, data->error_code, + data->error_pdu_opcode_reserved); - qedi_ep = qedi->ep_tbl[data->cid]; + qedi_ep = qedi->ep_tbl[data->icid]; if (!qedi_ep) { QEDI_WARN(&qedi->dbg_ctx, "Cannot process event, ep already disconnected, cid=0x%x\n", - data->cid); + data->icid); WARN_ON(1); return -ENODEV; } @@ -339,12 +339,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi) static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block *sb_virt; + struct status_block_e4 *sb_virt; dma_addr_t sb_phys; int ret; sb_virt = dma_alloc_coherent(&qedi->pdev->dev, - sizeof(struct status_block), &sb_phys, + sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL); if (!sb_virt) { QEDI_ERR(&qedi->dbg_ctx, @@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX; qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1; - qedi->pf_params.iscsi_pf_params.ooo_enable = 1; err_alloc_mem: return rval; @@ -961,7 +960,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp) { struct qedi_ctx *qedi = fp->qedi; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block *sb = sb_info->sb_virt; + struct status_block_e4 *sb = sb_info->sb_virt; struct qedi_percpu_s *p = NULL; struct global_queue *que; u16 prod_idx; @@ -1015,7 +1014,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp) struct qedi_ctx *qedi = fp->qedi; struct global_queue *que; struct qed_sb_info *sb_info = fp->sb_info; - struct status_block *sb = sb_info->sb_virt; + struct status_block_e4 *sb = sb_info->sb_virt; u16 prod_idx; barrier(); @@ -1262,8 +1261,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi) QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n", pbl, pbl->address.hi, pbl->address.lo, i); - pbl->opaque.hi = 0; - pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i)); + pbl->opaque.iscsi_opaque.reserved_zero[0] = 0; + pbl->opaque.iscsi_opaque.reserved_zero[1] = 0; + pbl->opaque.iscsi_opaque.reserved_zero[2] = 0; + pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i); pbl++; } diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h index d61e3ac22e67..8a0e523fc089 100644 --- a/drivers/scsi/qedi/qedi_version.h +++ b/drivers/scsi/qedi/qedi_version.h @@ -7,8 +7,8 @@ * this source tree. 
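As an aside on the qedi conversion: the init_default_iscsi_task() hunk earlier in this series zeroes the whole task context but first saves the firmware-owned cdu_validation byte and writes it back after the memset. A minimal userspace sketch of that save-and-restore pattern, assuming an illustrative struct fw_ctx rather than the driver's real e4_iscsi_task_context layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Illustrative stand-in for the firmware task context. */
struct fw_ctx {
	uint8_t cdu_validation;		/* still owned by the hardware */
	uint8_t other[31];		/* everything the driver may clear */
};

/* Zero the context but keep the byte the firmware expects to survive,
 * mirroring what init_default_iscsi_task() does around its memset(). */
static void ctx_reinit(struct fw_ctx *ctx)
{
	uint8_t val_byte = ctx->cdu_validation;

	memset(ctx, 0, sizeof(*ctx));
	ctx->cdu_validation = val_byte;
}

int main(void)
{
	struct fw_ctx ctx = { .cdu_validation = 0xa5, .other = { 1, 2, 3 } };

	ctx_reinit(&ctx);
	printf("preserved: 0x%02x\n", ctx.cdu_validation);	/* prints 0xa5 */
	return 0;
}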
*/ -#define QEDI_MODULE_VERSION "8.10.4.0" +#define QEDI_MODULE_VERSION "8.33.0.20" #define QEDI_DRIVER_MAJOR_VER 8 -#define QEDI_DRIVER_MINOR_VER 10 -#define QEDI_DRIVER_REV_VER 4 -#define QEDI_DRIVER_ENG_VER 0 +#define QEDI_DRIVER_MINOR_VER 33 +#define QEDI_DRIVER_REV_VER 0 +#define QEDI_DRIVER_ENG_VER 20 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index a9996c16f4ae..26ce17178401 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -1415,7 +1415,10 @@ static void __scsi_remove_target(struct scsi_target *starget) * check. */ if (sdev->channel != starget->channel || - sdev->id != starget->id || + sdev->id != starget->id) + continue; + if (sdev->sdev_state == SDEV_DEL || + sdev->sdev_state == SDEV_CANCEL || !get_device(&sdev->sdev_gendev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 1b06cf0375dc..3b3d1d050cac 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -953,10 +953,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, case TEST_UNIT_READY: break; default: - set_host_byte(scmnd, DID_TARGET_FAILURE); + set_host_byte(scmnd, DID_ERROR); } break; case SRB_STATUS_INVALID_LUN: + set_host_byte(scmnd, DID_NO_CONNECT); do_work = true; process_err_fn = storvsc_remove_lun; break; diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig index a517b2d29f1b..8f6494158d3d 100644 --- a/drivers/staging/android/ion/Kconfig +++ b/drivers/staging/android/ion/Kconfig @@ -37,7 +37,7 @@ config ION_CHUNK_HEAP config ION_CMA_HEAP bool "Ion CMA heap support" - depends on ION && CMA + depends on ION && DMA_CMA help Choose this option to enable CMA heaps with Ion. This heap is backed by the Contiguous Memory Allocator (CMA). 
If your system has these diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index a7d9b0e98572..f480885e346b 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -346,7 +346,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); @@ -368,7 +368,7 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, mutex_lock(&buffer->lock); list_for_each_entry(a, &buffer->attachments, list) { dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents, - DMA_BIDIRECTIONAL); + direction); } mutex_unlock(&buffer->lock); diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index dd5545d9990a..86196ffd2faf 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -39,9 +39,15 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, struct ion_cma_heap *cma_heap = to_cma_heap(heap); struct sg_table *table; struct page *pages; + unsigned long size = PAGE_ALIGN(len); + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned long align = get_order(size); int ret; - pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL); + if (align > CONFIG_CMA_ALIGNMENT) + align = CONFIG_CMA_ALIGNMENT; + + pages = cma_alloc(cma_heap->cma, nr_pages, align, GFP_KERNEL); if (!pages) return -ENOMEM; @@ -53,7 +59,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, if (ret) goto free_mem; - sg_set_page(table->sgl, pages, len, 0); + sg_set_page(table->sgl, pages, size, 0); buffer->priv_virt = pages; buffer->sg_table = table; @@ -62,7 +68,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, free_mem: kfree(table); err: - cma_release(cma_heap->cma, pages, buffer->size); + cma_release(cma_heap->cma, pages, nr_pages); return -ENOMEM; } @@ -70,9 +76,10 @@ static void ion_cma_free(struct ion_buffer *buffer) { struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); struct page *pages = buffer->priv_virt; + unsigned long nr_pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; /* release memory */ - cma_release(cma_heap->cma, pages, buffer->size); + cma_release(cma_heap->cma, pages, nr_pages); /* release sg table */ sg_free_table(buffer->sg_table); kfree(buffer->sg_table); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index 986c2a40d978..8267119ccc8e 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -487,21 +487,18 @@ ksocknal_add_peer(struct lnet_ni *ni, struct lnet_process_id id, __u32 ipaddr, ksocknal_nid2peerlist(id.nid)); } - route2 = NULL; list_for_each_entry(route2, &peer->ksnp_routes, ksnr_list) { - if (route2->ksnr_ipaddr == ipaddr) - break; - - route2 = NULL; - } - if (!route2) { - ksocknal_add_route_locked(peer, route); - route->ksnr_share_count++; - } else { - ksocknal_route_decref(route); - route2->ksnr_share_count++; + if (route2->ksnr_ipaddr == ipaddr) { + /* Route already exists, use the old one */ + ksocknal_route_decref(route); + route2->ksnr_share_count++; + goto out; + } } - + /* Route doesn't already exist, add the new one */ + ksocknal_add_route_locked(peer, route); + route->ksnr_share_count++; +out: 
write_unlock_bh(&ksocknal_data.ksnd_global_lock); return 0; diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 419a7a90bce0..f45bcbc63738 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -339,7 +339,7 @@ static void __ring_interrupt(struct tb_ring *ring) return; if (ring->start_poll) { - __ring_interrupt_mask(ring, false); + __ring_interrupt_mask(ring, true); ring->start_poll(ring->poll_data); } else { schedule_work(&ring->work); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 427e0d5d8f13..539b49adb6af 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1762,7 +1762,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) { struct n_tty_data *ldata = tty->disc_data; - if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) { + if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) { bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE); ldata->line_start = ldata->read_tail; if (!L_ICANON(tty) || !read_cnt(ldata)) { @@ -2425,7 +2425,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file, return put_user(tty_chars_in_buffer(tty), (int __user *) arg); case TIOCINQ: down_write(&tty->termios_rwsem); - if (L_ICANON(tty)) + if (L_ICANON(tty) && !L_EXTPROC(tty)) retval = inq_canon(ldata); else retval = read_cnt(ldata); diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index 3593ce0ec641..880009987460 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c @@ -247,7 +247,7 @@ static int ci_hdrc_msm_probe(struct platform_device *pdev) if (ret) goto err_mux; - ulpi_node = of_find_node_by_name(of_node_get(pdev->dev.of_node), "ulpi"); + ulpi_node = of_get_child_by_name(pdev->dev.of_node, "ulpi"); if (ulpi_node) { phy_node = of_get_next_available_child(ulpi_node, NULL); ci->hsic = of_device_is_compatible(phy_node, "qcom,usb-hsic-phy"); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 78e92d29f8d9..c821b4b9647e 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -1007,7 +1007,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) case USB_SSP_CAP_TYPE: ssp_cap = (struct usb_ssp_cap_descriptor *)buffer; ssac = (le32_to_cpu(ssp_cap->bmAttributes) & - USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1; + USB_SSP_SUBLINK_SPEED_ATTRIBS); if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac)) dev->bos->ssp_cap = ssp_cap; break; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a10b346b9777..4024926c1d68 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -52,10 +52,11 @@ static const struct usb_device_id usb_quirk_list[] = { /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, - /* Logitech HD Pro Webcams C920, C920-C and C930e */ + /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT }, /* Logitech ConferenceCam CC3000e */ { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, @@ -149,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */ { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM }, + /* ELSA MicroLink 56K */ + { 
USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM }, diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 4f7895dbcf88..e26e685d8a57 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -162,7 +162,7 @@ static void xhci_debugfs_extcap_regset(struct xhci_hcd *xhci, int cap_id, static int xhci_ring_enqueue_show(struct seq_file *s, void *unused) { dma_addr_t dma; - struct xhci_ring *ring = s->private; + struct xhci_ring *ring = *(struct xhci_ring **)s->private; dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue); seq_printf(s, "%pad\n", &dma); @@ -173,7 +173,7 @@ static int xhci_ring_enqueue_show(struct seq_file *s, void *unused) static int xhci_ring_dequeue_show(struct seq_file *s, void *unused) { dma_addr_t dma; - struct xhci_ring *ring = s->private; + struct xhci_ring *ring = *(struct xhci_ring **)s->private; dma = xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); seq_printf(s, "%pad\n", &dma); @@ -183,7 +183,7 @@ static int xhci_ring_dequeue_show(struct seq_file *s, void *unused) static int xhci_ring_cycle_show(struct seq_file *s, void *unused) { - struct xhci_ring *ring = s->private; + struct xhci_ring *ring = *(struct xhci_ring **)s->private; seq_printf(s, "%d\n", ring->cycle_state); @@ -346,7 +346,7 @@ static void xhci_debugfs_create_files(struct xhci_hcd *xhci, } static struct dentry *xhci_debugfs_create_ring_dir(struct xhci_hcd *xhci, - struct xhci_ring *ring, + struct xhci_ring **ring, const char *name, struct dentry *parent) { @@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); epriv->root = xhci_debugfs_create_ring_dir(xhci, - dev->eps[ep_index].new_ring, + &dev->eps[ep_index].new_ring, epriv->name, spriv->root); spriv->eps[ep_index] = epriv; @@ -423,7 +423,7 @@ void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id) priv->dev = dev; dev->debugfs_private = priv; - xhci_debugfs_create_ring_dir(xhci, dev->eps[0].ring, + xhci_debugfs_create_ring_dir(xhci, &dev->eps[0].ring, "ep00", priv->root); xhci_debugfs_create_context_files(xhci, priv->root, slot_id); @@ -488,11 +488,11 @@ void xhci_debugfs_init(struct xhci_hcd *xhci) ARRAY_SIZE(xhci_extcap_dbc), "reg-ext-dbc"); - xhci_debugfs_create_ring_dir(xhci, xhci->cmd_ring, + xhci_debugfs_create_ring_dir(xhci, &xhci->cmd_ring, "command-ring", xhci->debugfs_root); - xhci_debugfs_create_ring_dir(xhci, xhci->event_ring, + xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring, "event-ring", xhci->debugfs_root); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 7ef1274ef7f7..1aad89b8aba0 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -178,6 +178,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_BROKEN_STREAMS; } if (pdev->vendor == PCI_VENDOR_ID_RENESAS && + pdev->device == 0x0014) + xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0015) xhci->quirks |= XHCI_RESET_ON_RESUME; if (pdev->vendor == PCI_VENDOR_ID_VIA) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 2424d3020ca3..da6dbe3ebd8b 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3525,8 +3525,6 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 
struct xhci_slot_ctx *slot_ctx; int i, ret; - xhci_debugfs_remove_slot(xhci, udev->slot_id); - #ifndef CONFIG_USB_DEFAULT_PERSIST /* * We called pm_runtime_get_noresume when the device was attached. @@ -3555,8 +3553,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) } ret = xhci_disable_slot(xhci, udev->slot_id); - if (ret) + if (ret) { + xhci_debugfs_remove_slot(xhci, udev->slot_id); xhci_free_virt_device(xhci, udev->slot_id); + } } int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1aba9105b369..fc68952c994a 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1013,6 +1013,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, + { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4faa09fe308c..8b4ecd2bd297 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -915,6 +915,12 @@ #define ICPDAS_I7563U_PID 0x0105 /* + * Airbus Defence and Space + */ +#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */ +#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */ + +/* * RT Systems programming cables for various ham radios */ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3b3513874cfd..b6320e3be429 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -233,6 +233,8 @@ static void option_instat_callback(struct urb *urb); /* These Quectel products use Qualcomm's vendor ID */ #define QUECTEL_PRODUCT_UC20 0x9003 #define QUECTEL_PRODUCT_UC15 0x9090 +/* These Yuga products use Qualcomm's vendor ID */ +#define YUGA_PRODUCT_CLM920_NC5 0x9625 #define QUECTEL_VENDOR_ID 0x2c7c /* These Quectel products use Quectel's vendor ID */ @@ -280,6 +282,7 @@ static void option_instat_callback(struct urb *urb); #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 #define TELIT_PRODUCT_ME910 0x1100 +#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101 #define TELIT_PRODUCT_LE920 0x1200 #define TELIT_PRODUCT_LE910 0x1201 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 @@ -645,6 +648,11 @@ static const struct option_blacklist_info telit_me910_blacklist = { .reserved = BIT(1) | BIT(3), }; +static const struct option_blacklist_info telit_me910_dual_modem_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(3), +}; + static const struct option_blacklist_info telit_le910_blacklist = { .sendsetup = BIT(0), .reserved = BIT(1) | BIT(2), @@ -674,6 +682,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = { .reserved = BIT(4) | BIT(5), }; +static const struct option_blacklist_info yuga_clm920_nc5_blacklist = { + .reserved = BIT(1) | BIT(4), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -1178,6 +1190,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + /* Yuga products use Qualcomm vendor ID */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5), 
+ .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, @@ -1244,6 +1259,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), + .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index e3892541a489..613f91add03d 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ + {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */ + {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */ {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ @@ -342,6 +344,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) break; case 2: dev_dbg(dev, "NMEA GPS interface found\n"); + sendsetup = true; break; case 3: dev_dbg(dev, "Modem port found\n"); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index a3df8ee82faf..e31a6f204397 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -149,8 +149,7 @@ static void stub_shutdown_connection(struct usbip_device *ud) * step 1? */ if (ud->tcp_socket) { - dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n", - ud->tcp_socket); + dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c index 4f48b306713f..c31c8402a0c5 100644 --- a/drivers/usb/usbip/stub_main.c +++ b/drivers/usb/usbip/stub_main.c @@ -237,11 +237,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev) struct stub_priv *priv; struct urb *urb; - dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev); + dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n"); while ((priv = stub_priv_pop(sdev))) { urb = priv->urb; - dev_dbg(&sdev->udev->dev, "free urb %p\n", urb); + dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n", + priv->seqnum); usb_kill_urb(urb); kmem_cache_free(stub_priv_cache, priv); diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 493ac2928391..6c5a59313999 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c @@ -211,9 +211,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, if (priv->seqnum != pdu->u.cmd_unlink.seqnum) continue; - dev_info(&priv->urb->dev->dev, "unlink urb %p\n", - priv->urb); - /* * This matched urb is not completed yet (i.e., be in * flight in usb hcd hardware/driver). 
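The option driver entries above attach blacklist structs whose reserved masks are interface bitmaps; the probe path then declines to bind the vendor's network or diagnostic interfaces. A standalone sketch of that reserved-interface test, with reserved_ifaces and may_bind() as hypothetical names (the mask mirrors the yuga_clm920_nc5 entry above):

#include <stdio.h>

#define BIT(n) (1UL << (n))

/* Interfaces the serial core must skip because another driver owns them. */
static const unsigned long reserved_ifaces = BIT(1) | BIT(4);

static int may_bind(unsigned int ifnum)
{
	return !(reserved_ifaces & BIT(ifnum));
}

int main(void)
{
	for (unsigned int i = 0; i < 6; i++)
		printf("intf %u: %s\n", i, may_bind(i) ? "bind" : "skip");
	return 0;
}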
Now we are @@ -252,8 +249,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev, ret = usb_unlink_urb(priv->urb); if (ret != -EINPROGRESS) dev_err(&priv->urb->dev->dev, - "failed to unlink a urb %p, ret %d\n", - priv->urb, ret); + "failed to unlink a urb # %lu, ret %d\n", + priv->seqnum, ret); return 0; } @@ -342,14 +339,6 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) epd = &ep->desc; - /* validate transfer_buffer_length */ - if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) { - dev_err(&sdev->udev->dev, - "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n", - pdu->u.cmd_submit.transfer_buffer_length); - return -1; - } - if (usb_endpoint_xfer_control(epd)) { if (dir == USBIP_DIR_OUT) return usb_sndctrlpipe(udev, epnum); @@ -482,8 +471,7 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, } /* allocate urb transfer buffer, if needed */ - if (pdu->u.cmd_submit.transfer_buffer_length > 0 && - pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) { + if (pdu->u.cmd_submit.transfer_buffer_length > 0) { priv->urb->transfer_buffer = kzalloc(pdu->u.cmd_submit.transfer_buffer_length, GFP_KERNEL); diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index 53172b1f6257..f0ec41a50cbc 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c @@ -88,7 +88,7 @@ void stub_complete(struct urb *urb) /* link a urb to the queue of tx. */ spin_lock_irqsave(&sdev->priv_lock, flags); if (sdev->ud.tcp_socket == NULL) { - usbip_dbg_stub_tx("ignore urb for closed connection %p", urb); + usbip_dbg_stub_tx("ignore urb for closed connection\n"); /* It will be freed in stub_device_cleanup_urbs(). */ } else if (priv->unlinking) { stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status); @@ -190,8 +190,8 @@ static int stub_send_ret_submit(struct stub_device *sdev) /* 1. 
setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb); - usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", - pdu_header.base.seqnum, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d\n", + pdu_header.base.seqnum); usbip_header_correct_endian(&pdu_header, 1); iov[iovnum].iov_base = &pdu_header; diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index f7978933b402..7b219d9109b4 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -317,26 +317,20 @@ int usbip_recv(struct socket *sock, void *buf, int size) struct msghdr msg = {.msg_flags = MSG_NOSIGNAL}; int total = 0; + if (!sock || !buf || !size) + return -EINVAL; + iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size); usbip_dbg_xmit("enter\n"); - if (!sock || !buf || !size) { - pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf, - size); - return -EINVAL; - } - do { - int sz = msg_data_left(&msg); + msg_data_left(&msg); sock->sk->sk_allocation = GFP_NOIO; result = sock_recvmsg(sock, &msg, MSG_WAITALL); - if (result <= 0) { - pr_debug("receive sock %p buf %p size %u ret %d total %d\n", - sock, buf + total, sz, result, total); + if (result <= 0) goto err; - } total += result; } while (msg_data_left(&msg)); diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 6b3278c4b72a..c3e1008aa491 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c @@ -656,9 +656,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag struct vhci_device *vdev; unsigned long flags; - usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", - hcd, urb, mem_flags); - if (portnum > VHCI_HC_PORTS) { pr_err("invalid port number %d\n", portnum); return -ENODEV; @@ -822,8 +819,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) struct vhci_device *vdev; unsigned long flags; - pr_info("dequeue a urb %p\n", urb); - spin_lock_irqsave(&vhci->lock, flags); priv = urb->hcpriv; @@ -851,7 +846,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) /* tcp connection is closed */ spin_lock(&vdev->priv_lock); - pr_info("device %p seems to be disconnected\n", vdev); list_del(&priv->list); kfree(priv); urb->hcpriv = NULL; @@ -863,8 +857,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) * vhci_rx will receive RET_UNLINK and give back the URB. * Otherwise, we give back it here. */ - pr_info("gives back urb %p\n", urb); - usb_hcd_unlink_urb_from_ep(hcd, urb); spin_unlock_irqrestore(&vhci->lock, flags); @@ -892,8 +884,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) unlink->unlink_seqnum = priv->seqnum; - pr_info("device %p seems to be still connected\n", vdev); - /* send cmd_unlink and try to cancel the pending URB in the * peer */ list_add_tail(&unlink->list, &vdev->unlink_tx); @@ -975,7 +965,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud) /* need this? 
see stub_dev.c */ if (ud->tcp_socket) { - pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket); + pr_debug("shutdown tcp_socket %d\n", ud->sockfd); kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR); } diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c index 90577e8b2282..112ebb90d8c9 100644 --- a/drivers/usb/usbip/vhci_rx.c +++ b/drivers/usb/usbip/vhci_rx.c @@ -23,24 +23,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum) urb = priv->urb; status = urb->status; - usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n", - urb, priv, seqnum); + usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum); switch (status) { case -ENOENT: /* fall through */ case -ECONNRESET: - dev_info(&urb->dev->dev, - "urb %p was unlinked %ssynchronuously.\n", urb, - status == -ENOENT ? "" : "a"); + dev_dbg(&urb->dev->dev, + "urb seq# %u was unlinked %ssynchronously\n", + seqnum, status == -ENOENT ? "" : "a"); break; case -EINPROGRESS: /* no info output */ break; default: - dev_info(&urb->dev->dev, - "urb %p may be in a error, status %d\n", urb, - status); + dev_dbg(&urb->dev->dev, + "urb seq# %u may be in an error, status %d\n", + seqnum, status); } list_del(&priv->list); @@ -67,8 +66,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, spin_unlock_irqrestore(&vdev->priv_lock, flags); if (!urb) { - pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum); - pr_info("max seqnum %d\n", + pr_err("cannot find a urb of seqnum %u max seqnum %d\n", + pdu->base.seqnum, atomic_read(&vhci_hcd->seqnum)); usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); return; @@ -91,7 +90,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum); spin_lock_irqsave(&vhci->lock, flags); usb_hcd_unlink_urb_from_ep(vhci_hcd_to_hcd(vhci_hcd), urb); @@ -158,7 +157,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, pr_info("the urb (seqnum %d) was already given back\n", pdu->base.seqnum); } else { - usbip_dbg_vhci_rx("now giveback urb %p\n", urb); + usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum); /* If unlink is successful, status is -ECONNRESET */ urb->status = pdu->u.ret_unlink.status; diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c index d625a2ff4b71..9aed15a358b7 100644 --- a/drivers/usb/usbip/vhci_tx.c +++ b/drivers/usb/usbip/vhci_tx.c @@ -69,7 +69,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev) memset(&msg, 0, sizeof(msg)); memset(&iov, 0, sizeof(iov)); - usbip_dbg_vhci_tx("setup txdata urb %p\n", urb); + usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n", + priv->seqnum); /* 1. setup usbip_header */ setup_cmd_submit_pdu(&pdu_header, urb); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index c7bdeb655646..7baa90abe097 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -89,7 +89,7 @@ struct vhost_net_ubuf_ref { #define VHOST_RX_BATCH 64 struct vhost_net_buf { - struct sk_buff **queue; + void **queue; int tail; int head; }; @@ -108,7 +108,7 @@ struct vhost_net_virtqueue { /* Reference counting for outstanding ubufs. * Protected by vq mutex. Writers must also take device mutex.
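A recurring shape in the usbip hunks above: debug messages stop printing raw kernel pointers with %p and key on the request's sequence number instead, since leaked addresses undermine KASLR. A small before/after sketch, with struct req standing in for the real stub_priv/vhci private data:

#include <stdio.h>

struct req {
	unsigned long seqnum;
	void *urb;
};

/* Old style: leaks a kernel address into the log. */
static void log_unlink_old(const struct req *r)
{
	printf("unlink urb %p\n", r->urb);
}

/* New style: a stable, harmless identifier. */
static void log_unlink_new(const struct req *r)
{
	printf("unlink urb seqnum %lu\n", r->seqnum);
}

int main(void)
{
	struct req r = { .seqnum = 42, .urb = &r };

	log_unlink_old(&r);
	log_unlink_new(&r);
	return 0;
}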
*/ struct vhost_net_ubuf_ref *ubufs; - struct skb_array *rx_array; + struct ptr_ring *rx_ring; struct vhost_net_buf rxq; }; @@ -158,7 +158,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq) struct vhost_net_buf *rxq = &nvq->rxq; rxq->head = 0; - rxq->tail = skb_array_consume_batched(nvq->rx_array, rxq->queue, + rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, VHOST_RX_BATCH); return rxq->tail; } @@ -167,13 +167,25 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq) { struct vhost_net_buf *rxq = &nvq->rxq; - if (nvq->rx_array && !vhost_net_buf_is_empty(rxq)) { - skb_array_unconsume(nvq->rx_array, rxq->queue + rxq->head, - vhost_net_buf_get_size(rxq)); + if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { + ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, + vhost_net_buf_get_size(rxq), + __skb_array_destroy_skb); rxq->head = rxq->tail = 0; } } +static int vhost_net_buf_peek_len(void *ptr) +{ + if (tun_is_xdp_buff(ptr)) { + struct xdp_buff *xdp = tun_ptr_to_xdp(ptr); + + return xdp->data_end - xdp->data; + } + + return __skb_array_len_with_tag(ptr); +} + static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq) { struct vhost_net_buf *rxq = &nvq->rxq; @@ -185,7 +197,7 @@ static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq) return 0; out: - return __skb_array_len_with_tag(vhost_net_buf_get_ptr(rxq)); + return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq)); } static void vhost_net_buf_init(struct vhost_net_buf *rxq) @@ -583,7 +595,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) int len = 0; unsigned long flags; - if (rvq->rx_array) + if (rvq->rx_ring) return vhost_net_buf_peek(rvq); spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); @@ -744,7 +756,7 @@ static void handle_rx(struct vhost_net *net) }; size_t total_len = 0; int err, mergeable; - s16 headcount; + s16 headcount, nheads = 0; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; struct socket *sock; @@ -772,7 +784,7 @@ static void handle_rx(struct vhost_net *net) while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) { sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; - headcount = get_rx_bufs(vq, vq->heads, vhost_len, + headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len, &in, vq_log, &log, likely(mergeable) ? UIO_MAXIOV : 1); /* On error, stop handling until the next kick. */ @@ -790,7 +802,7 @@ static void handle_rx(struct vhost_net *net) * they refilled. 
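vhost_net_buf_peek_len() above can work on void * entries because tun marks xdp_buff pointers before queueing them, and tun_is_xdp_buff()/tun_ptr_to_xdp() test and strip the mark. A userspace sketch of that low-bit pointer-tagging trick; it assumes at least 2-byte-aligned objects so bit 0 is free, and the names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define XDP_TAG 1UL

static void *tag_xdp(void *ptr)
{
	return (void *)((uintptr_t)ptr | XDP_TAG);
}

static int is_xdp(const void *ptr)
{
	return (int)((uintptr_t)ptr & XDP_TAG);
}

static void *untag(void *ptr)
{
	return (void *)((uintptr_t)ptr & ~XDP_TAG);
}

int main(void)
{
	int skb = 1, xdp = 2;	/* stand-ins for the two buffer types */
	void *ring[2] = { &skb, tag_xdp(&xdp) };

	for (int i = 0; i < 2; i++)
		printf("slot %d: %s (%d)\n", i,
		       is_xdp(ring[i]) ? "xdp" : "skb",
		       *(int *)untag(ring[i]));
	return 0;
}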
*/ goto out; } - if (nvq->rx_array) + if (nvq->rx_ring) msg.msg_control = vhost_net_buf_consume(&nvq->rxq); /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { @@ -844,8 +856,12 @@ static void handle_rx(struct vhost_net *net) vhost_discard_vq_desc(vq, headcount); goto out; } - vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, - headcount); + nheads += headcount; + if (nheads > VHOST_RX_BATCH) { + vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, + nheads); + nheads = 0; + } if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len); total_len += vhost_len; @@ -856,6 +872,9 @@ static void handle_rx(struct vhost_net *net) } vhost_net_enable_vq(net, vq); out: + if (nheads) + vhost_add_used_and_signal_n(&net->dev, vq, vq->heads, + nheads); mutex_unlock(&vq->mutex); } @@ -896,7 +915,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) struct vhost_net *n; struct vhost_dev *dev; struct vhost_virtqueue **vqs; - struct sk_buff **queue; + void **queue; int i; n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); @@ -908,7 +927,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) return -ENOMEM; } - queue = kmalloc_array(VHOST_RX_BATCH, sizeof(struct sk_buff *), + queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *), GFP_KERNEL); if (!queue) { kfree(vqs); @@ -1046,23 +1065,23 @@ err: return ERR_PTR(r); } -static struct skb_array *get_tap_skb_array(int fd) +static struct ptr_ring *get_tap_ptr_ring(int fd) { - struct skb_array *array; + struct ptr_ring *ring; struct file *file = fget(fd); if (!file) return NULL; - array = tun_get_skb_array(file); - if (!IS_ERR(array)) + ring = tun_get_tx_ring(file); + if (!IS_ERR(ring)) goto out; - array = tap_get_skb_array(file); - if (!IS_ERR(array)) + ring = tap_get_ptr_ring(file); + if (!IS_ERR(ring)) goto out; - array = NULL; + ring = NULL; out: fput(file); - return array; + return ring; } static struct socket *get_tap_socket(int fd) @@ -1143,7 +1162,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) vq->private_data = sock; vhost_net_buf_unproduce(nvq); if (index == VHOST_NET_VQ_RX) - nvq->rx_array = get_tap_skb_array(fd); + nvq->rx_ring = get_tap_ptr_ring(fd); r = vhost_vq_init_access(vq); if (r) goto err_used; diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index f77e499afddd..065f0b607373 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -257,10 +257,25 @@ static void release_memory_resource(struct resource *resource) kfree(resource); } +/* + * Host memory not allocated to dom0. We can use this range for hotplug-based + * ballooning. + * + * It's a type-less resource. Setting IORESOURCE_MEM will make resource + * management algorithms (arch_remove_reservations()) look into guest e820, + * which we don't want. 
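The handle_rx() change above accumulates completed descriptor heads in nheads and signals the guest once the batch exceeds VHOST_RX_BATCH, with a final flush at the out label, rather than signalling per packet. A compressed sketch of that accumulate-and-flush shape, with flush_used() standing in for vhost_add_used_and_signal_n():

#include <stdio.h>

#define RX_BATCH 64

static void flush_used(int n)
{
	printf("signal guest: %d heads\n", n);
}

int main(void)
{
	int nheads = 0;

	for (int pkt = 0; pkt < 150; pkt++) {
		int headcount = 1;	/* heads consumed by this packet */

		nheads += headcount;
		if (nheads > RX_BATCH) {
			flush_used(nheads);
			nheads = 0;
		}
	}
	if (nheads)			/* the final flush at out: */
		flush_used(nheads);
	return 0;
}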
+ */ +static struct resource hostmem_resource = { + .name = "Host RAM", +}; + +void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res) +{} + static struct resource *additional_memory_resource(phys_addr_t size) { - struct resource *res; - int ret; + struct resource *res, *res_hostmem; + int ret = -ENOMEM; res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) @@ -269,13 +284,42 @@ static struct resource *additional_memory_resource(phys_addr_t size) res->name = "System RAM"; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; - ret = allocate_resource(&iomem_resource, res, - size, 0, -1, - PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); - if (ret < 0) { - pr_err("Cannot allocate new System RAM resource\n"); - kfree(res); - return NULL; + res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL); + if (res_hostmem) { + /* Try to grab a range from hostmem */ + res_hostmem->name = "Host memory"; + ret = allocate_resource(&hostmem_resource, res_hostmem, + size, 0, -1, + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); + } + + if (!ret) { + /* + * Insert this resource into iomem. Because hostmem_resource + * tracks portion of guest e820 marked as UNUSABLE noone else + * should try to use it. + */ + res->start = res_hostmem->start; + res->end = res_hostmem->end; + ret = insert_resource(&iomem_resource, res); + if (ret < 0) { + pr_err("Can't insert iomem_resource [%llx - %llx]\n", + res->start, res->end); + release_memory_resource(res_hostmem); + res_hostmem = NULL; + res->start = res->end = 0; + } + } + + if (ret) { + ret = allocate_resource(&iomem_resource, res, + size, 0, -1, + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); + if (ret < 0) { + pr_err("Cannot allocate new System RAM resource\n"); + kfree(res); + return NULL; + } } #ifdef CONFIG_SPARSEMEM @@ -287,6 +331,7 @@ static struct resource *additional_memory_resource(phys_addr_t size) pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", pfn, limit); release_memory_resource(res); + release_memory_resource(res_hostmem); return NULL; } } @@ -765,6 +810,8 @@ static int __init balloon_init(void) set_online_page_callback(&xen_online_page); register_memory_notifier(&xen_memory_nb); register_sysctl_table(xen_root); + + arch_xen_balloon_init(&hostmem_resource); #endif #ifdef CONFIG_XEN_PV diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c index d1e1d8d2b9d5..4c789e61554b 100644 --- a/drivers/xen/pvcalls-front.c +++ b/drivers/xen/pvcalls-front.c @@ -805,7 +805,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags) pvcalls_exit(); return ret; } - map2 = kzalloc(sizeof(*map2), GFP_KERNEL); + map2 = kzalloc(sizeof(*map2), GFP_ATOMIC); if (map2 == NULL) { clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index ff8d5bf4354f..23c7f395d718 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -895,20 +895,38 @@ error: * However, if we didn't have a callback promise outstanding, or it was * outstanding on a different server, then it won't break it either... */ -static int afs_dir_remove_link(struct dentry *dentry, struct key *key) +static int afs_dir_remove_link(struct dentry *dentry, struct key *key, + unsigned long d_version_before, + unsigned long d_version_after) { + bool dir_valid; int ret = 0; + /* There were no intervening changes on the server if the version + * number we got back was incremented by exactly 1. 
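The check being introduced here is compact enough to isolate: if the directory's data version came back exactly one higher than it was before the unlink RPC, no third party raced with us, so afs can drop the dentry's nlink locally instead of forcing a revalidation round trip. A sketch, with dir_still_valid() as an illustrative name:

#include <stdio.h>

static int dir_still_valid(unsigned long d_version_before,
			   unsigned long d_version_after)
{
	return d_version_after == d_version_before + 1;
}

int main(void)
{
	printf("%d\n", dir_still_valid(41, 42));	/* 1: only our RPC ran */
	printf("%d\n", dir_still_valid(41, 44));	/* 0: someone else wrote */
	return 0;
}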
+ */ + dir_valid = (d_version_after == d_version_before + 1); + if (d_really_is_positive(dentry)) { struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) - kdebug("AFS_VNODE_DELETED"); - clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); - - ret = afs_validate(vnode, key); - if (ret == -ESTALE) + if (dir_valid) { + drop_nlink(&vnode->vfs_inode); + if (vnode->vfs_inode.i_nlink == 0) { + set_bit(AFS_VNODE_DELETED, &vnode->flags); + clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); + } ret = 0; + } else { + clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); + + if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) + kdebug("AFS_VNODE_DELETED"); + + ret = afs_validate(vnode, key); + if (ret == -ESTALE) + ret = 0; + } _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret); } @@ -923,6 +941,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) struct afs_fs_cursor fc; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; struct key *key; + unsigned long d_version = (unsigned long)dentry->d_fsdata; int ret; _enter("{%x:%u},{%pd}", @@ -955,7 +974,9 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) afs_vnode_commit_status(&fc, dvnode, fc.cb_break); ret = afs_end_vnode_operation(&fc); if (ret == 0) - ret = afs_dir_remove_link(dentry, key); + ret = afs_dir_remove_link( + dentry, key, d_version, + (unsigned long)dvnode->status.data_version); } error_key: diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 3415eb7484f6..1e81864ef0b2 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -377,6 +377,10 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) } read_sequnlock_excl(&vnode->cb_lock); + + if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) + clear_nlink(&vnode->vfs_inode); + if (valid) goto valid; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index ea1460b9b71a..e1126659f043 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -885,7 +885,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count, { struct afs_net *net = call->net; enum afs_call_state state; - u32 remote_abort; + u32 remote_abort = 0; int ret; _enter("{%s,%zu},,%zu,%d", diff --git a/fs/afs/write.c b/fs/afs/write.c index cb5f8a3df577..9370e2feb999 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -198,7 +198,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, ret = afs_fill_page(vnode, key, pos + copied, len - copied, page); if (ret < 0) - return ret; + goto out; } SetPageUptodate(page); } @@ -206,10 +206,12 @@ int afs_write_end(struct file *file, struct address_space *mapping, set_page_dirty(page); if (PageDirty(page)) _debug("dirtied"); + ret = copied; + +out: unlock_page(page); put_page(page); - - return copied; + return ret; } /* diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 5d73f79ded8b..056276101c63 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( spin_lock(&root->inode_lock); node = radix_tree_lookup(&root->delayed_nodes_tree, ino); + if (node) { if (btrfs_inode->delayed_node) { refcount_inc(&node->refs); /* can be accessed */ @@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node( spin_unlock(&root->inode_lock); return node; } - btrfs_inode->delayed_node = node; - /* can be accessed and cached in the inode */ - refcount_add(2, &node->refs); + + /* + * It's possible that we're racing into the middle of removing + * this node from the radix tree. 
In this case, the refcount
+		 * was zero and it should never go back to one. Just return
+		 * NULL like it was never in the radix at all; our release
+		 * function is in the process of removing it.
+		 *
+		 * Some implementations of refcount_inc refuse to bump the
+		 * refcount once it has hit zero. If we don't do this dance
+		 * here, refcount_inc() may decide to just WARN_ONCE() instead
+		 * of actually bumping the refcount.
+		 *
+		 * If this node is properly in the radix, we want to bump the
+		 * refcount twice, once for the inode and once for this get
+		 * operation.
+		 */
+		if (refcount_inc_not_zero(&node->refs)) {
+			refcount_inc(&node->refs);
+			btrfs_inode->delayed_node = node;
+		} else {
+			node = NULL;
+		}
+
 		spin_unlock(&root->inode_lock);
 		return node;
 	}
@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
 	mutex_unlock(&delayed_node->mutex);
 
 	if (refcount_dec_and_test(&delayed_node->refs)) {
-		bool free = false;
 		struct btrfs_root *root = delayed_node->root;
+
 		spin_lock(&root->inode_lock);
-		if (refcount_read(&delayed_node->refs) == 0) {
-			radix_tree_delete(&root->delayed_nodes_tree,
-					  delayed_node->inode_id);
-			free = true;
-		}
+		/*
+		 * Once our refcount goes to zero, nobody is allowed to bump it
+		 * back up. We can delete it now.
+		 */
+		ASSERT(refcount_read(&delayed_node->refs) == 0);
+		radix_tree_delete(&root->delayed_nodes_tree,
+				  delayed_node->inode_id);
 		spin_unlock(&root->inode_lock);
-		if (free)
-			kmem_cache_free(delayed_node_cache, delayed_node);
+		kmem_cache_free(delayed_node_cache, delayed_node);
 	}
 }
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 49810b70afd3..a25684287501 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -237,7 +237,6 @@ static struct btrfs_device *__alloc_device(void)
 		kfree(dev);
 		return ERR_PTR(-ENOMEM);
 	}
-	bio_get(dev->flush_bio);
 
 	INIT_LIST_HEAD(&dev->dev_list);
 	INIT_LIST_HEAD(&dev->dev_alloc_list);
diff --git a/fs/exec.c b/fs/exec.c
index 5688b5e1b937..7eb8d21bcab9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1349,9 +1349,14 @@ void setup_new_exec(struct linux_binprm * bprm)
 
 	current->sas_ss_sp = current->sas_ss_size = 0;
 
-	/* Figure out dumpability. */
+	/*
+	 * Figure out dumpability. Note that checking only current here
+	 * is wrong, but userspace depends on it. This should be testing
+	 * bprm->secureexec instead.
+ */ if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP || - bprm->secureexec) + !(uid_eq(current_euid(), current_uid()) && + gid_eq(current_egid(), current_gid()))) set_dumpable(current->mm, suid_dumpable); else set_dumpable(current->mm, SUID_DUMP_USER); diff --git a/fs/nsfs.c b/fs/nsfs.c index 7c6f76d29f56..36b0772701a0 100644 --- a/fs/nsfs.c +++ b/fs/nsfs.c @@ -103,14 +103,14 @@ slow: goto got_it; } -void *ns_get_path(struct path *path, struct task_struct *task, - const struct proc_ns_operations *ns_ops) +void *ns_get_path_cb(struct path *path, ns_get_path_helper_t *ns_get_cb, + void *private_data) { struct ns_common *ns; void *ret; again: - ns = ns_ops->get(task); + ns = ns_get_cb(private_data); if (!ns) return ERR_PTR(-ENOENT); @@ -120,6 +120,29 @@ again: return ret; } +struct ns_get_path_task_args { + const struct proc_ns_operations *ns_ops; + struct task_struct *task; +}; + +static struct ns_common *ns_get_path_task(void *private_data) +{ + struct ns_get_path_task_args *args = private_data; + + return args->ns_ops->get(args->task); +} + +void *ns_get_path(struct path *path, struct task_struct *task, + const struct proc_ns_operations *ns_ops) +{ + struct ns_get_path_task_args args = { + .ns_ops = ns_ops, + .task = task, + }; + + return ns_get_path_cb(path, ns_get_path_task, &args); +} + int open_related_ns(struct ns_common *ns, struct ns_common *(*get_ns)(struct ns_common *ns)) { diff --git a/fs/super.c b/fs/super.c index 7ff1349609e4..06bd25d90ba5 100644 --- a/fs/super.c +++ b/fs/super.c @@ -517,7 +517,11 @@ retry: hlist_add_head(&s->s_instances, &type->fs_supers); spin_unlock(&sb_lock); get_filesystem(type); - register_shrinker(&s->s_shrink); + err = register_shrinker(&s->s_shrink); + if (err) { + deactivate_locked_super(s); + s = ERR_PTR(err); + } return s; } diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ac9a4e65ca49..41a75f9f23fd 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -570,11 +570,14 @@ out: static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, struct userfaultfd_wait_queue *ewq) { + struct userfaultfd_ctx *release_new_ctx; + if (WARN_ON_ONCE(current->flags & PF_EXITING)) goto out; ewq->ctx = ctx; init_waitqueue_entry(&ewq->wq, current); + release_new_ctx = NULL; spin_lock(&ctx->event_wqh.lock); /* @@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, new = (struct userfaultfd_ctx *) (unsigned long) ewq->msg.arg.reserved.reserved1; - - userfaultfd_ctx_put(new); + release_new_ctx = new; } break; } @@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, __set_current_state(TASK_RUNNING); spin_unlock(&ctx->event_wqh.lock); + if (release_new_ctx) { + struct vm_area_struct *vma; + struct mm_struct *mm = release_new_ctx->mm; + + /* the various vma->vm_userfaultfd_ctx still points to it */ + down_write(&mm->mmap_sem); + for (vma = mm->mmap; vma; vma = vma->vm_next) + if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; + up_write(&mm->mmap_sem); + + userfaultfd_ctx_put(release_new_ctx); + } + /* * ctx may go away after this if the userfault pseudo fd is * already released. 
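The fs/nsfs.c change above splits ns_get_path() into a callback-based ns_get_path_cb(), so that callers which hold a namespace reference but no task_struct can still pin the namespace into a struct path. A minimal sketch of a hypothetical caller for a network namespace follows; my_get_net_ns() and my_net_ns_path() are illustrative names only, not part of this series, while get_net(), ns_get_path_cb() and the ns member of struct net are existing kernel interfaces:

/* Hypothetical user of the new callback interface: materialize a
 * struct path for a network namespace without going through a task.
 */
static struct ns_common *my_get_net_ns(void *private_data)
{
	struct net *net = private_data;

	get_net(net);		/* reference is consumed by the path setup */
	return &net->ns;	/* struct net embeds its ns_common as .ns */
}

static void *my_net_ns_path(struct path *path, struct net *net)
{
	/* Like ns_get_path(), returns NULL on success or an ERR_PTR(). */
	return ns_get_path_cb(path, my_get_net_ns, net);
}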
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 0da80019a917..83ed7715f856 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -702,7 +702,7 @@ xfs_alloc_ag_vextent( ASSERT(args->agbno % args->alignment == 0); /* if not file data, insert new block into the reverse map btree */ - if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) { + if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) { error = xfs_rmap_alloc(args->tp, args->agbp, args->agno, args->agbno, args->len, &args->oinfo); if (error) @@ -1682,7 +1682,7 @@ xfs_free_ag_extent( bno_cur = cnt_cur = NULL; mp = tp->t_mountp; - if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) { + if (!xfs_rmap_should_skip_owner_update(oinfo)) { error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo); if (error) goto error0; diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index 6249c92671de..a76914db72ef 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -212,6 +212,7 @@ xfs_attr_set( int flags) { struct xfs_mount *mp = dp->i_mount; + struct xfs_buf *leaf_bp = NULL; struct xfs_da_args args; struct xfs_defer_ops dfops; struct xfs_trans_res tres; @@ -327,9 +328,16 @@ xfs_attr_set( * GROT: another possible req'mt for a double-split btree op. */ xfs_defer_init(args.dfops, args.firstblock); - error = xfs_attr_shortform_to_leaf(&args); + error = xfs_attr_shortform_to_leaf(&args, &leaf_bp); if (error) goto out_defer_cancel; + /* + * Prevent the leaf buffer from being unlocked so that a + * concurrent AIL push cannot grab the half-baked leaf + * buffer and run into problems with the write verifier. + */ + xfs_trans_bhold(args.trans, leaf_bp); + xfs_defer_bjoin(args.dfops, leaf_bp); xfs_defer_ijoin(args.dfops, dp); error = xfs_defer_finish(&args.trans, args.dfops); if (error) @@ -337,13 +345,14 @@ xfs_attr_set( /* * Commit the leaf transformation. We'll need another (linked) - * transaction to add the new attribute to the leaf. + * transaction to add the new attribute to the leaf, which + * means that we have to hold & join the leaf buffer here too. */ - error = xfs_trans_roll_inode(&args.trans, dp); if (error) goto out; - + xfs_trans_bjoin(args.trans, leaf_bp); + leaf_bp = NULL; } if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) @@ -374,8 +383,9 @@ xfs_attr_set( out_defer_cancel: xfs_defer_cancel(&dfops); - args.trans = NULL; out: + if (leaf_bp) + xfs_trans_brelse(args.trans, leaf_bp); if (args.trans) xfs_trans_cancel(args.trans); xfs_iunlock(dp, XFS_ILOCK_EXCL); diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 53cc8b986eac..601eaa36f1ad 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -735,10 +735,13 @@ xfs_attr_shortform_getvalue(xfs_da_args_t *args) } /* - * Convert from using the shortform to the leaf. + * Convert from using the shortform to the leaf. On success, return the + * buffer so that we can keep it locked until we're totally done with it. 
*/ int -xfs_attr_shortform_to_leaf(xfs_da_args_t *args) +xfs_attr_shortform_to_leaf( + struct xfs_da_args *args, + struct xfs_buf **leaf_bp) { xfs_inode_t *dp; xfs_attr_shortform_t *sf; @@ -818,7 +821,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args) sfe = XFS_ATTR_SF_NEXTENTRY(sfe); } error = 0; - + *leaf_bp = bp; out: kmem_free(tmpbuffer); return error; diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h index f7dda0c237b0..894124efb421 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.h +++ b/fs/xfs/libxfs/xfs_attr_leaf.h @@ -48,7 +48,8 @@ void xfs_attr_shortform_create(struct xfs_da_args *args); void xfs_attr_shortform_add(struct xfs_da_args *args, int forkoff); int xfs_attr_shortform_lookup(struct xfs_da_args *args); int xfs_attr_shortform_getvalue(struct xfs_da_args *args); -int xfs_attr_shortform_to_leaf(struct xfs_da_args *args); +int xfs_attr_shortform_to_leaf(struct xfs_da_args *args, + struct xfs_buf **leaf_bp); int xfs_attr_shortform_remove(struct xfs_da_args *args); int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); int xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes); diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 1210f684d3c2..1bddbba6b80c 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -5136,7 +5136,7 @@ __xfs_bunmapi( * blowing out the transaction with a mix of EFIs and reflink * adjustments. */ - if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) + if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res)); else max_len = len; diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c index 072ebfe1d6ae..087fea02c389 100644 --- a/fs/xfs/libxfs/xfs_defer.c +++ b/fs/xfs/libxfs/xfs_defer.c @@ -249,6 +249,10 @@ xfs_defer_trans_roll( for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE); + /* Hold the (previously bjoin'd) buffer locked across the roll. */ + for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) + xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]); + trace_xfs_defer_trans_roll((*tp)->t_mountp, dop); /* Roll the transaction. */ @@ -264,6 +268,12 @@ xfs_defer_trans_roll( for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0); + /* Rejoin the buffers and dirty them so the log moves forward. */ + for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) { + xfs_trans_bjoin(*tp, dop->dop_bufs[i]); + xfs_trans_bhold(*tp, dop->dop_bufs[i]); + } + return error; } @@ -295,6 +305,31 @@ xfs_defer_ijoin( } } + ASSERT(0); + return -EFSCORRUPTED; +} + +/* + * Add this buffer to the deferred op. Each joined buffer is relogged + * each time we roll the transaction. 
+ */ +int +xfs_defer_bjoin( + struct xfs_defer_ops *dop, + struct xfs_buf *bp) +{ + int i; + + for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) { + if (dop->dop_bufs[i] == bp) + return 0; + else if (dop->dop_bufs[i] == NULL) { + dop->dop_bufs[i] = bp; + return 0; + } + } + + ASSERT(0); return -EFSCORRUPTED; } @@ -493,9 +528,7 @@ xfs_defer_init( struct xfs_defer_ops *dop, xfs_fsblock_t *fbp) { - dop->dop_committed = false; - dop->dop_low = false; - memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes)); + memset(dop, 0, sizeof(struct xfs_defer_ops)); *fbp = NULLFSBLOCK; INIT_LIST_HEAD(&dop->dop_intake); INIT_LIST_HEAD(&dop->dop_pending); diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h index d4f046dd44bd..045beacdd37d 100644 --- a/fs/xfs/libxfs/xfs_defer.h +++ b/fs/xfs/libxfs/xfs_defer.h @@ -59,6 +59,7 @@ enum xfs_defer_ops_type { }; #define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */ +#define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */ struct xfs_defer_ops { bool dop_committed; /* did any trans commit? */ @@ -66,8 +67,9 @@ struct xfs_defer_ops { struct list_head dop_intake; /* unlogged pending work */ struct list_head dop_pending; /* logged pending work */ - /* relog these inodes with each roll */ + /* relog these with each roll */ struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES]; + struct xfs_buf *dop_bufs[XFS_DEFER_OPS_NR_BUFS]; }; void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type, @@ -77,6 +79,7 @@ void xfs_defer_cancel(struct xfs_defer_ops *dop); void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp); bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop); int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip); +int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp); /* Description of a deferred type. 
*/ struct xfs_defer_op_type { diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c index 89bf16b4d937..b0f31791c7e6 100644 --- a/fs/xfs/libxfs/xfs_iext_tree.c +++ b/fs/xfs/libxfs/xfs_iext_tree.c @@ -632,8 +632,6 @@ xfs_iext_insert( struct xfs_iext_leaf *new = NULL; int nr_entries, i; - trace_xfs_iext_insert(ip, cur, state, _RET_IP_); - if (ifp->if_height == 0) xfs_iext_alloc_root(ifp, cur); else if (ifp->if_height == 1) @@ -661,6 +659,8 @@ xfs_iext_insert( xfs_iext_set(cur_rec(cur), irec); ifp->if_bytes += sizeof(struct xfs_iext_rec); + trace_xfs_iext_insert(ip, cur, state, _RET_IP_); + if (new) xfs_iext_insert_node(ifp, xfs_iext_leaf_key(new, 0), new, 2); } diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index 585b35d34142..c40d26763075 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c @@ -1488,27 +1488,12 @@ __xfs_refcount_cow_alloc( xfs_extlen_t aglen, struct xfs_defer_ops *dfops) { - int error; - trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno, agbno, aglen); /* Add refcount btree reservation */ - error = xfs_refcount_adjust_cow(rcur, agbno, aglen, + return xfs_refcount_adjust_cow(rcur, agbno, aglen, XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops); - if (error) - return error; - - /* Add rmap entry */ - if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) { - error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops, - rcur->bc_private.a.agno, - agbno, aglen, XFS_RMAP_OWN_COW); - if (error) - return error; - } - - return error; } /* @@ -1521,27 +1506,12 @@ __xfs_refcount_cow_free( xfs_extlen_t aglen, struct xfs_defer_ops *dfops) { - int error; - trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno, agbno, aglen); /* Remove refcount btree reservation */ - error = xfs_refcount_adjust_cow(rcur, agbno, aglen, + return xfs_refcount_adjust_cow(rcur, agbno, aglen, XFS_REFCOUNT_ADJUST_COW_FREE, dfops); - if (error) - return error; - - /* Remove rmap entry */ - if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) { - error = xfs_rmap_free_extent(rcur->bc_mp, dfops, - rcur->bc_private.a.agno, - agbno, aglen, XFS_RMAP_OWN_COW); - if (error) - return error; - } - - return error; } /* Record a CoW staging extent in the refcount btree. */ @@ -1552,11 +1522,19 @@ xfs_refcount_alloc_cow_extent( xfs_fsblock_t fsb, xfs_extlen_t len) { + int error; + if (!xfs_sb_version_hasreflink(&mp->m_sb)) return 0; - return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW, + error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW, fsb, len); + if (error) + return error; + + /* Add rmap entry */ + return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb), + XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW); } /* Forget a CoW staging event in the refcount btree. */ @@ -1567,9 +1545,17 @@ xfs_refcount_free_cow_extent( xfs_fsblock_t fsb, xfs_extlen_t len) { + int error; + if (!xfs_sb_version_hasreflink(&mp->m_sb)) return 0; + /* Remove rmap entry */ + error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb), + XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW); + if (error) + return error; + return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW, fsb, len); } diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index dd019cee1b3b..50db920ceeeb 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -368,6 +368,51 @@ xfs_rmap_lookup_le_range( } /* + * Perform all the relevant owner checks for a removal op. 
If we're doing an + * unknown-owner removal then we have no owner information to check. + */ +static int +xfs_rmap_free_check_owner( + struct xfs_mount *mp, + uint64_t ltoff, + struct xfs_rmap_irec *rec, + xfs_fsblock_t bno, + xfs_filblks_t len, + uint64_t owner, + uint64_t offset, + unsigned int flags) +{ + int error = 0; + + if (owner == XFS_RMAP_OWN_UNKNOWN) + return 0; + + /* Make sure the unwritten flag matches. */ + XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) == + (rec->rm_flags & XFS_RMAP_UNWRITTEN), out); + + /* Make sure the owner matches what we expect to find in the tree. */ + XFS_WANT_CORRUPTED_GOTO(mp, owner == rec->rm_owner, out); + + /* Check the offset, if necessary. */ + if (XFS_RMAP_NON_INODE_OWNER(owner)) + goto out; + + if (flags & XFS_RMAP_BMBT_BLOCK) { + XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_flags & XFS_RMAP_BMBT_BLOCK, + out); + } else { + XFS_WANT_CORRUPTED_GOTO(mp, rec->rm_offset <= offset, out); + XFS_WANT_CORRUPTED_GOTO(mp, + ltoff + rec->rm_blockcount >= offset + len, + out); + } + +out: + return error; +} + +/* * Find the extent in the rmap btree and remove it. * * The record we find should always be an exact match for the extent that we're @@ -444,33 +489,40 @@ xfs_rmap_unmap( goto out_done; } - /* Make sure the unwritten flag matches. */ - XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) == - (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error); + /* + * If we're doing an unknown-owner removal for EFI recovery, we expect + * to find the full range in the rmapbt or nothing at all. If we + * don't find any rmaps overlapping either end of the range, we're + * done. Hopefully this means that the EFI creator already queued + * (and finished) a RUI to remove the rmap. + */ + if (owner == XFS_RMAP_OWN_UNKNOWN && + ltrec.rm_startblock + ltrec.rm_blockcount <= bno) { + struct xfs_rmap_irec rtrec; + + error = xfs_btree_increment(cur, 0, &i); + if (error) + goto out_error; + if (i == 0) + goto out_done; + error = xfs_rmap_get_rec(cur, &rtrec, &i); + if (error) + goto out_error; + XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error); + if (rtrec.rm_startblock >= bno + len) + goto out_done; + } /* Make sure the extent we found covers the entire freeing range. */ XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno && - ltrec.rm_startblock + ltrec.rm_blockcount >= - bno + len, out_error); + ltrec.rm_startblock + ltrec.rm_blockcount >= + bno + len, out_error); - /* Make sure the owner matches what we expect to find in the tree. */ - XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner || - XFS_RMAP_NON_INODE_OWNER(owner), out_error); - - /* Check the offset, if necessary. */ - if (!XFS_RMAP_NON_INODE_OWNER(owner)) { - if (flags & XFS_RMAP_BMBT_BLOCK) { - XFS_WANT_CORRUPTED_GOTO(mp, - ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK, - out_error); - } else { - XFS_WANT_CORRUPTED_GOTO(mp, - ltrec.rm_offset <= offset, out_error); - XFS_WANT_CORRUPTED_GOTO(mp, - ltoff + ltrec.rm_blockcount >= offset + len, - out_error); - } - } + /* Check owner information. 
*/
+	error = xfs_rmap_free_check_owner(mp, ltoff, &ltrec, bno, len, owner,
+			offset, flags);
+	if (error)
+		goto out_error;
 
 	if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
 		/* exact match, simply remove the record from rmap tree */
@@ -664,6 +716,7 @@ xfs_rmap_map(
 		flags |= XFS_RMAP_UNWRITTEN;
 	trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
 			unwritten, oinfo);
+	ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
 
 	/*
 	 * For the initial lookup, look for an exact match or the left-adjacent
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 466ede637080..0fcd5b1ba729 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -61,7 +61,21 @@ static inline void
 xfs_rmap_skip_owner_update(
 	struct xfs_owner_info	*oi)
 {
-	oi->oi_owner = XFS_RMAP_OWN_UNKNOWN;
+	xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_NULL);
+}
+
+static inline bool
+xfs_rmap_should_skip_owner_update(
+	struct xfs_owner_info	*oi)
+{
+	return oi->oi_owner == XFS_RMAP_OWN_NULL;
+}
+
+static inline void
+xfs_rmap_any_owner_update(
+	struct xfs_owner_info	*oi)
+{
+	xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_UNKNOWN);
 }
 
 /* Reverse mapping functions. */
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 21e2d70884e1..4fc526a27a94 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -399,7 +399,7 @@ xfs_map_blocks(
 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
 	ASSERT(offset <= mp->m_super->s_maxbytes);
 
-	if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
+	if (offset > mp->m_super->s_maxbytes - count)
 		count = mp->m_super->s_maxbytes - offset;
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
 	lockmode = xfs_ilock_data_map_shared(ip);
 
 	ASSERT(offset <= mp->m_super->s_maxbytes);
-	if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
+	if (offset > mp->m_super->s_maxbytes - size)
 		size = mp->m_super->s_maxbytes - offset;
 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 44f8c5451210..64da90655e95 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -538,7 +538,7 @@ xfs_efi_recover(
 		return error;
 	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
 
-	xfs_rmap_skip_owner_update(&oinfo);
+	xfs_rmap_any_owner_update(&oinfo);
 	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
 		extp = &efip->efi_format.efi_extents[i];
 		error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 8f22fc579dbb..60a2e128cb6a 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -571,6 +571,11 @@ xfs_growfs_data_private(
 		 * this doesn't actually exist in the rmap btree.
 		 */
 		xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
+		error = xfs_rmap_free(tp, bp, agno,
+				be32_to_cpu(agf->agf_length) - new,
+				new, &oinfo);
+		if (error)
+			goto error0;
 		error = xfs_free_extent(tp,
 				XFS_AGB_TO_FSB(mp, agno,
 					be32_to_cpu(agf->agf_length) - new),
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 43005fbe8b1e..3861d61fb265 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -870,7 +870,7 @@ xfs_eofblocks_worker(
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
*/ -STATIC void +void xfs_queue_cowblocks( struct xfs_mount *mp) { @@ -1536,8 +1536,23 @@ xfs_inode_free_quota_eofblocks( return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks); } +static inline unsigned long +xfs_iflag_for_tag( + int tag) +{ + switch (tag) { + case XFS_ICI_EOFBLOCKS_TAG: + return XFS_IEOFBLOCKS; + case XFS_ICI_COWBLOCKS_TAG: + return XFS_ICOWBLOCKS; + default: + ASSERT(0); + return 0; + } +} + static void -__xfs_inode_set_eofblocks_tag( +__xfs_inode_set_blocks_tag( xfs_inode_t *ip, void (*execute)(struct xfs_mount *mp), void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, @@ -1552,10 +1567,10 @@ __xfs_inode_set_eofblocks_tag( * Don't bother locking the AG and looking up in the radix trees * if we already know that we have the tag set. */ - if (ip->i_flags & XFS_IEOFBLOCKS) + if (ip->i_flags & xfs_iflag_for_tag(tag)) return; spin_lock(&ip->i_flags_lock); - ip->i_flags |= XFS_IEOFBLOCKS; + ip->i_flags |= xfs_iflag_for_tag(tag); spin_unlock(&ip->i_flags_lock); pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); @@ -1587,13 +1602,13 @@ xfs_inode_set_eofblocks_tag( xfs_inode_t *ip) { trace_xfs_inode_set_eofblocks_tag(ip); - return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks, + return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks, trace_xfs_perag_set_eofblocks, XFS_ICI_EOFBLOCKS_TAG); } static void -__xfs_inode_clear_eofblocks_tag( +__xfs_inode_clear_blocks_tag( xfs_inode_t *ip, void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, int error, unsigned long caller_ip), @@ -1603,7 +1618,7 @@ __xfs_inode_clear_eofblocks_tag( struct xfs_perag *pag; spin_lock(&ip->i_flags_lock); - ip->i_flags &= ~XFS_IEOFBLOCKS; + ip->i_flags &= ~xfs_iflag_for_tag(tag); spin_unlock(&ip->i_flags_lock); pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); @@ -1630,7 +1645,7 @@ xfs_inode_clear_eofblocks_tag( xfs_inode_t *ip) { trace_xfs_inode_clear_eofblocks_tag(ip); - return __xfs_inode_clear_eofblocks_tag(ip, + return __xfs_inode_clear_blocks_tag(ip, trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG); } @@ -1724,7 +1739,7 @@ xfs_inode_set_cowblocks_tag( xfs_inode_t *ip) { trace_xfs_inode_set_cowblocks_tag(ip); - return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks, + return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks, trace_xfs_perag_set_cowblocks, XFS_ICI_COWBLOCKS_TAG); } @@ -1734,6 +1749,6 @@ xfs_inode_clear_cowblocks_tag( xfs_inode_t *ip) { trace_xfs_inode_clear_cowblocks_tag(ip); - return __xfs_inode_clear_eofblocks_tag(ip, + return __xfs_inode_clear_blocks_tag(ip, trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG); } diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h index bff4d85e5498..d4a77588eca1 100644 --- a/fs/xfs/xfs_icache.h +++ b/fs/xfs/xfs_icache.h @@ -81,6 +81,7 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip); int xfs_icache_free_cowblocks(struct xfs_mount *, struct xfs_eofblocks *); int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip); void xfs_cowblocks_worker(struct work_struct *); +void xfs_queue_cowblocks(struct xfs_mount *); int xfs_inode_ag_iterator(struct xfs_mount *mp, int (*execute)(struct xfs_inode *ip, int flags, void *args), diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b41952a4ddd8..6f95bdb408ce 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1487,6 +1487,24 @@ xfs_link( return error; } +/* Clear the reflink flag and the cowblocks tag if possible. 
*/ +static void +xfs_itruncate_clear_reflink_flags( + struct xfs_inode *ip) +{ + struct xfs_ifork *dfork; + struct xfs_ifork *cfork; + + if (!xfs_is_reflink_inode(ip)) + return; + dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK); + cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK); + if (dfork->if_bytes == 0 && cfork->if_bytes == 0) + ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; + if (cfork->if_bytes == 0) + xfs_inode_clear_cowblocks_tag(ip); +} + /* * Free up the underlying blocks past new_size. The new size must be smaller * than the current size. This routine can be used both for the attribute and @@ -1583,15 +1601,7 @@ xfs_itruncate_extents( if (error) goto out; - /* - * Clear the reflink flag if there are no data fork blocks and - * there are no extents staged in the cow fork. - */ - if (xfs_is_reflink_inode(ip) && ip->i_cnextents == 0) { - if (ip->i_d.di_nblocks == 0) - ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; - xfs_inode_clear_cowblocks_tag(ip); - } + xfs_itruncate_clear_reflink_flags(ip); /* * Always re-log the inode so that our permanent transaction can keep diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index b2136af9289f..d383e392ec9d 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -232,6 +232,7 @@ static inline bool xfs_is_reflink_inode(struct xfs_inode *ip) * log recovery to replay a bmap operation on the inode. */ #define XFS_IRECOVERY (1 << 11) +#define XFS_ICOWBLOCKS (1 << 12)/* has the cowblocks tag set */ /* * Per-lifetime flags need to be reset when re-using a reclaimable inode during diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 7ab52a8bc0a9..66e1edbfb2b2 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -1006,7 +1006,7 @@ xfs_file_iomap_begin( } ASSERT(offset <= mp->m_super->s_maxbytes); - if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes) + if (offset > mp->m_super->s_maxbytes - length) length = mp->m_super->s_maxbytes - offset; offset_fsb = XFS_B_TO_FSBT(mp, offset); end_fsb = XFS_B_TO_FSB(mp, offset + length); diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index ec952dfad359..b897b11afb2c 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -48,7 +48,7 @@ STATIC int xfs_qm_init_quotainos(xfs_mount_t *); STATIC int xfs_qm_init_quotainfo(xfs_mount_t *); - +STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi); STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); /* * We use the batch lookup interface to iterate over the dquots as it @@ -695,9 +695,17 @@ xfs_qm_init_quotainfo( qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; qinf->qi_shrinker.seeks = DEFAULT_SEEKS; qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE; - register_shrinker(&qinf->qi_shrinker); + + error = register_shrinker(&qinf->qi_shrinker); + if (error) + goto out_free_inos; + return 0; +out_free_inos: + mutex_destroy(&qinf->qi_quotaofflock); + mutex_destroy(&qinf->qi_tree_lock); + xfs_qm_destroy_quotainos(qinf); out_free_lru: list_lru_destroy(&qinf->qi_lru); out_free_qinf: @@ -706,7 +714,6 @@ out_free_qinf: return error; } - /* * Gets called when unmounting a filesystem or when all quotas get * turned off. 
@@ -723,19 +730,8 @@ xfs_qm_destroy_quotainfo(
 
 	unregister_shrinker(&qi->qi_shrinker);
 	list_lru_destroy(&qi->qi_lru);
-
-	if (qi->qi_uquotaip) {
-		IRELE(qi->qi_uquotaip);
-		qi->qi_uquotaip = NULL; /* paranoia */
-	}
-	if (qi->qi_gquotaip) {
-		IRELE(qi->qi_gquotaip);
-		qi->qi_gquotaip = NULL;
-	}
-	if (qi->qi_pquotaip) {
-		IRELE(qi->qi_pquotaip);
-		qi->qi_pquotaip = NULL;
-	}
+	xfs_qm_destroy_quotainos(qi);
+	mutex_destroy(&qi->qi_tree_lock);
 	mutex_destroy(&qi->qi_quotaofflock);
 	kmem_free(qi);
 	mp->m_quotainfo = NULL;
@@ -1600,6 +1596,24 @@ error_rele:
 }
 
 STATIC void
+xfs_qm_destroy_quotainos(
+	xfs_quotainfo_t	*qi)
+{
+	if (qi->qi_uquotaip) {
+		IRELE(qi->qi_uquotaip);
+		qi->qi_uquotaip = NULL; /* paranoia */
+	}
+	if (qi->qi_gquotaip) {
+		IRELE(qi->qi_gquotaip);
+		qi->qi_gquotaip = NULL;
+	}
+	if (qi->qi_pquotaip) {
+		IRELE(qi->qi_pquotaip);
+		qi->qi_pquotaip = NULL;
+	}
+}
+
+STATIC void
 xfs_qm_dqfree_one(
 	struct xfs_dquot	*dqp)
 {
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index cf7c8f81bebb..47aea2e82c26 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -454,6 +454,8 @@ retry:
 	if (error)
 		goto out_bmap_cancel;
 
+	xfs_inode_set_cowblocks_tag(ip);
+
 	/* Finish up. */
 	error = xfs_defer_finish(&tp, &dfops);
 	if (error)
@@ -490,8 +492,9 @@ xfs_reflink_find_cow_mapping(
 	struct xfs_iext_cursor		icur;
 
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
-	ASSERT(xfs_is_reflink_inode(ip));
 
+	if (!xfs_is_reflink_inode(ip))
+		return false;
 	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
 	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got))
 		return false;
@@ -610,6 +613,9 @@ xfs_reflink_cancel_cow_blocks(
 
 			/* Remove the mapping from the CoW fork. */
 			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
+		} else {
+			/* Didn't do anything, push cursor back. */
+			xfs_iext_prev(ifp, &icur);
 		}
 next_extent:
 		if (!xfs_iext_get_extent(ifp, &icur, &got))
@@ -725,7 +731,7 @@ xfs_reflink_end_cow(
 			(unsigned int)(end_fsb - offset_fsb),
 			XFS_DATA_FORK);
 	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
-			resblks, 0, 0, &tp);
+			resblks, 0, XFS_TRANS_RESERVE, &tp);
 	if (error)
 		goto out;
 
@@ -1291,6 +1297,17 @@ xfs_reflink_remap_range(
 
 	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
 
+	/*
+	 * Clear out post-eof preallocations because we don't have page cache
+	 * backing the delayed allocations and they'll never get freed on
+	 * their own.
+	 */
+	if (xfs_can_free_eofblocks(dest, true)) {
+		ret = xfs_free_eofblocks(dest);
+		if (ret)
+			goto out_unlock;
+	}
+
 	/* Set flags and remap blocks. */
 	ret = xfs_reflink_set_inode_flag(src, dest);
 	if (ret)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 5122d3021117..1dacccc367f8 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1360,6 +1360,7 @@ xfs_fs_remount(
 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 			return error;
 		}
+		xfs_queue_cowblocks(mp);
 
 		/* Create the per-AG metadata reservation pool. */
 		error = xfs_fs_reserve_ag_blocks(mp);
@@ -1369,6 +1370,14 @@
 
 	/* rw -> ro */
 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
+		/* Get rid of any leftover CoW reservations... */
+		cancel_delayed_work_sync(&mp->m_cowblocks_work);
+		error = xfs_icache_free_cowblocks(mp, NULL);
+		if (error) {
+			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			return error;
+		}
+
 		/* Free the per-AG metadata reservation pool.
*/ error = xfs_fs_unreserve_ag_blocks(mp); if (error) { diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h index ea189d88a3cc..8ac4e68a12f0 100644 --- a/include/asm-generic/mm_hooks.h +++ b/include/asm-generic/mm_hooks.h @@ -7,9 +7,10 @@ #ifndef _ASM_GENERIC_MM_HOOKS_H #define _ASM_GENERIC_MM_HOOKS_H -static inline void arch_dup_mmap(struct mm_struct *oldmm, - struct mm_struct *mm) +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) { + return 0; } static inline void arch_exit_mmap(struct mm_struct *mm) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index b234d54f2cb6..868e68561f91 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -1025,6 +1025,11 @@ static inline int pmd_clear_huge(pmd_t *pmd) struct file; int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, unsigned long size, pgprot_t *vma_prot); + +#ifndef CONFIG_X86_ESPFIX64 +static inline void init_espfix_bsp(void) { } +#endif + #endif /* !__ASSEMBLY__ */ #ifndef io_remap_pfn_range diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h index 38d9c5861ed8..f38227a78eae 100644 --- a/include/crypto/if_alg.h +++ b/include/crypto/if_alg.h @@ -18,6 +18,7 @@ #include <linux/if_alg.h> #include <linux/scatterlist.h> #include <linux/types.h> +#include <linux/atomic.h> #include <net/sock.h> #include <crypto/aead.h> @@ -150,7 +151,7 @@ struct af_alg_ctx { struct crypto_wait wait; size_t used; - size_t rcvused; + atomic_t rcvused; bool more; bool merge; @@ -215,7 +216,7 @@ static inline int af_alg_rcvbuf(struct sock *sk) struct af_alg_ctx *ctx = ask->private; return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - - ctx->rcvused, 0); + atomic_read(&ctx->rcvused), 0); } /** diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h index cceafa01f907..b67404fc4b34 100644 --- a/include/crypto/mcryptd.h +++ b/include/crypto/mcryptd.h @@ -27,6 +27,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast( struct mcryptd_cpu_queue { struct crypto_queue queue; + spinlock_t q_lock; struct work_struct work; }; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index da54ef644fcd..6be837c063c3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -17,6 +17,7 @@ #include <linux/numa.h> #include <linux/wait.h> +struct bpf_verifier_env; struct perf_event; struct bpf_prog; struct bpf_map; @@ -184,14 +185,18 @@ struct bpf_verifier_ops { struct bpf_prog *prog, u32 *target_size); }; +struct bpf_prog_offload_ops { + int (*insn_hook)(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); +}; + struct bpf_dev_offload { struct bpf_prog *prog; struct net_device *netdev; void *dev_priv; struct list_head offloads; bool dev_state; - bool verifier_running; - wait_queue_head_t verifier_done; + const struct bpf_prog_offload_ops *dev_ops; }; struct bpf_prog_aux { @@ -201,6 +206,7 @@ struct bpf_prog_aux { u32 stack_depth; u32 id; u32 func_cnt; + bool offload_requested; struct bpf_prog **func; void *jit_data; /* JIT specific data. 
arch dependent */ struct latch_tree_node ksym_tnode; @@ -351,6 +357,8 @@ void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); void __bpf_prog_uncharge(struct user_struct *user, u32 pages); +void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); + struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); @@ -426,6 +434,8 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) attr->numa_node : NUMA_NO_NODE; } +struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -513,6 +523,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, { return 0; } + +static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, + enum bpf_prog_type type) +{ + return ERR_PTR(-EOPNOTSUPP); +} #endif /* CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, @@ -521,15 +537,19 @@ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, return bpf_prog_get_type_dev(ufd, type, false); } +bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); + int bpf_prog_offload_compile(struct bpf_prog *prog); void bpf_prog_offload_destroy(struct bpf_prog *prog); +int bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) { - return aux->offload; + return aux->offload_requested; } #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, @@ -544,7 +564,7 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) } #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ -#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); #else diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 978c1d9c9383..19b8349a3809 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) -#ifdef CONFIG_STREAM_PARSER +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c009e472f647..2feb218c001d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -166,12 +166,6 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log) return log->len_used >= log->len_total - 1; } -struct bpf_verifier_env; -struct bpf_ext_analyzer_ops { - int (*insn_hook)(struct bpf_verifier_env *env, - int insn_idx, int prev_insn_idx); -}; - #define BPF_MAX_SUBPROGS 256 /* single container for all structs @@ -185,7 +179,6 @@ struct bpf_verifier_env { bool strict_alignment; /* perform strict pointer alignment checks */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct 
bpf_verifier_state_list **explored_states; /* search pruning optimization */ - const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ @@ -194,6 +187,7 @@ struct bpf_verifier_env { struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ struct bpf_verifer_log log; u32 subprog_starts[BPF_MAX_SUBPROGS]; + /* computes the stack depth of each bpf function */ u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1]; u32 subprog_cnt; }; @@ -205,13 +199,8 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) return cur->frame[cur->curframe]->regs; } -#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); -#else -static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) -{ - return -EOPNOTSUPP; -} -#endif +int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx); #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 201ab7267986..1a32e558eb11 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -86,7 +86,7 @@ enum cpuhp_state { CPUHP_MM_ZSWP_POOL_PREPARE, CPUHP_KVM_PPC_BOOK3S_PREPARE, CPUHP_ZCOMP_PREPARE, - CPUHP_TIMERS_DEAD, + CPUHP_TIMERS_PREPARE, CPUHP_MIPS_SOC_PREPARE, CPUHP_BP_PREPARE_DYN, CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index b6514c29563f..b4f22112ba75 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h @@ -23,7 +23,7 @@ struct lan9303 { struct regmap_irq_chip_data *irq_data; struct gpio_desc *reset_gpio; u32 reset_duration; /* in [ms] */ - bool phy_addr_sel_strap; + int phy_addr_base; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ struct mutex alr_mutex; /* protect ALR access */ diff --git a/include/linux/efi.h b/include/linux/efi.h index d813f7b04da7..29fdf8029cf6 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -140,11 +140,13 @@ struct efi_boot_memmap { struct capsule_info { efi_capsule_header_t header; + efi_capsule_header_t *capsule; int reset_type; long index; size_t count; size_t total_size; - phys_addr_t *pages; + struct page **pages; + phys_addr_t *phys; size_t page_bytes_remain; }; diff --git a/include/linux/filter.h b/include/linux/filter.h index e872b4ebaa57..425056c7f96c 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -18,7 +18,9 @@ #include <linux/capability.h> #include <linux/cryptohash.h> #include <linux/set_memory.h> +#include <linux/kallsyms.h> +#include <net/xdp.h> #include <net/sch_generic.h> #include <uapi/linux/filter.h> @@ -502,6 +504,7 @@ struct xdp_buff { void *data_end; void *data_meta; void *data_hard_start; + struct xdp_rxq_info *rxq; }; /* Compute the linear packet data range [data, data_end) which @@ -724,6 +727,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_helper_changes_pkt_data(void *func); +static inline bool bpf_dump_raw_ok(void) +{ + /* Reconstruction of call-sites is dependent on kallsyms, + * thus make dump the same restriction. 
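+	 * That is, only dump raw addresses if kallsyms_show_value()
+	 * would let the requesting user see kernel addresses anyway.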
+ */ + return kallsyms_show_value() == 1; +} + struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index f4ff47d4a893..fe0c349684fa 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -755,7 +755,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie, { if (fscache_cookie_valid(cookie) && PageFsCache(page)) return __fscache_maybe_release_page(cookie, page, gfp); - return false; + return true; } /** diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 55e672592fa9..7258cd676df4 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -66,9 +66,10 @@ struct gpio_irq_chip { /** * @lock_key: * - * Per GPIO IRQ chip lockdep class. + * Per GPIO IRQ chip lockdep classes. */ struct lock_class_key *lock_key; + struct lock_class_key *request_key; /** * @parent_handler: @@ -323,7 +324,8 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip, /* add/remove chips */ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, - struct lock_class_key *lock_key); + struct lock_class_key *lock_key, + struct lock_class_key *request_key); /** * gpiochip_add_data() - register a gpio_chip @@ -350,11 +352,13 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, */ #ifdef CONFIG_LOCKDEP #define gpiochip_add_data(chip, data) ({ \ - static struct lock_class_key key; \ - gpiochip_add_data_with_key(chip, data, &key); \ + static struct lock_class_key lock_key; \ + static struct lock_class_key request_key; \ + gpiochip_add_data_with_key(chip, data, &lock_key, \ + &request_key); \ }) #else -#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL) +#define gpiochip_add_data(chip, data) gpiochip_add_data_with_key(chip, data, NULL, NULL) #endif static inline int gpiochip_add(struct gpio_chip *chip) @@ -429,7 +433,8 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, irq_flow_handler_t handler, unsigned int type, bool threaded, - struct lock_class_key *lock_key); + struct lock_class_key *lock_key, + struct lock_class_key *request_key); #ifdef CONFIG_LOCKDEP @@ -445,10 +450,12 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, irq_flow_handler_t handler, unsigned int type) { - static struct lock_class_key key; + static struct lock_class_key lock_key; + static struct lock_class_key request_key; return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, - handler, type, false, &key); + handler, type, false, + &lock_key, &request_key); } static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, @@ -458,10 +465,12 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, unsigned int type) { - static struct lock_class_key key; + static struct lock_class_key lock_key; + static struct lock_class_key request_key; return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, - handler, type, true, &key); + handler, type, true, + &lock_key, &request_key); } #else static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, @@ -471,7 +480,7 @@ static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip, unsigned int type) { return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, - handler, type, false, NULL); + handler, type, false, NULL, NULL); } static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, @@ -481,7 +490,7 @@ static inline int 
gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip, unsigned int type) { return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq, - handler, type, true, NULL); + handler, type, true, NULL, NULL); } #endif /* CONFIG_LOCKDEP */ diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 4c54611e03e9..622658dfbf0a 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -13,6 +13,8 @@ struct ifla_vf_stats { __u64 tx_bytes; __u64 broadcast; __u64 multicast; + __u64 rx_dropped; + __u64 tx_dropped; }; struct ifla_vf_info { diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3ecef57c31e3..8e66866c11be 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -4,7 +4,7 @@ #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); -struct skb_array *tap_get_skb_array(struct file *file); +struct ptr_ring *tap_get_ptr_ring(struct file *file); #else #include <linux/err.h> #include <linux/errno.h> @@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tap_get_skb_array(struct file *f) +static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) { return ERR_PTR(-EINVAL); } @@ -70,7 +70,7 @@ struct tap_queue { u16 queue_index; bool enabled; struct list_head next; - struct skb_array skb_array; + struct ptr_ring ring; }; rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index bf9bdf42d577..c5b0a75a7812 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h @@ -17,9 +17,14 @@ #include <uapi/linux/if_tun.h> +#define TUN_XDP_FLAG 0x1UL + #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) struct socket *tun_get_socket(struct file *); -struct skb_array *tun_get_skb_array(struct file *file); +struct ptr_ring *tun_get_tx_ring(struct file *file); +bool tun_is_xdp_buff(void *ptr); +void *tun_xdp_to_ptr(void *ptr); +void *tun_ptr_to_xdp(void *ptr); #else #include <linux/err.h> #include <linux/errno.h> @@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f) { return ERR_PTR(-EINVAL); } -static inline struct skb_array *tun_get_skb_array(struct file *f) +static inline struct ptr_ring *tun_get_tx_ring(struct file *f) { return ERR_PTR(-EINVAL); } +static inline bool tun_is_xdp_buff(void *ptr) +{ + return false; +} +static inline void *tun_xdp_to_ptr(void *ptr) +{ + return NULL; +} +static inline void *tun_ptr_to_xdp(void *ptr) +{ + return NULL; +} #endif /* CONFIG_TUN */ #endif /* __IF_TUN_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index e140f69163b6..a0231e96a578 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -212,6 +212,7 @@ struct irq_data { * mask. Applies only to affinity managed irqs. 
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set + * IRQD_CAN_RESERVE - Can use reservation mode */ enum { IRQD_TRIGGER_MASK = 0xf, @@ -233,6 +234,7 @@ enum { IRQD_MANAGED_SHUTDOWN = (1 << 23), IRQD_SINGLE_TARGET = (1 << 24), IRQD_DEFAULT_TRIGGER_SET = (1 << 25), + IRQD_CAN_RESERVE = (1 << 26), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -377,6 +379,21 @@ static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; } +static inline void irqd_set_can_reserve(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_CAN_RESERVE; +} + +static inline void irqd_clr_can_reserve(struct irq_data *d) +{ + __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; +} + +static inline bool irqd_can_reserve(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_CAN_RESERVE; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 39fb3700f7a9..25b33b664537 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -255,12 +255,15 @@ static inline bool irq_is_percpu_devid(unsigned int irq) } static inline void -irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) +irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, + struct lock_class_key *request_class) { struct irq_desc *desc = irq_to_desc(irq); - if (desc) - lockdep_set_class(&desc->lock, class); + if (desc) { + lockdep_set_class(&desc->lock, lock_class); + lockdep_set_class(&desc->request_mutex, request_class); + } } #ifdef CONFIG_IRQ_PREFLOW_FASTEOI diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index a34355d19546..48c7e86bb556 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -113,7 +113,7 @@ struct irq_domain_ops { unsigned int nr_irqs, void *arg); void (*free)(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs); - int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early); + int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool reserve); void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec, unsigned long *out_hwirq, unsigned int *out_type); diff --git a/include/linux/mdio.h b/include/linux/mdio.h index e37c21d8eb19..2cfffe586885 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -41,8 +41,8 @@ struct mdio_device { int addr; int flags; struct gpio_desc *reset; - unsigned int reset_delay; - unsigned int reset_post_delay; + unsigned int reset_assert_delay; + unsigned int reset_deassert_delay; }; #define to_mdio_device(d) container_of(d, struct mdio_device, dev) @@ -262,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) return reg; } +int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); +int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); + int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 57b109c6e422..1f509d072026 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1165,6 +1165,10 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); int 
mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, + u64 *values, + int num_counters, + size_t *offsets); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b25e7baa273e..a0b48afcb422 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -95,6 +95,10 @@ struct mlx5_flow_destination { struct mlx5_flow_namespace * mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type); +struct mlx5_flow_namespace * +mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type, + int vport); struct mlx5_flow_table * mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index d44ec5f41d4a..78e36fc2609e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -794,7 +794,10 @@ enum { }; struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x80]; + u8 reserved_at_0[0x30]; + u8 vhca_id[0x10]; + + u8 reserved_at_40[0x40]; u8 log_max_srq_sz[0x8]; u8 log_max_qp_sz[0x8]; @@ -1023,12 +1026,19 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_3b8[0x3]; u8 log_min_stride_sz_sq[0x5]; - u8 reserved_at_3c0[0x1b]; + u8 hairpin[0x1]; + u8 reserved_at_3c1[0x2]; + u8 log_max_hairpin_queues[0x5]; + u8 reserved_at_3c8[0x3]; + u8 log_max_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3d0[0xb]; u8 log_max_wq_sz[0x5]; u8 nic_vport_change_event[0x1]; u8 disable_local_lb[0x1]; - u8 reserved_at_3e2[0x9]; + u8 reserved_at_3e2[0x1]; + u8 log_min_hairpin_wq_data_sz[0x5]; + u8 reserved_at_3e8[0x3]; u8 log_max_vlan_list[0x5]; u8 reserved_at_3f0[0x3]; u8 log_max_current_mc_list[0x5]; @@ -1162,7 +1172,10 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_118[0x3]; u8 log_wq_sz[0x5]; - u8 reserved_at_120[0x15]; + u8 reserved_at_120[0xb]; + u8 log_hairpin_data_sz[0x5]; + u8 reserved_at_130[0x5]; + u8 log_wqe_num_of_strides[0x3]; u8 two_byte_shift_en[0x1]; u8 reserved_at_139[0x4]; @@ -2482,7 +2495,8 @@ struct mlx5_ifc_sqc_bits { u8 state[0x4]; u8 reg_umr[0x1]; u8 allow_swp[0x1]; - u8 reserved_at_e[0x12]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2490,7 +2504,13 @@ struct mlx5_ifc_sqc_bits { u8 reserved_at_40[0x8]; u8 cqn[0x18]; - u8 reserved_at_60[0x90]; + u8 reserved_at_60[0x8]; + u8 hairpin_peer_rq[0x18]; + + u8 reserved_at_80[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_a0[0x50]; u8 packet_pacing_rate_limit_index[0x10]; u8 tis_lst_sz[0x10]; @@ -2562,7 +2582,8 @@ struct mlx5_ifc_rqc_bits { u8 state[0x4]; u8 reserved_at_c[0x1]; u8 flush_in_error_en[0x1]; - u8 reserved_at_e[0x12]; + u8 hairpin[0x1]; + u8 reserved_at_f[0x11]; u8 reserved_at_20[0x8]; u8 user_index[0x18]; @@ -2576,7 +2597,13 @@ struct mlx5_ifc_rqc_bits { u8 reserved_at_80[0x8]; u8 rmpn[0x18]; - u8 reserved_at_a0[0xe0]; + u8 reserved_at_a0[0x8]; + u8 hairpin_peer_sq[0x18]; + + u8 reserved_at_c0[0x10]; + u8 hairpin_peer_vhca[0x10]; + + u8 reserved_at_e0[0xa0]; struct mlx5_ifc_wq_bits wq; }; diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 88441f5ece25..a228310c1968 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -75,4 
+75,23 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen); void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); +struct mlx5_hairpin_params { + u8 log_data_size; + u16 q_counter; +}; + +struct mlx5_hairpin { + struct mlx5_core_dev *func_mdev; + struct mlx5_core_dev *peer_mdev; + + u32 rqn; + u32 sqn; +}; + +struct mlx5_hairpin * +mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, + struct mlx5_core_dev *peer_mdev, + struct mlx5_hairpin_params *params); + +void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); #endif /* __TRANSOBJ_H__ */ diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h new file mode 100644 index 000000000000..1c7e45016120 --- /dev/null +++ b/include/linux/net_dim.h @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef NET_DIM_H +#define NET_DIM_H + +#include <linux/module.h> + +struct net_dim_cq_moder { + u16 usec; + u16 pkts; + u8 cq_period_mode; +}; + +struct net_dim_sample { + ktime_t time; + u32 pkt_ctr; + u32 byte_ctr; + u16 event_ctr; +}; + +struct net_dim_stats { + int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ + int epms; /* events per msec */ +}; + +struct net_dim { /* Adaptive Moderation */ + u8 state; + struct net_dim_stats prev_stats; + struct net_dim_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + +enum { + NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, + NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, + NET_DIM_CQ_PERIOD_NUM_MODES +}; + +/* Adaptive moderation logic */ +enum { + NET_DIM_START_MEASURE, + NET_DIM_MEASURE_IN_PROGRESS, + NET_DIM_APPLY_NEW_PROFILE, +}; + +enum { + NET_DIM_PARKING_ON_TOP, + NET_DIM_PARKING_TIRED, + NET_DIM_GOING_RIGHT, + NET_DIM_GOING_LEFT, +}; + +enum { + NET_DIM_STATS_WORSE, + NET_DIM_STATS_SAME, + NET_DIM_STATS_BETTER, +}; + +enum { + NET_DIM_STEPPED, + NET_DIM_TOO_TIRED, + NET_DIM_ON_EDGE, +}; + +#define NET_DIM_PARAMS_NUM_PROFILES 5 +/* Adaptive moderation profiles */ +#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + +/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */ +#define NET_DIM_EQE_PROFILES { \ + {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define NET_DIM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct net_dim_cq_moder +profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { + NET_DIM_EQE_PROFILES, + NET_DIM_CQE_PROFILES, +}; + +static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode, + int ix) +{ + struct net_dim_cq_moder cq_moder; + + cq_moder = profile[cq_period_mode][ix]; + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; +} + +static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = NET_DIM_DEF_PROFILE_CQE; + else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = NET_DIM_DEF_PROFILE_EQE; + + return net_dim_get_profile(rx_cq_period_mode, default_profile_ix); +} + +static inline bool net_dim_on_top(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + return true; + case NET_DIM_GOING_RIGHT: + return (dim->steps_left > 1) && (dim->steps_right == 1); + default: /* NET_DIM_GOING_LEFT */ + return (dim->steps_right > 1) && (dim->steps_left == 1); + } +} + +static inline void net_dim_turn(struct net_dim *dim) +{ + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + dim->tune_state = NET_DIM_GOING_LEFT; + dim->steps_left = 0; + break; + case NET_DIM_GOING_LEFT: + dim->tune_state = NET_DIM_GOING_RIGHT; + dim->steps_right = 0; + break; + } +} + +static inline int net_dim_step(struct net_dim *dim) +{ + if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) + return NET_DIM_TOO_TIRED; + + switch
(dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + case NET_DIM_PARKING_TIRED: + break; + case NET_DIM_GOING_RIGHT: + if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) + return NET_DIM_ON_EDGE; + dim->profile_ix++; + dim->steps_right++; + break; + case NET_DIM_GOING_LEFT: + if (dim->profile_ix == 0) + return NET_DIM_ON_EDGE; + dim->profile_ix--; + dim->steps_left++; + break; + } + + dim->tired++; + return NET_DIM_STEPPED; +} + +static inline void net_dim_park_on_top(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tired = 0; + dim->tune_state = NET_DIM_PARKING_ON_TOP; +} + +static inline void net_dim_park_tired(struct net_dim *dim) +{ + dim->steps_right = 0; + dim->steps_left = 0; + dim->tune_state = NET_DIM_PARKING_TIRED; +} + +static inline void net_dim_exit_parking(struct net_dim *dim) +{ + dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT : + NET_DIM_GOING_RIGHT; + net_dim_step(dim); +} + +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + +static inline int net_dim_stats_compare(struct net_dim_stats *curr, + struct net_dim_stats *prev) +{ + if (!prev->bpms) + return curr->bpms ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_SAME; + + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER : + NET_DIM_STATS_WORSE; + + return NET_DIM_STATS_SAME; +} + +static inline bool net_dim_decision(struct net_dim_stats *curr_stats, + struct net_dim *dim) +{ + int prev_state = dim->tune_state; + int prev_ix = dim->profile_ix; + int stats_res; + int step_res; + + switch (dim->tune_state) { + case NET_DIM_PARKING_ON_TOP: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_SAME) + net_dim_exit_parking(dim); + break; + + case NET_DIM_PARKING_TIRED: + dim->tired--; + if (!dim->tired) + net_dim_exit_parking(dim); + break; + + case NET_DIM_GOING_RIGHT: + case NET_DIM_GOING_LEFT: + stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); + if (stats_res != NET_DIM_STATS_BETTER) + net_dim_turn(dim); + + if (net_dim_on_top(dim)) { + net_dim_park_on_top(dim); + break; + } + + step_res = net_dim_step(dim); + switch (step_res) { + case NET_DIM_ON_EDGE: + net_dim_park_on_top(dim); + break; + case NET_DIM_TOO_TIRED: + net_dim_park_tired(dim); + break; + } + + break; + } + + if ((prev_state != NET_DIM_PARKING_ON_TOP) || + (dim->tune_state != NET_DIM_PARKING_ON_TOP)) + dim->prev_stats = *curr_stats; + + return dim->profile_ix != prev_ix; +} + +static inline void net_dim_sample(u16 event_ctr, + u64 packets, + u64 bytes, + struct net_dim_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = packets; + s->byte_ctr = bytes; + s->event_ctr = event_ctr; +} + +#define NET_DIM_NEVENTS 64 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) + +static inline void net_dim_calc_stats(struct net_dim_sample *start, + struct net_dim_sample *end, + struct net_dim_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, 
start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); + + if (!delta_us) + return; + + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC, + delta_us); +} + +static inline void net_dim(struct net_dim *dim, + struct net_dim_sample end_sample) +{ + struct net_dim_stats curr_stats; + u16 nevents; + + switch (dim->state) { + case NET_DIM_MEASURE_IN_PROGRESS: + nevents = BIT_GAP(BITS_PER_TYPE(u16), + end_sample.event_ctr, + dim->start_sample.event_ctr); + if (nevents < NET_DIM_NEVENTS) + break; + net_dim_calc_stats(&dim->start_sample, &end_sample, + &curr_stats); + if (net_dim_decision(&curr_stats, dim)) { + dim->state = NET_DIM_APPLY_NEW_PROFILE; + schedule_work(&dim->work); + break; + } + /* fall through */ + case NET_DIM_START_MEASURE: + dim->state = NET_DIM_MEASURE_IN_PROGRESS; + break; + case NET_DIM_APPLY_NEW_PROFILE: + break; + } +} + +#endif /* NET_DIM_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cc4ce7456e38..ef7b348e8498 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -44,6 +44,7 @@ #include <net/dcbnl.h> #endif #include <net/netprio_cgroup.h> +#include <net/xdp.h> #include <linux/netdev_features.h> #include <linux/neighbour.h> @@ -686,6 +687,7 @@ struct netdev_rx_queue { #endif struct kobject kobj; struct net_device *dev; + struct xdp_rxq_info xdp_rxq; } ____cacheline_aligned_in_smp; /* @@ -804,7 +806,7 @@ enum bpf_netdev_command { BPF_OFFLOAD_DESTROY, }; -struct bpf_ext_analyzer_ops; +struct bpf_prog_offload_ops; struct netlink_ext_ack; struct netdev_bpf { @@ -826,7 +828,7 @@ struct netdev_bpf { /* BPF_OFFLOAD_VERIFIER_PREP */ struct { struct bpf_prog *prog; - const struct bpf_ext_analyzer_ops *ops; /* callee set */ + const struct bpf_prog_offload_ops *ops; /* callee set */ } verifier; /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ struct { @@ -1726,7 +1728,7 @@ struct net_device { const struct ndisc_ops *ndisc_ops; #endif -#ifdef CONFIG_XFRM +#ifdef CONFIG_XFRM_OFFLOAD const struct xfrmdev_ops *xfrmdev_ops; #endif @@ -1803,12 +1805,9 @@ struct net_device { /* Interface address info used in eth_type_trans() */ unsigned char *dev_addr; -#ifdef CONFIG_SYSFS struct netdev_rx_queue *_rx; - unsigned int num_rx_queues; unsigned int real_num_rx_queues; -#endif struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; @@ -2793,7 +2792,9 @@ struct softnet_data { struct Qdisc *output_queue; struct Qdisc **output_queue_tailp; struct sk_buff *completion_queue; - +#ifdef CONFIG_XFRM_OFFLOAD + struct sk_buff_head xfrm_backlog; +#endif #ifdef CONFIG_RPS /* input_queue_head should be written by cpu owning this struct, * and only read by other cpus. Worth using a cache line. @@ -3325,7 +3326,7 @@ int dev_get_phys_port_id(struct net_device *dev, int dev_get_phys_port_name(struct net_device *dev, char *name, size_t len); int dev_change_proto_down(struct net_device *dev, bool proto_down); -struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -4402,11 +4403,11 @@ do { \ * file/line information and a backtrace. */ #define netdev_WARN(dev, format, args...) 
\ - WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ + WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) -#define netdev_WARN_ONCE(dev, condition, format, arg...) \ - WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev) \ +#define netdev_WARN_ONCE(dev, format, args...) \ + WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ netdev_reg_state(dev), ##args) /* netif printk helpers, similar to netdev_printk */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index b24e9b101651..85a1a0b32c66 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -67,6 +67,7 @@ struct nf_hook_ops { struct net_device *dev; void *priv; u_int8_t pf; + bool nat_hook; unsigned int hooknum; /* Hooks are ordered in ascending priority. */ int priority; @@ -77,17 +78,28 @@ struct nf_hook_entry { void *priv; }; +struct nf_hook_entries_rcu_head { + struct rcu_head head; + void *allocation; +}; + struct nf_hook_entries { u16 num_hook_entries; /* padding */ struct nf_hook_entry hooks[]; - /* trailer: pointers to original orig_ops of each hook. - * - * This is not part of struct nf_hook_entry since its only - * needed in slow path (hook register/unregister). + /* trailer: pointers to original orig_ops of each hook, + * followed by rcu_head and scratch space used for freeing + * the structure via call_rcu. * + * This is not part of struct nf_hook_entry since its only + * needed in slow path (hook register/unregister): * const struct nf_hook_ops *orig_ops[] + * + * For the same reason, we store this at end -- its + * only needed when a hook is deleted, not during + * packet path processing: + * struct nf_hook_entries_rcu_head head */ }; @@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, struct net_device *indev, struct net_device *outdev, int (*okfn)(struct net *, struct sock *, struct sk_buff *)) { - struct nf_hook_entries *hook_head; + struct nf_hook_entries *hook_head = NULL; int ret = 1; #ifdef HAVE_JUMP_LABEL @@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, #endif rcu_read_lock(); - hook_head = rcu_dereference(net->nf.hooks[pf][hook]); + switch (pf) { + case NFPROTO_IPV4: + hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); + break; + case NFPROTO_IPV6: + hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); + break; + case NFPROTO_ARP: +#ifdef CONFIG_NETFILTER_FAMILY_ARP + hook_head = rcu_dereference(net->nf.hooks_arp[hook]); +#endif + break; + case NFPROTO_BRIDGE: +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); +#endif + break; +#if IS_ENABLED(CONFIG_DECNET) + case NFPROTO_DECNET: + hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); + break; +#endif + default: + WARN_ON_ONCE(1); + break; + } + if (hook_head) { struct nf_hook_state state; @@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); struct flowi; struct nf_queue_entry; -struct nf_afinfo { - unsigned short family; - __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol); - __sum16 (*checksum_partial)(struct sk_buff *skb, - unsigned int hook, - unsigned int dataoff, - unsigned int len, - u_int8_t protocol); - int (*route)(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict); - void (*saveroute)(const struct sk_buff *skb, - struct nf_queue_entry *entry); - int (*reroute)(struct net *net, struct sk_buff *skb, - const 
struct nf_queue_entry *entry); - int route_key_size; -}; - -extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; -static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) -{ - return rcu_dereference(nf_afinfo[family]); -} - -static inline __sum16 -nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, - u_int8_t protocol, unsigned short family) -{ - const struct nf_afinfo *afinfo; - __sum16 csum = 0; - - rcu_read_lock(); - afinfo = nf_get_afinfo(family); - if (afinfo) - csum = afinfo->checksum(skb, hook, dataoff, protocol); - rcu_read_unlock(); - return csum; -} - -static inline __sum16 -nf_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol, unsigned short family) -{ - const struct nf_afinfo *afinfo; - __sum16 csum = 0; - - rcu_read_lock(); - afinfo = nf_get_afinfo(family); - if (afinfo) - csum = afinfo->checksum_partial(skb, hook, dataoff, len, - protocol); - rcu_read_unlock(); - return csum; -} +__sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol, + unsigned short family); -int nf_register_afinfo(const struct nf_afinfo *afinfo); -void nf_unregister_afinfo(const struct nf_afinfo *afinfo); +__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol, unsigned short family); +int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict, unsigned short family); +int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); #include <net/flow.h> extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 8e42253e5d4d..34fc80f3eb90 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -122,6 +122,8 @@ struct ip_set_ext { u64 bytes; char *comment; u32 timeout; + u8 packets_op; + u8 bytes_op; }; struct ip_set; @@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], struct ip_set_ext *ext); extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, const void *e, bool active); +extern bool ip_set_match_extensions(struct ip_set *set, + const struct ip_set_ext *ext, + struct ip_set_ext *mext, + u32 flags, void *data); static inline int ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h index bb6fba480118..3d33a2c3f39f 100644 --- a/include/linux/netfilter/ipset/ip_set_counter.h +++ b/include/linux/netfilter/ipset/ip_set_counter.h @@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter) return (u64)atomic64_read(&(counter)->packets); } +static inline bool +ip_set_match_counter(u64 counter, u64 match, u8 op) +{ + switch (op) { + case IPSET_COUNTER_NONE: + return true; + case IPSET_COUNTER_EQ: + return counter == match; + case IPSET_COUNTER_NE: + return counter != match; + case IPSET_COUNTER_LT: + return counter < match; + case IPSET_COUNTER_GT: + return counter > match; + } + return false; +} + static inline void ip_set_update_counter(struct ip_set_counter *counter, - const struct ip_set_ext *ext, - struct ip_set_ext *mext, u32 flags) + const struct ip_set_ext *ext, u32 flags) { if (ext->packets != ULLONG_MAX && !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { ip_set_add_bytes(ext->bytes, 
counter); ip_set_add_packets(ext->packets, counter); } - if (flags & IPSET_FLAG_MATCH_COUNTERS) { - mext->packets = ip_set_get_packets(counter); - mext->bytes = ip_set_get_bytes(counter); - } } static inline bool diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 33f7530f96b9..1313b35c3ab7 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, const char *name); +struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, + const char *name); void xt_table_unlock(struct xt_table *t); int xt_proto_init(struct net *net, u_int8_t af); diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h index dc6111adea06..8dddfb151f00 100644 --- a/include/linux/netfilter_defs.h +++ b/include/linux/netfilter_defs.h @@ -4,7 +4,17 @@ #include <uapi/linux/netfilter.h> +/* in/out/forward only */ +#define NF_ARP_NUMHOOKS 3 + +/* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */ +#define NF_DN_NUMHOOKS 7 + +#if IS_ENABLED(CONFIG_DECNET) /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ -#define NF_MAX_HOOKS 8 +#define NF_MAX_HOOKS NF_DN_NUMHOOKS +#else +#define NF_MAX_HOOKS NF_INET_NUMHOOKS +#endif #endif diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 98c03b2462b5..b31dabfdb453 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -6,7 +6,53 @@ #include <uapi/linux/netfilter_ipv4.h> +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. + */ +struct ip_rt_info { + __be32 daddr; + __be32 saddr; + u_int8_t tos; + u_int32_t mark; +}; + int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); + +struct nf_queue_entry; + +#ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); +__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol); +int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); +int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); +#else +static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol) +{ + return 0; +} +static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb, + unsigned int hook, + unsigned int dataoff, + unsigned int len, + u_int8_t protocol) +{ + return 0; +} +static inline int nf_ip_route(struct net *net, struct dst_entry **dst, + struct flowi *fl, bool strict) +{ + return -EOPNOTSUPP; +} +static inline int nf_ip_reroute(struct sk_buff *skb, + const struct nf_queue_entry *entry) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_INET */ + #endif /*__LINUX_IP_NETFILTER_H*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 47c6b04c28c0..288c597e75b3 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -9,6 +9,17 @@ #include <uapi/linux/netfilter_ipv6.h> +/* Extra routing may needed on local out, as the QUEUE target never returns + * control to the table. 
+ */ +struct ip6_rt_info { + struct in6_addr daddr; + struct in6_addr saddr; + u_int32_t mark; +}; + +struct nf_queue_entry; + /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. @@ -19,6 +30,14 @@ struct nf_ipv6_ops { void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); + __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, u_int8_t protocol); + __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol); + int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict); + int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); }; #ifdef CONFIG_NETFILTER diff --git a/include/linux/phy.h b/include/linux/phy.h index c4b4715caa21..135aba5c3d39 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -634,6 +634,9 @@ struct phy_driver { int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, u16 val); + int (*read_page)(struct phy_device *dev); + int (*write_page)(struct phy_device *dev, int page); + /* Get the size and type of the eeprom contained within a plug-in * module */ int (*module_info)(struct phy_device *dev, @@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask, size_t maxbit); +void phy_resolve_aneg_linkmode(struct phy_device *phydev); + /** * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. @@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) } /** + * __phy_read - convenience function for reading a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to read + * + * The caller must have taken the MDIO bus lock. + */ +static inline int __phy_read(struct phy_device *phydev, u32 regnum) +{ + return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); +} + +/** * phy_write - Convenience function for writing a given PHY register * @phydev: the phy_device struct * @regnum: register number to write @@ -731,6 +748,23 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) } /** + * __phy_write - Convenience function for writing a given PHY register + * @phydev: the phy_device struct + * @regnum: register number to write + * @val: value to write to @regnum + * + * The caller must have taken the MDIO bus lock. 
+ */ +static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, + val); +} + +int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); +int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); + +/** * phy_interrupt_is_valid - Convenience function for testing a given PHY irq * @phydev: the phy_device struct * @@ -808,6 +842,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) */ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); +int phy_save_page(struct phy_device *phydev); +int phy_select_page(struct phy_device *phydev, int page); +int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); +int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); +int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); +int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, + u16 mask, u16 set); + struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, bool is_c45, struct phy_c45_device_ids *c45_ids); @@ -901,6 +943,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev); int genphy_c45_read_pma(struct phy_device *phydev); int genphy_c45_pma_setup_forced(struct phy_device *phydev); int genphy_c45_an_disable_aneg(struct phy_device *phydev); +int genphy_c45_read_mdix(struct phy_device *phydev); static inline int phy_read_status(struct phy_device *phydev) { diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index cf6392de6eb0..ee54453a40a0 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); -extern int fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed); #else static inline int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status, @@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } -static inline int fixed_phy_update_state(struct phy_device *phydev, - const struct fixed_phy_status *status, - const struct fixed_phy_status *changed) -{ - return -ENODEV; -} #endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 2ff18c9840a7..d31cb6215905 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd); #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) extern void *ns_get_path(struct path *path, struct task_struct *task, const struct proc_ns_operations *ns_ops); +typedef struct ns_common *ns_get_path_helper_t(void *); +extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb, + void *private_data); extern int ns_get_name(char *buf, size_t size, struct task_struct *task, const struct proc_ns_operations *ns_ops); diff --git a/include/linux/pti.h b/include/linux/pti.h new file mode 100644 index 000000000000..0174883a935a --- /dev/null +++ b/include/linux/pti.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +#ifndef _INCLUDE_PTI_H +#define _INCLUDE_PTI_H + +#ifdef CONFIG_PAGE_TABLE_ISOLATION +#include <asm/pti.h> +#else +static inline void pti_init(void) { } +#endif + +#endif 
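The new include/linux/pti.h above is a thin config-guard wrapper: with CONFIG_PAGE_TABLE_ISOLATION enabled it pulls in the arch implementation from asm/pti.h, otherwise it provides an empty inline pti_init() stub, so callers need no #ifdef at the call site. A minimal sketch of a hypothetical caller (the function name example_late_init() is illustrative only, not taken from this diff):

#include <linux/pti.h>

/* Hypothetical init path: compiles identically with PTI on or off,
 * because pti_init() degrades to the empty inline stub when
 * CONFIG_PAGE_TABLE_ISOLATION is disabled.
 */
static void __init example_late_init(void)
{
	pti_init();
}

The same header-owns-the-#ifdef pattern appears elsewhere in this merge, for example the CONFIG_INET stubs added to netfilter_ipv4.h above.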
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 6866df4f31b5..13fb06a103c6 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -447,7 +447,12 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) { - return kcalloc(size, sizeof(void *), gfp); + /* Allocate an extra dummy element at end of ring to avoid consumer head + * or producer head access past the end of the array. Possible when + * producer/consumer operations and __ptr_ring_peek operations run in + * parallel. + */ + return kcalloc(size + 1, sizeof(void *), gfp); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 39e2a2ac2471..2b3b350e07b7 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -32,14 +32,15 @@ #ifndef _COMMON_HSI_H #define _COMMON_HSI_H + #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> #include <linux/slab.h> /* dma_addr_t manip */ -#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) -#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) +#define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) +#define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) #define DMA_REGPAIR_LE(x, val) do { \ @@ -47,39 +48,45 @@ (x).lo = DMA_LO_LE((val)); \ } while (0) -#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) -#define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) -#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_64(hi, lo) \ + HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64) +#define HILO_64_REGPAIR(regpair) ({ \ + typeof(regpair) __regpair = (regpair); \ + HILO_64(__regpair.hi, __regpair.lo); }) #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) #ifndef __COMMON_HSI__ #define __COMMON_HSI__ +/********************************/ +/* PROTOCOL COMMON FW CONSTANTS */ +/********************************/ -#define X_FINAL_CLEANUP_AGG_INT 1 +#define X_FINAL_CLEANUP_AGG_INT 1 -#define EVENT_RING_PAGE_SIZE_BYTES 4096 +#define EVENT_RING_PAGE_SIZE_BYTES 4096 -#define NUM_OF_GLOBAL_QUEUES 128 -#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 +#define NUM_OF_GLOBAL_QUEUES 128 +#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 -#define ISCSI_CDU_TASK_SEG_TYPE 0 -#define FCOE_CDU_TASK_SEG_TYPE 0 -#define RDMA_CDU_TASK_SEG_TYPE 1 +#define ISCSI_CDU_TASK_SEG_TYPE 0 +#define FCOE_CDU_TASK_SEG_TYPE 0 +#define RDMA_CDU_TASK_SEG_TYPE 1 -#define FW_ASSERT_GENERAL_ATTN_IDX 32 +#define FW_ASSERT_GENERAL_ATTN_IDX 32 -#define MAX_PINNED_CCFC 32 +#define MAX_PINNED_CCFC 32 /* Queue Zone sizes in bytes */ -#define TSTORM_QZONE_SIZE 8 -#define MSTORM_QZONE_SIZE 16 -#define USTORM_QZONE_SIZE 8 -#define XSTORM_QZONE_SIZE 8 -#define YSTORM_QZONE_SIZE 0 -#define PSTORM_QZONE_SIZE 0 - -#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 16 +#define USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 8 +#define YSTORM_QZONE_SIZE 0 +#define PSTORM_QZONE_SIZE 0 + +#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 @@ -102,8 +109,8 @@ #define
MAX_NUM_LL2_TX_STATS_COUNTERS 48 #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 20 -#define FW_REVISION_VERSION 0 +#define FW_MINOR_VERSION 33 +#define FW_REVISION_VERSION 1 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -115,10 +122,10 @@ #define MAX_NUM_PORTS_BB (2) #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) -#define MAX_NUM_PFS_K2 (16) -#define MAX_NUM_PFS_BB (8) -#define MAX_NUM_PFS (MAX_NUM_PFS_K2) -#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ #define MAX_NUM_VFS_K2 (192) #define MAX_NUM_VFS_BB (120) @@ -141,29 +148,14 @@ /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ #define NUM_PHYS_TCS_4PORT_K2 (4) #define NUM_OF_PHYS_TCS (8) - +#define PURE_LB_TC NUM_OF_PHYS_TCS #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) -#define LB_TC (NUM_OF_PHYS_TCS) - -/* Num of possible traffic priority values */ -#define NUM_OF_PRIO (8) - -#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) -#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) -#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) -#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) - /* CIDs */ -#define NUM_OF_CONNECTION_TYPES (8) -#define NUM_OF_LCIDS (320) -#define NUM_OF_LTIDS (320) - -/* Clock values */ -#define MASTER_CLK_FREQ_E4 (375e6) -#define STORM_CLK_FREQ_E4 (1000e6) -#define CLK25M_CLK_FREQ_E4 (25e6) +#define NUM_OF_CONNECTION_TYPES_E4 (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) /* Global PXP windows (GTT) */ #define NUM_OF_GTT 19 @@ -172,17 +164,17 @@ #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) /* Tools Version */ -#define TOOLS_VERSION 10 +#define TOOLS_VERSION 10 /*****************/ /* CDU CONSTANTS */ /*****************/ -#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) -#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) -#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) +#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) @@ -201,45 +193,45 @@ #define DQ_DEMS_TOE_LOCAL_ADV_WND 4 #define DQ_DEMS_ROCE_CQ_CONS 7 -/* XCM agg val selection */ -#define DQ_XCM_AGG_VAL_SEL_WORD2 0 -#define DQ_XCM_AGG_VAL_SEL_WORD3 1 -#define DQ_XCM_AGG_VAL_SEL_WORD4 2 -#define DQ_XCM_AGG_VAL_SEL_WORD5 3 -#define DQ_XCM_AGG_VAL_SEL_REG3 4 -#define DQ_XCM_AGG_VAL_SEL_REG4 5 -#define DQ_XCM_AGG_VAL_SEL_REG5 6 -#define DQ_XCM_AGG_VAL_SEL_REG6 7 - -/* XCM agg val selection */ -#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 -#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 -#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 -#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 
-#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 -#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 -#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 -#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 +/* XCM agg val selection (HW) */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection (FW) */ +#define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 +#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 +#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 +#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 /* UCM agg val selection (HW) */ #define DQ_UCM_AGG_VAL_SEL_WORD0 0 #define DQ_UCM_AGG_VAL_SEL_WORD1 1 #define DQ_UCM_AGG_VAL_SEL_WORD2 2 #define DQ_UCM_AGG_VAL_SEL_WORD3 3 -#define DQ_UCM_AGG_VAL_SEL_REG0 4 -#define DQ_UCM_AGG_VAL_SEL_REG1 5 -#define DQ_UCM_AGG_VAL_SEL_REG2 6 -#define DQ_UCM_AGG_VAL_SEL_REG3 7 +#define DQ_UCM_AGG_VAL_SEL_REG0 4 +#define DQ_UCM_AGG_VAL_SEL_REG1 5 +#define DQ_UCM_AGG_VAL_SEL_REG2 6 +#define DQ_UCM_AGG_VAL_SEL_REG3 7 /* UCM agg val selection (FW) */ #define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 @@ -263,7 +255,7 @@ #define DQ_TCM_ROCE_RQ_PROD_CMD \ DQ_TCM_AGG_VAL_SEL_WORD0 -/* XCM agg counter flag selection */ +/* XCM agg counter flag selection (HW) */ #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 @@ -273,20 +265,20 @@ #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 -/* XCM agg counter flag selection */ -#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) -#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) -#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD 
BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) -#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) -#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +/* XCM agg counter flag selection (FW) */ +#define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) +#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) /* UCM agg counter flag selection (HW) */ #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 @@ -317,9 +309,9 @@ #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 /* TCM agg counter flag selection (FW) */ -#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) -#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) -#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) +#define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) +#define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) +#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) #define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) @@ -327,18 +319,18 @@ #define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) /* PWM address mapping */ -#define DQ_PWM_OFFSET_DPM_BASE 0x0 -#define DQ_PWM_OFFSET_DPM_END 0x27 +#define DQ_PWM_OFFSET_DPM_BASE 0x0 +#define DQ_PWM_OFFSET_DPM_END 0x27 #define DQ_PWM_OFFSET_XCM16_BASE 0x40 #define DQ_PWM_OFFSET_XCM32_BASE 0x44 #define DQ_PWM_OFFSET_UCM16_BASE 0x48 #define DQ_PWM_OFFSET_UCM32_BASE 0x4C -#define DQ_PWM_OFFSET_UCM16_4 0x50 +#define DQ_PWM_OFFSET_UCM16_4 0x50 #define DQ_PWM_OFFSET_TCM16_BASE 0x58 #define DQ_PWM_OFFSET_TCM32_BASE 0x5C -#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 -#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 -#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B +#define DQ_PWM_OFFSET_XCM_FLAGS 0x68 +#define DQ_PWM_OFFSET_UCM_FLAGS 0x69 +#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) @@ -347,10 +339,11 @@ #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) -#define DQ_REGION_SHIFT (12) + +#define DQ_REGION_SHIFT (12) /* DPM */ -#define DQ_DPM_WQE_BUFF_SIZE (320) +#define DQ_DPM_WQE_BUFF_SIZE (320) /* Conn type ranges */ #define DQ_CONN_TYPE_RANGE_SHIFT (4) @@ -359,29 +352,30 @@ /* QM CONSTANTS */ /*****************/ -/* number of TX queues in the QM */ +/* Number of TX queues in the QM */ #define MAX_QM_TX_QUEUES_K2 512 #define MAX_QM_TX_QUEUES_BB 448 #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 -/* number of Other queues in the QM */ +/* Number of Other queues in the QM */ #define 
MAX_QM_OTHER_QUEUES_BB 64 #define MAX_QM_OTHER_QUEUES_K2 128 #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 -/* number of queues in a PF queue group */ +/* Number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 -/* the size of a single queue element in bytes */ -#define QM_PQ_ELEMENT_SIZE 4 +/* The size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 -/* base number of Tx PQs in the CM PQ representation. - * should be used when storing PQ IDs in CM PQ registers and context +/* Base number of Tx PQs in the CM PQ representation. + * Should be used when storing PQ IDs in CM PQ registers and context. */ -#define CM_TX_PQ_BASE 0x200 +#define CM_TX_PQ_BASE 0x200 -/* number of global Vport/QCN rate limiters */ +/* Number of global Vport/QCN rate limiters */ #define MAX_QM_GLOBAL_RLS 256 + /* QM registers data */ #define QM_LINE_CRD_REG_WIDTH 16 #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) @@ -400,7 +394,7 @@ #define CAU_FSM_ETH_TX 1 /* Number of Protocol Indices per Status Block */ -#define PIS_PER_SB 12 +#define PIS_PER_SB_E4 12 #define CAU_HC_STOPPED_STATE 3 #define CAU_HC_DISABLE_STATE 4 @@ -432,8 +426,7 @@ #define IGU_CMD_INT_ACK_BASE 0x0400 #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ - MAX_TOT_SB_PER_PATH - \ - 1) + MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 @@ -447,8 +440,7 @@ #define IGU_CMD_PROD_UPD_BASE 0x0600 #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ - MAX_TOT_SB_PER_PATH - \ - 1) + MAX_TOT_SB_PER_PATH - 1) #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff /*****************/ @@ -514,129 +506,126 @@ PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) /* PF BAR */ -#define PXP_BAR0_START_GRC 0x0000 -#define PXP_BAR0_GRC_LENGTH 0x1C00000 -#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ - PXP_BAR0_GRC_LENGTH - 1) - -#define PXP_BAR0_START_IGU 0x1C00000 -#define PXP_BAR0_IGU_LENGTH 0x10000 -#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ - PXP_BAR0_IGU_LENGTH - 1) - -#define PXP_BAR0_START_TSDM 0x1C80000 -#define PXP_BAR0_SDM_LENGTH 0x40000 +#define PXP_BAR0_START_GRC 0x0000 +#define PXP_BAR0_GRC_LENGTH 0x1C00000 +#define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ + PXP_BAR0_GRC_LENGTH - 1) + +#define PXP_BAR0_START_IGU 0x1C00000 +#define PXP_BAR0_IGU_LENGTH 0x10000 +#define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ + PXP_BAR0_IGU_LENGTH - 1) + +#define PXP_BAR0_START_TSDM 0x1C80000 +#define PXP_BAR0_SDM_LENGTH 0x40000 #define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 -#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_MSDM 0x1D00000 -#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_MSDM 0x1D00000 +#define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_USDM 0x1D80000 -#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_USDM 0x1D80000 +#define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_XSDM 0x1E00000 -#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_XSDM 0x1E00000 +#define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_YSDM 0x1E80000 -#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define 
PXP_BAR0_START_YSDM 0x1E80000 +#define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ + PXP_BAR0_SDM_LENGTH - 1) -#define PXP_BAR0_START_PSDM 0x1F00000 -#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ - PXP_BAR0_SDM_LENGTH - 1) +#define PXP_BAR0_START_PSDM 0x1F00000 +#define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ + PXP_BAR0_SDM_LENGTH - 1) #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) /* VF BAR */ -#define PXP_VF_BAR0 0 - -#define PXP_VF_BAR0_START_GRC 0x3E00 -#define PXP_VF_BAR0_GRC_LENGTH 0x200 -#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ - PXP_VF_BAR0_GRC_LENGTH - 1) - -#define PXP_VF_BAR0_START_IGU 0 -#define PXP_VF_BAR0_IGU_LENGTH 0x3000 -#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ - PXP_VF_BAR0_IGU_LENGTH - 1) - -#define PXP_VF_BAR0_START_DQ 0x3000 -#define PXP_VF_BAR0_DQ_LENGTH 0x200 -#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 -#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_OPAQUE_OFFSET) -#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ - + 4) -#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ - PXP_VF_BAR0_DQ_LENGTH - 1) - -#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 -#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 -#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 -#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 -#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 -#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 -#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 -#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ - + \ - PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ - - 1) - -#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 -#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 - -#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 - -#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 -#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 +#define PXP_VF_BAR0 0 + +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ + + 4) +#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define 
PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \ + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ + PXP_VF_BAR0_GRC_LENGTH - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_START_IGU2 0x10000 +#define PXP_VF_BAR0_IGU2_LENGTH 0xD000 +#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \ + PXP_VF_BAR0_IGU2_LENGTH - 1) + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 /* ILT Records */ #define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) -#define PXP_QUEUES_ZONE_MAX_NUM 320 + +/* Host Interface */ +#define PXP_QUEUES_ZONE_MAX_NUM 320 + /*****************/ /* PRM CONSTANTS */ /*****************/ -#define PRM_DMA_PAD_BYTES_NUM 2 +#define PRM_DMA_PAD_BYTES_NUM 2 + /*****************/ /* SDMs CONSTANTS */ /*****************/ -#define SDM_OP_GEN_TRIG_NONE 0 -#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 -#define SDM_OP_GEN_TRIG_AGG_INT 2 -#define SDM_OP_GEN_TRIG_LOADER 4 +#define SDM_OP_GEN_TRIG_NONE 0 +#define SDM_OP_GEN_TRIG_WAKE_THREAD 1 +#define SDM_OP_GEN_TRIG_AGG_INT 2 +#define SDM_OP_GEN_TRIG_LOADER 4 #define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 #define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 @@ -644,26 +633,26 @@ /* Completion types */ /********************/ -#define SDM_COMP_TYPE_NONE 0 -#define SDM_COMP_TYPE_WAKE_THREAD 1 -#define SDM_COMP_TYPE_AGG_INT 2 -#define SDM_COMP_TYPE_CM 3 -#define SDM_COMP_TYPE_LOADER 4 -#define SDM_COMP_TYPE_PXP 5 -#define SDM_COMP_TYPE_INDICATE_ERROR 6 -#define SDM_COMP_TYPE_RELEASE_THREAD 7 -#define SDM_COMP_TYPE_RAM 8 -#define SDM_COMP_TYPE_INC_ORDER_CNT 9 +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 8 +#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /*****************/ -/* PBF Constants */ +/* PBF CONSTANTS */ /*****************/ /* Number of PBF command queue lines. Each line is 32B. */ -#define PBF_MAX_CMD_LINES 3328 +#define PBF_MAX_CMD_LINES 3328 /* Number of BTB blocks. Each block is 256B. 
*/ -#define BTB_MAX_BLOCKS 1440 +#define BTB_MAX_BLOCKS 1440 /*****************/ /* PRS CONSTANTS */ @@ -671,14 +660,7 @@ #define PRS_GFT_CAM_LINES_NO_MATCH 31 -/* Async data KCQ CQE */ -struct async_data { - __le32 cid; - __le16 itid; - u8 error_code; - u8 fw_debug_param; -}; - +/* Interrupt coalescing TimeSet */ struct coalescing_timeset { u8 value; #define COALESCING_TIMESET_TIMESET_MASK 0x7F @@ -692,23 +674,32 @@ struct common_queue_zone { __le16 reserved; }; +/* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; __le16 cqe_prod; }; -struct regpair { - __le32 lo; - __le32 hi; +struct tcp_ulp_connect_done_params { + __le16 mss; + u8 snd_wnd_scale; + u8 flags; +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 +#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F +#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 }; -struct vf_pf_channel_eqe_data { - struct regpair msg_addr; +struct iscsi_connect_done_results { + __le16 icid; + __le16 conn_id; + struct tcp_ulp_connect_done_params params; }; struct iscsi_eqe_data { - __le32 cid; + __le16 icid; __le16 conn_id; + __le16 reserved; u8 error_code; u8 error_pdu_opcode_reserved; #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F @@ -719,52 +710,6 @@ struct iscsi_eqe_data { #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 }; -struct rdma_eqe_destroy_qp { - __le32 cid; - u8 reserved[4]; -}; - -union rdma_eqe_data { - struct regpair async_handle; - struct rdma_eqe_destroy_qp rdma_destroy_qp_data; -}; - -struct malicious_vf_eqe_data { - u8 vf_id; - u8 err_id; - __le16 reserved[3]; -}; - -struct initial_cleanup_eqe_data { - u8 vf_id; - u8 reserved[7]; -}; - -/* Event Data Union */ -union event_ring_data { - u8 bytes[8]; - struct vf_pf_channel_eqe_data vf_pf_channel; - struct iscsi_eqe_data iscsi_info; - union rdma_eqe_data rdma_data; - struct malicious_vf_eqe_data malicious_vf; - struct initial_cleanup_eqe_data vf_init_cleanup; -}; - -/* Event Ring Entry */ -struct event_ring_entry { - u8 protocol_id; - u8 opcode; - __le16 reserved0; - __le16 echo; - u8 fw_return_code; - u8 flags; -#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 -#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 -#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F -#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 - union event_ring_data data; -}; - /* Multi function mode */ enum mf_mode { ERROR_MODE /* Unsupported mode */, @@ -781,13 +726,31 @@ enum protocol_type { PROTOCOLID_CORE, PROTOCOLID_ETH, PROTOCOLID_IWARP, - PROTOCOLID_RESERVED5, + PROTOCOLID_RESERVED0, PROTOCOLID_PREROCE, PROTOCOLID_COMMON, - PROTOCOLID_RESERVED6, + PROTOCOLID_RESERVED1, MAX_PROTOCOL_TYPE }; +struct regpair { + __le32 lo; + __le32 hi; +}; + +/* RoCE Destroy Event Data */ +struct rdma_eqe_destroy_qp { + __le32 cid; + u8 reserved[4]; +}; + +/* RDMA Event Data Union */ +union rdma_eqe_data { + struct regpair async_handle; + struct rdma_eqe_destroy_qp rdma_destroy_qp_data; +}; + +/* Ustorm Queue Zone */ struct ustorm_eth_queue_zone { struct coalescing_timeset int_coalescing_timeset; u8 reserved[3]; @@ -798,62 +761,71 @@ struct ustorm_queue_zone { struct common_queue_zone common; }; -/* status block structure */ +/* Status block structure */ struct cau_pi_entry { - u32 prod; -#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF -#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 -#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F -#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 -#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 -#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 -#define CAU_PI_ENTRY_RESERVED_MASK 0xFF -#define 
CAU_PI_ENTRY_RESERVED_SHIFT 24 + __le32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 }; -/* status block structure */ +/* Status block structure */ struct cau_sb_entry { - u32 data; -#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF -#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 -#define CAU_SB_ENTRY_STATE0_MASK 0xF -#define CAU_SB_ENTRY_STATE0_SHIFT 24 -#define CAU_SB_ENTRY_STATE1_MASK 0xF -#define CAU_SB_ENTRY_STATE1_SHIFT 28 - u32 params; -#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 -#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F -#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 -#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 -#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 -#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 -#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF -#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 -#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 -#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 -#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF -#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 -#define CAU_SB_ENTRY_TPH_MASK 0x1 -#define CAU_SB_ENTRY_TPH_SHIFT 31 + __le32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + __le32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 +#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 }; -/* core doorbell data */ +/* IGU cleanup bit values, to distinguish between a cleanup command and a * producer/consumer update.
+ */ +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + +/* Core doorbell data */ struct core_db_data { u8 params; -#define CORE_DB_DATA_DEST_MASK 0x3 -#define CORE_DB_DATA_DEST_SHIFT 0 -#define CORE_DB_DATA_AGG_CMD_MASK 0x3 -#define CORE_DB_DATA_AGG_CMD_SHIFT 2 -#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 -#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 -#define CORE_DB_DATA_RESERVED_MASK 0x1 -#define CORE_DB_DATA_RESERVED_SHIFT 5 -#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 - u8 agg_flags; - __le16 spq_prod; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; }; /* Enum of doorbell aggregative command selection */ @@ -909,67 +881,69 @@ struct db_l2_dpm_sge { struct regpair addr; __le16 nbytes; __le16 bitfields; -#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF -#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 -#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 -#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 -#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 -#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 -#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF -#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 +#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF +#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 +#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 +#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 +#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 +#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 +#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF +#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 __le32 reserved2; }; /* Structure for doorbell address, in legacy mode */ struct db_legacy_addr { __le32 addr; -#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 -#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 -#define DB_LEGACY_ADDR_DEMS_MASK 0x7 -#define DB_LEGACY_ADDR_DEMS_SHIFT 2 -#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF -#define DB_LEGACY_ADDR_ICID_SHIFT 5 +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 }; /* Structure for doorbell address, in PWM mode */ struct db_pwm_addr { __le32 addr; #define DB_PWM_ADDR_RESERVED0_MASK 0x7 -#define DB_PWM_ADDR_RESERVED0_SHIFT 0 -#define DB_PWM_ADDR_OFFSET_MASK 0x7F +#define DB_PWM_ADDR_RESERVED0_SHIFT 0 +#define DB_PWM_ADDR_OFFSET_MASK 0x7F #define DB_PWM_ADDR_OFFSET_SHIFT 3 -#define DB_PWM_ADDR_WID_MASK 0x3 -#define DB_PWM_ADDR_WID_SHIFT 10 -#define DB_PWM_ADDR_DPI_MASK 0xFFFF -#define DB_PWM_ADDR_DPI_SHIFT 12 +#define DB_PWM_ADDR_WID_MASK 0x3 +#define DB_PWM_ADDR_WID_SHIFT 10 +#define DB_PWM_ADDR_DPI_MASK 0xFFFF +#define DB_PWM_ADDR_DPI_SHIFT 12 #define DB_PWM_ADDR_RESERVED1_MASK 0xF -#define DB_PWM_ADDR_RESERVED1_SHIFT 28 +#define DB_PWM_ADDR_RESERVED1_SHIFT 28 }; -/* Parameters to RoCE firmware, passed in EDPM doorbell */ +/* Parameters to RDMA firmware, passed in EDPM doorbell */ struct db_rdma_dpm_params { __le32 params; -#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F -#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 -#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 -#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 
0xFF -#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF -#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 -#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 -#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 -#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 -#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 +#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F +#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 +#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 +#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF +#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF +#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 +#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 +#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 +#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 +#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 }; -/* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ +/* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a + * DPM burst. + */ struct db_rdma_dpm_data { __le16 icid; __le16 prod_val; @@ -987,22 +961,22 @@ enum igu_int_cmd { /* IGU producer or consumer update command */ struct igu_prod_cons_update { - u32 sb_id_and_flags; -#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF -#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 -#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 -#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 -#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 - u32 reserved1; + __le32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; }; /* Igu segments access for default status block only */ @@ -1012,38 +986,63 @@ enum igu_seg_access { MAX_IGU_SEG_ACCESS }; +/* Enumeration for L3 type field of parsing_and_err_flags. 
+ * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6 + * (This field can be filled according to the last-ethertype) + */ +enum l3_type { + e_l3_type_unknown, + e_l3_type_ipv4, + e_l3_type_ipv6, + MAX_L3_TYPE +}; + +/* Enumeration for l4Protocol field of parsing_and_err_flags. + * L4-protocol: 0 - none, 1 - TCP, 2 - UDP. + * If the packet is IPv4 fragment, and its not the first fragment, the + * protocol-type should be set to none. + */ +enum l4_protocol { + e_l4_protocol_none, + e_l4_protocol_tcp, + e_l4_protocol_udp, + MAX_L4_PROTOCOL +}; + +/* Parsing and error flags field */ struct parsing_and_err_flags { __le16 flags; -#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 -#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 -#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 -#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 -#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define 
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 }; +/* Parsing error flags bitmap */ struct parsing_err_flags { __le16 flags; #define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 @@ -1080,266 +1079,260 @@ struct parsing_err_flags { #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 }; +/* Pb context */ struct pb_context { __le32 crc[4]; }; +/* Concrete Function ID */ struct pxp_concrete_fid { __le16 fid; -#define PXP_CONCRETE_FID_PFID_MASK 0xF -#define PXP_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_CONCRETE_FID_PORT_MASK 0x3 -#define PXP_CONCRETE_FID_PORT_SHIFT 4 -#define PXP_CONCRETE_FID_PATH_MASK 0x1 -#define PXP_CONCRETE_FID_PATH_SHIFT 6 -#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_CONCRETE_FID_VFID_MASK 0xFF -#define PXP_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 }; +/* Concrete Function ID */ struct pxp_pretend_concrete_fid { __le16 fid; -#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF -#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 -#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 -#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 -#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF -#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 }; +/* Function ID */ union pxp_pretend_fid { struct pxp_pretend_concrete_fid concrete_fid; - __le16 opaque_fid; + __le16 opaque_fid; }; -/* Pxp Pretend Command Register. 
*/ +/* Pxp Pretend Command Register */ struct pxp_pretend_cmd { - union pxp_pretend_fid fid; - __le16 control; -#define PXP_PRETEND_CMD_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PATH_SHIFT 0 -#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 -#define PXP_PRETEND_CMD_PORT_MASK 0x3 -#define PXP_PRETEND_CMD_PORT_SHIFT 2 -#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 -#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF -#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 -#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 -#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 -#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 -#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 -#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 }; -/* PTT Record in PXP Admin Window. */ +/* PTT Record in PXP Admin Window */ struct pxp_ptt_entry { - __le32 offset; -#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF -#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 -#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF -#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 - struct pxp_pretend_cmd pretend; + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; }; -/* VF Zone A Permission Register. 
*/ +/* VF Zone A Permission Register */ struct pxp_vf_zone_a_permission { __le32 control; -#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF -#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 -#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 -#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F -#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF -#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 +#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF +#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 +#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 +#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F +#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF +#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 }; -/* RSS hash type */ +/* Rdif context */ struct rdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; u8 flags0; -#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 -#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 -#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 -#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 -#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 -#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 -#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 -#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 -#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 -#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 -#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 u8 partial_dif_data[7]; __le16 partial_crc_value; __le16 partial_checksum_value; __le32 offset_in_io; __le16 flags1; -#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 -#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 -#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 -#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 -#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 -#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 -#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 -#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 -#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 -#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 -#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 -#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 -#define 
RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 -#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 -#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 -#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 -#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 +#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 __le16 state; -#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 -#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF -#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 -#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 -#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 -#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 -#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 -#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF -#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 -#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 -#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF +#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 +#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 +#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 +#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 +#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 __le32 reserved2; }; -/* RSS hash type */ -enum rss_hash_type { - RSS_HASH_TYPE_DEFAULT = 0, - RSS_HASH_TYPE_IPV4 = 1, - RSS_HASH_TYPE_TCP_IPV4 = 2, - RSS_HASH_TYPE_IPV6 = 3, - RSS_HASH_TYPE_TCP_IPV6 = 4, - RSS_HASH_TYPE_UDP_IPV4 = 5, - RSS_HASH_TYPE_UDP_IPV6 = 6, - MAX_RSS_HASH_TYPE -}; - -/* status block structure */ -struct status_block { - __le16 pi_array[PIS_PER_SB]; +/* Status block structure */ +struct status_block_e4 { + __le16 pi_array[PIS_PER_SB_E4]; __le32 sb_num; -#define STATUS_BLOCK_SB_NUM_MASK 0x1FF -#define 
STATUS_BLOCK_SB_NUM_SHIFT 0 -#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F -#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 -#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF -#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 +#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 __le32 prod_index; -#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF -#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 -#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF -#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 }; +/* Tdif context */ struct tdif_task_context { __le32 initial_ref_tag; __le16 app_tag_value; __le16 app_tag_mask; - __le16 partial_crc_valueB; - __le16 partial_checksum_valueB; + __le16 partial_crc_value_b; + __le16 partial_checksum_value_b; __le16 stateB; -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 -#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 -#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F -#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 +#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F +#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 u8 reserved1; u8 flags0; -#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 -#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 -#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 -#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 -#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 -#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 -#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 -#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 -#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 -#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 -#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 -#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 +#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 +#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 +#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 +#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 +#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 +#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 +#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 +#define 
TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 __le32 flags1; -#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 -#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 -#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 -#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 -#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 -#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 -#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 -#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 -#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 -#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 -#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 -#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 -#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 -#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF -#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF -#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 -#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 -#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 -#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 -#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF -#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 -#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 -#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 -#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 -#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 -#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 -#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 - __le32 offset_in_iob; +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 +#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 +#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 +#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 +#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 +#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF +#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 +#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 
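
Every field in these HSI structures sits behind a MASK/SHIFT pair like the ones above. As a minimal standalone sketch of how such pairs are consumed — the qed headers ship equivalent generic GET_FIELD()/SET_FIELD() macros, and the QED_GET_FIELD/QED_SET_FIELD/tdif_error_in_io_a names below are illustrative, not part of this patch:

#include <linux/types.h>        /* u32, __le32, bool */
#include <asm/byteorder.h>      /* le32_to_cpu() */

#define QED_GET_FIELD(value, name) \
        (((value) >> name##_SHIFT) & name##_MASK)
#define QED_SET_FIELD(value, name, val) \
        do { \
                (value) &= ~((name##_MASK) << (name##_SHIFT)); \
                (value) |= ((val) & (name##_MASK)) << (name##_SHIFT); \
        } while (0)

/* Example: test the "error in IO" bit of interval set A, converting the
 * little-endian flags word to CPU order before extracting the field.
 */
static bool tdif_error_in_io_a(const struct tdif_task_context *ctx)
{
        u32 flags1 = le32_to_cpu(ctx->flags1);

        return QED_GET_FIELD(flags1, TDIF_TASK_CONTEXT_ERROR_IN_IO_A);
}

Working on a CPU-order copy and converting once keeps the endianness handling in one place when several fields of the same word are read or updated.
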
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 +#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF +#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 +#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 +#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 +#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 + __le32 offset_in_io_b; __le16 partial_crc_value_a; - __le16 partial_checksum_valuea_; - __le32 offset_in_ioa; + __le16 partial_checksum_value_a; + __le32 offset_in_io_a; u8 partial_dif_data_a[8]; u8 partial_dif_data_b[8]; }; +/* Timers context */ struct timers_context { __le32 logical_client_0; #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF @@ -1385,6 +1378,7 @@ struct timers_context { #define TIMERS_CONTEXT_RESERVED7_SHIFT 29 }; +/* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */ enum tunnel_next_protocol { e_unknown = 0, e_l2 = 1, diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index cb06e6e368e1..9db02856623b 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -36,150 +36,168 @@ /********************/ /* ETH FW CONSTANTS */ /********************/ -#define ETH_HSI_VER_MAJOR 3 -#define ETH_HSI_VER_MINOR 10 + +#define ETH_HSI_VER_MAJOR 3 +#define ETH_HSI_VER_MINOR 10 #define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 -#define ETH_CACHE_LINE_SIZE 64 -#define ETH_RX_CQE_GAP 32 -#define ETH_MAX_RAMROD_PER_CON 8 -#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 -#define ETH_RX_NUM_NEXT_PAGE_BDS 2 - -#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 -#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 - -#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 -#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 -#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 -#define ETH_TX_MAX_LSO_HDR_NBD 4 -#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 -#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 -#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 -#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 -#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) -#define ETH_TX_MAX_LSO_HDR_BYTES 510 -#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) -#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 -#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 -#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 -#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF - -#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 + +#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 +#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 
+ 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 +#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 +#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 +#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF + +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) /* Maximum number of buffers, used for RX packet placement */ -#define ETH_RX_MAX_BUFF_PER_PKT 5 -#define ETH_RX_BD_THRESHOLD 12 +#define ETH_RX_MAX_BUFF_PER_PKT 5 +#define ETH_RX_BD_THRESHOLD 12 -/* num of MAC/VLAN filters */ -#define ETH_NUM_MAC_FILTERS 512 -#define ETH_NUM_VLAN_FILTERS 512 +/* Num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 -/* approx. multicast constants */ -#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 -#define ETH_MULTICAST_MAC_BINS 256 -#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) +/* Approx. multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) -/* ethernet vport update constants */ -#define ETH_FILTER_RULES_COUNT 10 -#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 -#define ETH_RSS_KEY_SIZE_REGS 10 -#define ETH_RSS_ENGINE_NUM_K2 207 -#define ETH_RSS_ENGINE_NUM_BB 127 +/* Ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 /* TPA constants */ -#define ETH_TPA_MAX_AGGS_NUM 64 -#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT -#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 -#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Control frame check constants */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 +/* GFS constants */ +#define ETH_GFT_TRASH_CAN_VPORT 0x1FF + +/* Destination port mode */ +enum dest_port_mode { + DEST_PORT_PHY, + DEST_PORT_LOOPBACK, + DEST_PORT_PHY_LOOPBACK, + DEST_PORT_DROP, + MAX_DEST_PORT_MODE +}; + +/* Ethernet address type */ +enum eth_addr_type { + BROADCAST_ADDRESS, + MULTICAST_ADDRESS, + UNICAST_ADDRESS, + UNKNOWN_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + struct eth_tx_1st_bd_flags { u8 bitfields; -#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 -#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define 
ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 }; -/* The parsing information data fo rthe first tx bd of a given packet. */ +/* The parsing information data fo rthe first tx bd of a given packet */ struct eth_tx_data_1st_bd { __le16 vlan; u8 nbds; struct eth_tx_1st_bd_flags bd_flags; __le16 bitfields; -#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 -#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 -#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 -#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF -#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 }; -/* The parsing information data for the second tx bd of a given packet. */ +/* The parsing information data for the second tx bd of a given packet */ struct eth_tx_data_2nd_bd { __le16 tunn_ip_size; __le16 bitfields1; -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 -#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 -#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 -#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define 
ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 __le16 bitfields2; -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 -#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; -/* Firmware data for L2-EDPM packet. */ +/* Firmware data for L2-EDPM packet */ struct eth_edpm_fw_data { struct eth_tx_data_1st_bd data_1st_bd; struct eth_tx_data_2nd_bd data_2nd_bd; __le32 reserved; }; -struct eth_fast_path_cqe_fw_debug { - __le16 reserved2; -}; - -/* tunneling parsing flags */ +/* Tunneling parsing flags */ struct eth_tunnel_parsing_flags { u8 flags; #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 @@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags { /* PMD flow control bits */ struct eth_pmd_flow_flags { u8 flags; -#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 -#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 -#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F -#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 +#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 +#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 +#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F +#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 }; -/* Regular ETH Rx FP CQE. */ +/* Regular ETH Rx FP CQE */ struct eth_fast_path_rx_reg_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 __le16 pkt_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe { u8 placement_offset; struct eth_tunnel_parsing_flags tunnel_pars_flags; u8 bd_num; - u8 reserved[9]; - struct eth_fast_path_cqe_fw_debug fw_debug; - u8 reserved1[3]; + u8 reserved; + __le16 flow_id; + u8 reserved1[11]; struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-continue ETH Rx FP CQE. */ +/* TPA-continue ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_cont_cqe { u8 type; u8 tpa_agg_index; @@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-end ETH Rx FP CQE. */ +/* TPA-end ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_end_cqe { u8 type; u8 tpa_agg_index; @@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe { struct eth_pmd_flow_flags pmd_flags; }; -/* TPA-start ETH Rx FP CQE. 
*/ +/* TPA-start ETH Rx FP CQE */ struct eth_fast_path_rx_tpa_start_cqe { u8 type; u8 bitfields; -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF -#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 -#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 __le16 seg_len; struct parsing_and_err_flags pars_flags; __le16 vlan_tag; @@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe { u8 tpa_agg_index; u8 header_len; __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; - struct eth_fast_path_cqe_fw_debug fw_debug; + __le16 flow_id; u8 reserved; struct eth_pmd_flow_flags pmd_flags; }; @@ -295,24 +313,24 @@ struct eth_rx_bd { struct regpair addr; }; -/* regular ETH Rx SP CQE */ +/* Regular ETH Rx SP CQE */ struct eth_slow_path_rx_cqe { - u8 type; - u8 ramrod_cmd_id; - u8 error_flag; - u8 reserved[25]; - __le16 echo; - u8 reserved1; + u8 type; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[25]; + __le16 echo; + u8 reserved1; struct eth_pmd_flow_flags pmd_flags; }; -/* union for all ETH Rx CQE types */ +/* Union for all ETH Rx CQE types */ union eth_rx_cqe { - struct eth_fast_path_rx_reg_cqe fast_path_regular; - struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; - struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; - struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; - struct eth_slow_path_rx_cqe slow_path; + struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; + struct eth_slow_path_rx_cqe slow_path; }; /* ETH Rx CQE type */ @@ -339,7 +357,7 @@ enum eth_rx_tunn_type { MAX_ETH_RX_TUNN_TYPE }; -/* Aggregation end reason. */ +/* Aggregation end reason. */ enum eth_tpa_end_reason { ETH_AGG_END_UNUSED, ETH_AGG_END_SP_UPDATE, @@ -354,59 +372,59 @@ enum eth_tpa_end_reason { /* The first tx bd of a given packet */ struct eth_tx_1st_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_1st_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_1st_bd data; }; /* The second tx bd of a given packet */ struct eth_tx_2nd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_2nd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_2nd_bd data; }; -/* The parsing information data for the third tx bd of a given packet. 
*/ +/* The parsing information data for the third tx bd of a given packet */ struct eth_tx_data_3rd_bd { __le16 lso_mss; __le16 bitfields; -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF -#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 -#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF -#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 -#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F -#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 u8 tunn_l4_hdr_start_offset_w; u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ struct eth_tx_3rd_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_3rd_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_3rd_bd data; }; -/* Complementary information for the regular tx bd of a given packet. */ +/* Complementary information for the regular tx bd of a given packet */ struct eth_tx_data_bd { - __le16 reserved0; - __le16 bitfields; -#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF -#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 -#define ETH_TX_DATA_BD_START_BD_MASK 0x1 -#define ETH_TX_DATA_BD_START_BD_SHIFT 8 -#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F -#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 __le16 reserved3; }; /* The common non-special TX BD ring element */ struct eth_tx_bd { - struct regpair addr; - __le16 nbytes; - struct eth_tx_data_bd data; + struct regpair addr; + __le16 nbytes; + struct eth_tx_data_bd data; }; union eth_tx_bd_types { @@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone { /* ETH doorbell data */ struct eth_db_data { u8 params; -#define ETH_DB_DATA_DEST_MASK 0x3 -#define ETH_DB_DATA_DEST_SHIFT 0 -#define ETH_DB_DATA_AGG_CMD_MASK 0x3 -#define ETH_DB_DATA_AGG_CMD_SHIFT 2 -#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 -#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 -#define ETH_DB_DATA_RESERVED_MASK 0x1 -#define ETH_DB_DATA_RESERVED_SHIFT 5 -#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 bd_prod; }; +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + #endif /* __ETH_COMMON__ */ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 12fc9e788eea..22077c586853 100644 --- a/include/linux/qed/fcoe_common.h +++ 
b/include/linux/qed/fcoe_common.h @@ -8,217 +8,78 @@ #ifndef __FCOE_COMMON__ #define __FCOE_COMMON__ + /*********************/ /* FCOE FW CONSTANTS */ /*********************/ #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 -struct fcoe_abts_pkt { - __le32 abts_rsp_fc_payload_lo; - __le16 abts_rsp_rx_id; - u8 abts_rsp_rctl; - u8 reserved2; -}; - -/* FCoE additional WQE (Sq/XferQ) information */ -union fcoe_additional_info_union { - __le32 previous_tid; - __le32 parent_tid; - __le32 burst_length; - __le32 seq_rec_updated_offset; -}; - -struct fcoe_exp_ro { - __le32 data_offset; - __le32 reserved; -}; - -union fcoe_cleanup_addr_exp_ro_union { - struct regpair abts_rsp_fc_payload_hi; - struct fcoe_exp_ro exp_ro; -}; - -/* FCoE Ramrod Command IDs */ -enum fcoe_completion_status { - FCOE_COMPLETION_STATUS_SUCCESS, - FCOE_COMPLETION_STATUS_FCOE_VER_ERR, - FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, - MAX_FCOE_COMPLETION_STATUS -}; - -struct fc_addr_nw { - u8 addr_lo; - u8 addr_mid; - u8 addr_hi; -}; - -/* FCoE connection offload */ -struct fcoe_conn_offload_ramrod_data { - struct regpair sq_pbl_addr; - struct regpair sq_curr_page_addr; - struct regpair sq_next_page_addr; - struct regpair xferq_pbl_addr; - struct regpair xferq_curr_page_addr; - struct regpair xferq_next_page_addr; - struct regpair respq_pbl_addr; - struct regpair respq_curr_page_addr; - struct regpair respq_next_page_addr; - __le16 dst_mac_addr_lo; - __le16 dst_mac_addr_mid; - __le16 dst_mac_addr_hi; - __le16 src_mac_addr_lo; - __le16 src_mac_addr_mid; - __le16 src_mac_addr_hi; - __le16 tx_max_fc_pay_len; - __le16 e_d_tov_timer_val; - __le16 rx_max_fc_pay_len; - __le16 vlan_tag; -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 - __le16 physical_q0; - __le16 rec_rr_tov_timer_val; - struct fc_addr_nw s_id; - u8 max_conc_seqs_c3; - struct fc_addr_nw d_id; - u8 flags; -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 -#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 - __le16 conn_id; - u8 def_q_idx; - u8 reserved[5]; -}; - -/* FCoE terminate connection request */ -struct fcoe_conn_terminate_ramrod_data { - struct regpair terminate_params_addr; -}; - -struct fcoe_slow_sgl_ctx { - struct regpair base_sgl_addr; - __le16 curr_sge_off; - __le16 remainder_num_sges; - __le16 curr_sgl_index; - __le16 reserved; -}; - -union fcoe_dix_desc_ctx { - struct fcoe_slow_sgl_ctx dix_sgl; - struct scsi_sge cached_dix_sge; +/* The fcoe storm task context protection-information of Ystorm */ +struct protection_info_ctx { + __le16 flags; +#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 +#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 +#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 
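
The protection-information flags use the same packing convention. A hedged sketch of filling struct protection_info_ctx for a 512-byte DIF interval, reusing the QED_SET_FIELD macro sketched earlier (fcoe_prot_info_init and the 8-byte dst_size value are assumptions for illustration, not taken from this patch):

#include <asm/byteorder.h>      /* cpu_to_le16() */

static void fcoe_prot_info_init(struct protection_info_ctx *info,
                                u8 dix_block_size)
{
        u16 flags = 0;

        /* interval_size_log is log2 of the protection interval: 2^9 = 512B */
        QED_SET_FIELD(flags, PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG, 9);
        /* Carry the DIF data through to the peer */
        QED_SET_FIELD(flags, PROTECTION_INFO_CTX_DIF_TO_PEER, 1);

        info->flags = cpu_to_le16(flags);       /* flags is __le16 on the wire */
        info->dix_block_size = dix_block_size;
        info->dst_size = 8;                     /* assumed 8-byte DIF tuple */
}

Because the flags word is little-endian on the wire, the field accessors operate on a host-order scratch variable and the result is converted exactly once when stored.
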
+#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 +#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F +#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 + u8 dix_block_size; + u8 dst_size; }; -struct fcoe_fast_sgl_ctx { - struct regpair sgl_start_addr; - __le32 sgl_byte_offset; - __le16 task_reuse_cnt; - __le16 init_offset_in_first_sge; +/* The fcoe storm task context protection-information of Ystorm */ +union protection_info_union_ctx { + struct protection_info_ctx info; + __le32 value; }; +/* FCP CMD payload */ struct fcoe_fcp_cmd_payload { __le32 opaque[8]; }; +/* FCP RSP payload */ struct fcoe_fcp_rsp_payload { __le32 opaque[6]; }; -struct fcoe_fcp_xfer_payload { - __le32 opaque[3]; -}; - -/* FCoE firmware function init */ -struct fcoe_init_func_ramrod_data { - struct scsi_init_func_params func_params; - struct scsi_init_func_queues q_params; - __le16 mtu; - __le16 sq_num_pages_in_pbl; - __le32 reserved; -}; - -/* FCoE: Mode of the connection: Target or Initiator or both */ -enum fcoe_mode_type { - FCOE_INITIATOR_MODE = 0x0, - FCOE_TARGET_MODE = 0x1, - FCOE_BOTH_OR_NOT_CHOSEN = 0x3, - MAX_FCOE_MODE_TYPE -}; - -struct fcoe_rx_stat { - struct regpair fcoe_rx_byte_cnt; - struct regpair fcoe_rx_data_pkt_cnt; - struct regpair fcoe_rx_xfer_pkt_cnt; - struct regpair fcoe_rx_other_pkt_cnt; - __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; - __le32 fcoe_silent_drop_pkt_rq_full_cnt; - __le32 fcoe_silent_drop_pkt_crc_error_cnt; - __le32 fcoe_silent_drop_pkt_task_invalid_cnt; - __le32 fcoe_silent_drop_total_pkt_cnt; - __le32 rsrv; -}; - -struct fcoe_stat_ramrod_data { - struct regpair stat_params_addr; -}; - -struct protection_info_ctx { - __le16 flags; -#define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 -#define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 -#define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 -#define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 -#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF -#define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 -#define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 -#define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F -#define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 - u8 dix_block_size; - u8 dst_size; -}; - -union protection_info_union_ctx { - struct protection_info_ctx info; - __le32 value; -}; - +/* FCP RSP payload */ struct fcp_rsp_payload_padded { struct fcoe_fcp_rsp_payload rsp_payload; __le32 reserved[2]; }; +/* FCP RSP payload */ +struct fcoe_fcp_xfer_payload { + __le32 opaque[3]; +}; + +/* FCP RSP payload */ struct fcp_xfer_payload_padded { struct fcoe_fcp_xfer_payload xfer_payload; __le32 reserved[5]; }; +/* Task params */ struct fcoe_tx_data_params { __le32 data_offset; __le32 offset_in_io; u8 flags; -#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 -#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 -#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 -#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 -#define 
FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F -#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 +#define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 +#define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F +#define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 u8 dif_residual; __le16 seq_cnt; __le16 single_sge_saved_offset; @@ -227,6 +88,7 @@ struct fcoe_tx_data_params { __le16 reserved3; }; +/* Middle path parameters: FC header fields provided by the driver */ struct fcoe_tx_mid_path_params { __le32 parameter; u8 r_ctl; @@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params { __le16 ox_id; }; +/* Task params */ struct fcoe_tx_params { struct fcoe_tx_data_params data; struct fcoe_tx_mid_path_params mid_path; }; +/* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */ union fcoe_tx_info_union_ctx { struct fcoe_fcp_cmd_payload fcp_cmd_payload; struct fcp_rsp_payload_padded fcp_rsp_payload; @@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx { struct fcoe_tx_params tx_params; }; +/* Data sgl */ +struct fcoe_slow_sgl_ctx { + struct regpair base_sgl_addr; + __le16 curr_sge_off; + __le16 remainder_num_sges; + __le16 curr_sgl_index; + __le16 reserved; +}; + +/* Union of DIX SGL \ cached DIX sges */ +union fcoe_dix_desc_ctx { + struct fcoe_slow_sgl_ctx dix_sgl; + struct scsi_sge cached_dix_sge; +}; + +/* The fcoe storm task context of Ystorm */ struct ystorm_fcoe_task_st_ctx { u8 task_type; u8 sgl_mode; -#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 -#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F -#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F +#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 u8 cached_dix_sge; u8 expect_first_xfer; __le32 num_pbf_zero_write; @@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx { u8 reserved2[8]; }; -struct ystorm_fcoe_task_ag_ctx { +struct e4_ystorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define 
YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 byte2; __le32 reg0; u8 byte3; @@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx { __le32 reg2; }; -struct tstorm_fcoe_task_ag_ctx { +struct e4_tstorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 +#define 
E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 u8 flags1; -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 u8 flags3; -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 +#define 
E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 u8 flags4; -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 cleanup_state; __le16 last_sent_tid; __le32 rec_rr_tov_exp_timeout; @@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx { __le32 data_offset_next; }; +/* Expected relative offsets */ +struct fcoe_exp_ro { + __le32 data_offset; + __le32 reserved; +}; + +/* Union of Cleanup address \ expected relative offsets */ +union fcoe_cleanup_addr_exp_ro_union { + struct regpair abts_rsp_fc_payload_hi; + struct fcoe_exp_ro exp_ro; +}; + +/* Fields copied from the ABTS response packet */ +struct fcoe_abts_pkt { + __le32 abts_rsp_fc_payload_lo; + __le16 abts_rsp_rx_id; + u8 abts_rsp_rctl; + u8 reserved2; +}; +
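Every bitfield in these task contexts is encoded as a _MASK/_SHIFT define pair over a small flags byte or word. As a minimal, self-contained sketch of the access idiom — the GET_FIELD/SET_FIELD helpers below are paraphrased from the ones qed declares in include/linux/qed/common_hsi.h, so treat the exact definitions as an assumption — reading and updating the Tstorm aggregative flags looks like this:

#include <linux/types.h>

/* Paraphrased qed helpers; the canonical versions live in
 * include/linux/qed/common_hsi.h. They token-paste "_MASK"/"_SHIFT"
 * onto the field name, which is why each field above is declared as
 * exactly such a pair of defines.
 */
#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & name ## _MASK)
#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name ## _MASK) << (name ## _SHIFT));	\
		(value) |= (((u64)(flag)) << (name ## _SHIFT));		\
	} while (0)

/* Hypothetical usage: test the ABTS-wait bit, then mark the task valid */
static void example_tstorm_flags(struct e4_tstorm_fcoe_task_ag_ctx *ctx)
{
	u8 waiting = GET_FIELD(ctx->flags0,
			       E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F);

	if (!waiting)
		SET_FIELD(ctx->flags0, E4_TSTORM_FCOE_TASK_AG_CTX_VALID, 1);
}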
+/* FW read-write (modifiable) part of the fcoe task storm context of Tstorm */ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; __le16 flags; -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF -#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF +#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 __le16 seq_cnt; u8 seq_id; u8 ooo_rx_seq_id; @@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { __le16 reserved1; }; +/* FW read-only part of the fcoe task storm context of Tstorm */ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { u8 task_type; u8 dev_type; @@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { __le32 rsrv; }; +/* The fcoe task storm context of Tstorm */ struct tstorm_fcoe_task_st_ctx { struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; }; -struct mstorm_fcoe_task_ag_ctx { +struct e4_mstorm_fcoe_task_ag_ctx { u8 byte0; u8 byte1; __le16 icid; u8 flags0; -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1
-#define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 u8 flags2; -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 u8 cleanup_state; __le32 received_bytes; u8 byte3; @@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx { __le32 reg2; }; +/* The fcoe task storm context of Mstorm */ struct mstorm_fcoe_task_st_ctx { struct regpair rsp_buf_addr; __le32 rsrv[2]; @@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx { __le32 data_buffer_offset; __le16 parent_id; __le16 flags; -#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF -#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 -#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 -#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 -#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 -#define 
MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 -#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 -#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 -#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 -#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF +#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 +#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 +#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 struct scsi_cached_sges data_desc; }; -struct ustorm_fcoe_task_ag_ctx { +struct e4_ustorm_fcoe_task_ag_ctx { u8 reserved; u8 byte1; __le16 icid; u8 flags0; -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 u8 flags2; -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 
-#define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 u8 flags3; -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 __le32 dif_err_intervals; __le32 dif_error_1st_interval; __le32 global_cq_num; @@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx { __le32 reg5; }; -struct fcoe_task_context { +/* FCoE task context */ +struct e4_fcoe_task_context { struct ystorm_fcoe_task_st_ctx ystorm_st_context; struct regpair ystorm_st_padding[2]; struct tdif_task_context tdif_context; - struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; - struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; + struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context; + struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context; struct timers_context timer_context; struct tstorm_fcoe_task_st_ctx tstorm_st_context; struct regpair tstorm_st_padding[2]; - struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; + struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context; struct mstorm_fcoe_task_st_ctx mstorm_st_context; - struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; + struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context; struct rdif_task_context rdif_context; }; +/* FCoE additional WQE (Sq/XferQ) information */ +union fcoe_additional_info_union { + __le32 previous_tid; + __le32 parent_tid; + __le32 
burst_length; + __le32 seq_rec_updated_offset; +}; + +/* FCoE ramrod completion status */ +enum fcoe_completion_status { + FCOE_COMPLETION_STATUS_SUCCESS, + FCOE_COMPLETION_STATUS_FCOE_VER_ERR, + FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, + MAX_FCOE_COMPLETION_STATUS +}; + +/* FC address (SID/DID) network presentation */ +struct fc_addr_nw { + u8 addr_lo; + u8 addr_mid; + u8 addr_hi; +}; + +/* FCoE connection offload */ +struct fcoe_conn_offload_ramrod_data { + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 dst_mac_addr_lo; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_hi; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + __le16 src_mac_addr_hi; + __le16 tx_max_fc_pay_len; + __le16 e_d_tov_timer_val; + __le16 rx_max_fc_pay_len; + __le16 vlan_tag; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 + __le16 physical_q0; + __le16 rec_rr_tov_timer_val; + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1 +#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7 + __le16 conn_id; + u8 def_q_idx; + u8 reserved[5]; +}; +
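The vlan_tag word of the offload ramrod above packs the 802.1Q VLAN ID, CFI bit, and priority through the three MASK/SHIFT pairs, and the field itself is little-endian. A hedged sketch of filling it — fill_vlan_tag is a hypothetical helper, not part of the header, and it uses plain shift-and-mask so it stands alone:

#include <linux/types.h>

/* Illustrative only: build the packed, little-endian vlan_tag word for
 * the connection-offload ramrod (12-bit VLAN ID at bit 0, 3-bit priority
 * at bit 13, per the defines above). cpu_to_le16 is the kernel's
 * byte-order helper.
 */
static void fill_vlan_tag(struct fcoe_conn_offload_ramrod_data *ramrod,
			  u16 vid, u8 prio)
{
	u16 tag = 0;

	tag |= (vid & FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK) <<
	       FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT;
	tag |= ((u16)prio & FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK) <<
	       FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
	ramrod->vlan_tag = cpu_to_le16(tag);
}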
+/* FCoE terminate connection request */ +struct fcoe_conn_terminate_ramrod_data { + struct regpair terminate_params_addr; +}; + +/* FCoE device type */ +enum fcoe_device_type { + FCOE_TASK_DEV_TYPE_DISK, + FCOE_TASK_DEV_TYPE_TAPE, + MAX_FCOE_DEVICE_TYPE +}; + +/* Data sgl */ +struct fcoe_fast_sgl_ctx { + struct regpair sgl_start_addr; + __le32 sgl_byte_offset; + __le16 task_reuse_cnt; + __le16 init_offset_in_first_sge; +}; + +/* FCoE firmware function init */ +struct fcoe_init_func_ramrod_data { + struct scsi_init_func_params func_params; + struct scsi_init_func_queues q_params; + __le16 mtu; + __le16 sq_num_pages_in_pbl; + __le32 reserved[3]; +}; + +/* FCoE: Mode of the connection: Target or Initiator or both */ +enum fcoe_mode_type { + FCOE_INITIATOR_MODE = 0x0, + FCOE_TARGET_MODE = 0x1, + FCOE_BOTH_OR_NOT_CHOSEN = 0x3, + MAX_FCOE_MODE_TYPE +}; + +/* Per PF FCoE receive path statistics - tStorm RAM structure */ +struct fcoe_rx_stat { + struct regpair fcoe_rx_byte_cnt; + struct regpair fcoe_rx_data_pkt_cnt; + struct regpair fcoe_rx_xfer_pkt_cnt; + struct regpair fcoe_rx_other_pkt_cnt; + __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; + __le32 fcoe_silent_drop_pkt_rq_full_cnt; + __le32 fcoe_silent_drop_pkt_crc_error_cnt; + __le32 fcoe_silent_drop_pkt_task_invalid_cnt; + __le32 fcoe_silent_drop_total_pkt_cnt; + __le32 rsrv; +}; + +/* FCoE SQE request type */ +enum fcoe_sqe_request_type { + SEND_FCOE_CMD, + SEND_FCOE_MIDPATH, + SEND_FCOE_ABTS_REQUEST, + FCOE_EXCHANGE_CLEANUP, + FCOE_SEQUENCE_RECOVERY, + SEND_FCOE_XFER_RDY, + SEND_FCOE_RSP, + SEND_FCOE_RSP_WITH_SENSE_DATA, + SEND_FCOE_TARGET_DATA, + SEND_FCOE_INITIATOR_DATA, + SEND_FCOE_XFER_CONTINUATION_RDY, + SEND_FCOE_TARGET_ABTS_RSP, + MAX_FCOE_SQE_REQUEST_TYPE +}; + +/* FCoE statistics request */ +struct fcoe_stat_ramrod_data { + struct regpair stat_params_addr; +}; + +/* FCoE task type */ +enum fcoe_task_type { + FCOE_TASK_TYPE_WRITE_INITIATOR, + FCOE_TASK_TYPE_READ_INITIATOR, + FCOE_TASK_TYPE_MIDPATH, + FCOE_TASK_TYPE_UNSOLICITED, + FCOE_TASK_TYPE_ABTS, + FCOE_TASK_TYPE_EXCHANGE_CLEANUP, + FCOE_TASK_TYPE_SEQUENCE_CLEANUP, + FCOE_TASK_TYPE_WRITE_TARGET, + FCOE_TASK_TYPE_READ_TARGET, + FCOE_TASK_TYPE_RSP, + FCOE_TASK_TYPE_RSP_SENSE_DATA, + FCOE_TASK_TYPE_ABTS_TARGET, + FCOE_TASK_TYPE_ENUM_SIZE, + MAX_FCOE_TASK_TYPE +}; + +/* Per PF FCoE transmit path statistics - pStorm RAM structure */ struct fcoe_tx_stat { struct regpair fcoe_tx_byte_cnt; struct regpair fcoe_tx_data_pkt_cnt; @@ -618,51 +690,55 @@ struct fcoe_tx_stat { struct regpair fcoe_tx_other_pkt_cnt; }; +/* FCoE SQ/XferQ element */ struct fcoe_wqe { __le16 task_id; __le16 flags; -#define FCOE_WQE_REQ_TYPE_MASK 0xF -#define FCOE_WQE_REQ_TYPE_SHIFT 0 -#define FCOE_WQE_SGL_MODE_MASK 0x1 -#define FCOE_WQE_SGL_MODE_SHIFT 4 -#define FCOE_WQE_CONTINUATION_MASK 0x1 -#define FCOE_WQE_CONTINUATION_SHIFT 5 -#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 -#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 -#define FCOE_WQE_RESERVED_MASK 0x1 -#define FCOE_WQE_RESERVED_SHIFT 7 -#define FCOE_WQE_NUM_SGES_MASK 0xF -#define FCOE_WQE_NUM_SGES_SHIFT 8 -#define FCOE_WQE_RESERVED1_MASK 0xF -#define FCOE_WQE_RESERVED1_SHIFT 12 +#define FCOE_WQE_REQ_TYPE_MASK 0xF +#define FCOE_WQE_REQ_TYPE_SHIFT 0 +#define FCOE_WQE_SGL_MODE_MASK 0x1 +#define FCOE_WQE_SGL_MODE_SHIFT 4 +#define FCOE_WQE_CONTINUATION_MASK 0x1 +#define FCOE_WQE_CONTINUATION_SHIFT 5 +#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 +#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 +#define FCOE_WQE_RESERVED_MASK 0x1 +#define FCOE_WQE_RESERVED_SHIFT 7 +#define FCOE_WQE_NUM_SGES_MASK 0xF +#define FCOE_WQE_NUM_SGES_SHIFT 8 +#define FCOE_WQE_RESERVED1_MASK 0xF +#define FCOE_WQE_RESERVED1_SHIFT 12 union fcoe_additional_info_union additional_info_union; }; +/* FCoE XFRQ element */ struct xfrqe_prot_flags { u8 flags; -#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF -#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 -#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 -#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 -#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 -#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 -#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 -#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 +#define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 +#define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 +#define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 +#define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 }; +/* FCoE doorbell data */ struct fcoe_db_data { u8 params; -#define FCOE_DB_DATA_DEST_MASK 0x3 -#define
FCOE_DB_DATA_DEST_SHIFT 0 -#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 -#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 -#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 -#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 -#define FCOE_DB_DATA_RESERVED_MASK 0x1 -#define FCOE_DB_DATA_RESERVED_SHIFT 5 -#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define FCOE_DB_DATA_DEST_MASK 0x3 +#define FCOE_DB_DATA_DEST_SHIFT 0 +#define FCOE_DB_DATA_AGG_CMD_MASK 0x3 +#define FCOE_DB_DATA_AGG_CMD_SHIFT 2 +#define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define FCOE_DB_DATA_RESERVED_MASK 0x1 +#define FCOE_DB_DATA_RESERVED_SHIFT 5 +#define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; + #endif /* __FCOE_COMMON__ */ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 85e086cba639..4cc9b37b8d95 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -32,47 +32,48 @@ #ifndef __ISCSI_COMMON__ #define __ISCSI_COMMON__ + /**********************/ /* ISCSI FW CONSTANTS */ /**********************/ /* iSCSI HSI constants */ -#define ISCSI_DEFAULT_MTU (1500) +#define ISCSI_DEFAULT_MTU (1500) /* KWQ (kernel work queue) layer codes */ -#define ISCSI_SLOW_PATH_LAYER_CODE (6) +#define ISCSI_SLOW_PATH_LAYER_CODE (6) /* iSCSI parameter defaults */ -#define ISCSI_DEFAULT_HEADER_DIGEST (0) -#define ISCSI_DEFAULT_DATA_DIGEST (0) -#define ISCSI_DEFAULT_INITIAL_R2T (1) -#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) -#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) -#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) -#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) -#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) +#define ISCSI_DEFAULT_HEADER_DIGEST (0) +#define ISCSI_DEFAULT_DATA_DIGEST (0) +#define ISCSI_DEFAULT_INITIAL_R2T (1) +#define ISCSI_DEFAULT_IMMEDIATE_DATA (1) +#define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) +#define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) +#define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) +#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) /* iSCSI parameter limits */ -#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) -#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) -#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) -#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) -#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) +#define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) +#define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_BURST_LENGTH (0x200) +#define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) +#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) +#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) -#define ISCSI_AHS_CNTL_SIZE 4 +#define ISCSI_AHS_CNTL_SIZE 4 -#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) +#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) /* iSCSI reserved params */ #define ISCSI_ITT_ALL_ONES (0xffffffff) #define ISCSI_TTT_ALL_ONES (0xffffffff) -#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 -#define ISCSI_OPTION_2_ON_CHIP_TCP 2 +#define ISCSI_OPTION_1_OFF_CHIP_TCP 1 +#define ISCSI_OPTION_2_ON_CHIP_TCP 2 -#define ISCSI_INITIATOR_MODE 0 -#define ISCSI_TARGET_MODE 1 +#define ISCSI_INITIATOR_MODE 0 +#define ISCSI_TARGET_MODE 1 /* iSCSI request op codes */ #define ISCSI_OPCODE_NOP_OUT (0) @@ -84,41 +85,48 @@ #define ISCSI_OPCODE_LOGOUT_REQUEST (6) /* iSCSI response/messages op codes */ -#define ISCSI_OPCODE_NOP_IN (0x20) -#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) -#define ISCSI_OPCODE_TMF_RESPONSE (0x22) -#define ISCSI_OPCODE_LOGIN_RESPONSE 
(0x23) -#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) -#define ISCSI_OPCODE_DATA_IN (0x25) -#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) -#define ISCSI_OPCODE_R2T (0x31) -#define ISCSI_OPCODE_ASYNC_MSG (0x32) -#define ISCSI_OPCODE_REJECT (0x3f) +#define ISCSI_OPCODE_NOP_IN (0x20) +#define ISCSI_OPCODE_SCSI_RESPONSE (0x21) +#define ISCSI_OPCODE_TMF_RESPONSE (0x22) +#define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) +#define ISCSI_OPCODE_TEXT_RESPONSE (0x24) +#define ISCSI_OPCODE_DATA_IN (0x25) +#define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) +#define ISCSI_OPCODE_R2T (0x31) +#define ISCSI_OPCODE_ASYNC_MSG (0x32) +#define ISCSI_OPCODE_REJECT (0x3f) /* iSCSI stages */ -#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) -#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) -#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) +#define ISCSI_STAGE_SECURITY_NEGOTIATION (0) +#define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) +#define ISCSI_STAGE_FULL_FEATURE_PHASE (3) /* iSCSI CQE errors */ -#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) -#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) +#define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) +#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) + +/* Union of data bd_opaque / tq_tid */ +union bd_opaque_tq_union { + __le16 bd_opaque; + __le16 tq_tid; +}; +/* iSCSI CQE error bitmap */ struct cqe_error_bitmap { u8 cqe_error_status_bits; -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 -#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 -#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 -#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 -#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 -#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 +#define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 +#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 +#define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 +#define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 +#define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 }; union cqe_error_status { @@ -126,86 +134,133 @@ union cqe_error_status { struct cqe_error_bitmap error_bits; }; +/* PDU header as opaque data dwords */ struct data_hdr { __le32 data[12]; };
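cqe_error_status above is one byte viewed either raw or through cqe_error_bitmap, so decoding is plain shift-and-mask against the defines just introduced. A small illustrative helper (the function name is hypothetical, not part of the header):

#include <linux/types.h>

/* Hypothetical decode helper: report whether a CQE carries a data-digest
 * or data-truncation error. Uses only the MASK/SHIFT defines above.
 */
static bool cqe_data_error(union cqe_error_status err)
{
	u8 bits = err.error_bits.cqe_error_status_bits;
	bool digest = (bits >> CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT) &
		      CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK;
	bool trunc = (bits >> CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT) &
		     CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK;

	return digest || trunc;
}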
-struct iscsi_async_msg_hdr { - __le16 reserved0; - u8 flags_attr; -#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F -#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 -#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 - u8 opcode; - __le32 hdr_second_dword; -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun; - __le32 all_ones; - __le32 reserved1; - __le32 stat_sn; - __le32 exp_cmd_sn; - __le32 max_cmd_sn; - __le16 param1_rsrv; - u8 async_vcode; - u8 async_event; - __le16 param3_rsrv; - __le16 param2_rsrv; - __le32 reserved7; +struct lun_mapper_addr_reserved { + struct regpair lun_mapper_addr; + u8 reserved0[8]; +}; + +/* RDIF context for DIF on immediate */ +struct dif_on_immediate_params { + __le32 initial_ref_tag; + __le16 application_tag; + __le16 application_tag_mask; + __le16 flags1; +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8 +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF +#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 + u8 flags0; +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3 +#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1 +#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7 + u8 reserved_zero[5]; +}; + +/* iSCSI dif on immediate mode attributes union */ +union dif_configuration_params { + struct lun_mapper_addr_reserved lun_mapper_address; + struct dif_on_immediate_params def_dif_conf; +}; + +/* Union of data/r2t sequence number */ +union iscsi_seq_num { + __le16 data_sn; + __le16 r2t_sn; };
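flags1 of dif_on_immediate_params concentrates the per-field validate/forward switches for DIF protection on immediate data. A sketch of packing one plausible configuration follows; the bit semantics are inferred from the define names, so treat it as illustrative only:

#include <linux/types.h>

/* Illustrative: enable guard validation plus guard/ref-tag forwarding.
 * cpu_to_le16 is the kernel byte-order helper; the field meanings are
 * assumptions based on the names above.
 */
static __le16 dif_imm_flags1(void)
{
	u16 v = 0;

	v |= 1 << DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT;
	v |= 1 << DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT;
	v |= 1 << DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT;

	return cpu_to_le16(v);
}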
-struct iscsi_cmd_hdr { - __le16 reserved1; - u8 flags_attr; -#define ISCSI_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_CMD_HDR_READ_MASK 0x1 -#define ISCSI_CMD_HDR_READ_SHIFT 6 -#define ISCSI_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_CMD_HDR_FINAL_SHIFT 7 - u8 hdr_first_byte; -#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F -#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 -#define ISCSI_CMD_HDR_IMM_MASK 0x1 -#define ISCSI_CMD_HDR_IMM_SHIFT 6 -#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 -#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 - __le32 hdr_second_dword; -#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 - struct regpair lun; - __le32 itt; - __le32 expected_transfer_length; - __le32 cmd_sn; - __le32 exp_stat_sn; - __le32 cdb[4]; +/* iSCSI DIF flags */ +struct iscsi_dif_flags { + u8 flags; +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF +#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 +#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 +#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 }; +/* Ystorm iSCSI task state */ +struct ystorm_iscsi_task_state { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 exp_r2t_sn; + __le32 buffer_offset; + union iscsi_seq_num seq_num; + struct iscsi_dif_flags dif_flags; + u8 flags; +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 +#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 +#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F +#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3 +}; + +/* Ystorm iSCSI task retransmit options */ +struct ystorm_iscsi_task_rxmit_opt { + __le32 fast_rxmit_sge_offset; + __le32 scan_start_buffer_offset; + __le32 fast_rxmit_buffer_offset; + u8 scan_start_sgl_index; + u8 fast_rxmit_sgl_index; + __le16 reserved; +}; + +/* iSCSI Common PDU header */ struct iscsi_common_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 hdr_first_byte; -#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F -#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 -#define ISCSI_COMMON_HDR_IMM_MASK 0x1 -#define ISCSI_COMMON_HDR_IMM_SHIFT 6 -#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 -#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 +#define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F +#define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 +#define ISCSI_COMMON_HDR_IMM_MASK 0x1 +#define ISCSI_COMMON_HDR_IMM_SHIFT 6 +#define ISCSI_COMMON_HDR_RSRV_MASK 0x1 +#define ISCSI_COMMON_HDR_RSRV_SHIFT 7 __le32 hdr_second_dword; -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun_reserved; __le32 itt; __le32 ttt; @@ -215,86 +270,60 @@ struct iscsi_common_hdr { __le32 data[3]; }; -struct iscsi_conn_offload_params { - struct regpair sq_pbl_addr; - struct regpair r2tq_pbl_addr; - struct regpair xhq_pbl_addr; - struct regpair uhq_pbl_addr; - __le32 initial_ack; - __le16 physical_q0; - __le16 physical_q1; - u8 flags; -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F -#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 - u8 pbl_page_size_log; - u8 pbe_page_size_log; - u8 default_cq; - __le32
stat_sn; -}; - -struct iscsi_slow_path_hdr { - u8 op_code; - u8 flags; -#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF -#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 -#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 -#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 -#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 -#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 -}; - -struct iscsi_conn_update_ramrod_params { - struct iscsi_slow_path_hdr hdr; - __le16 conn_id; - __le32 fw_cid; - u8 flags; -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3 -#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6 - u8 reserved0[3]; - __le32 max_seq_size; - __le32 max_send_pdu_length; - __le32 max_recv_pdu_length; - __le32 first_seq_length; +/* iSCSI Command PDU header */ +struct iscsi_cmd_hdr { + __le16 reserved1; + u8 flags_attr; +#define ISCSI_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_CMD_HDR_READ_MASK 0x1 +#define ISCSI_CMD_HDR_READ_SHIFT 6 +#define ISCSI_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_CMD_HDR_FINAL_SHIFT 7 + u8 hdr_first_byte; +#define ISCSI_CMD_HDR_OPCODE_MASK 0x3F +#define ISCSI_CMD_HDR_OPCODE_SHIFT 0 +#define ISCSI_CMD_HDR_IMM_MASK 0x1 +#define ISCSI_CMD_HDR_IMM_SHIFT 6 +#define ISCSI_CMD_HDR_RSRV1_MASK 0x1 +#define ISCSI_CMD_HDR_RSRV1_SHIFT 7 + __le32 hdr_second_dword; +#define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 itt; + __le32 expected_transfer_length; + __le32 cmd_sn; __le32 exp_stat_sn; + __le32 cdb[4]; }; +/* iSCSI Command PDU header with Extended CDB (Initiator Mode) */ struct iscsi_ext_cdb_cmd_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 -#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 -#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 -#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 -#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 +#define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 +#define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 +#define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 +#define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 u8 
opcode; __le32 hdr_second_dword; -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF -#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF +#define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 struct regpair lun; __le32 itt; __le32 expected_transfer_length; @@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr { struct scsi_sge cdb_sge; }; +/* iSCSI login request PDU header */ struct iscsi_login_req_hdr { u8 version_min; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 -#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -334,6 +364,7 @@ struct iscsi_login_req_hdr { __le32 reserved2[4]; }; +/* iSCSI logout request PDU header */ struct iscsi_logout_req_hdr { __le16 reserved0; u8 reason_code; @@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr { __le32 reserved4[4]; }; +/* iSCSI Data-out PDU header */ struct iscsi_data_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -368,22 +400,23 @@ struct iscsi_data_out_hdr { __le32 reserved5; }; +/* iSCSI Data-in PDU header */ struct iscsi_data_in_hdr { u8 status_rsvd; u8 reserved1; u8 flags; -#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 -#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 -#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 -#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 -#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 -#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 -#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 -#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 -#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 +#define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 +#define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 +#define 
ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 +#define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 +#define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 +#define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 +#define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 +#define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 +#define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 +#define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -397,6 +430,7 @@ struct iscsi_data_in_hdr { __le32 residual_count; }; +/* iSCSI R2T PDU header */ struct iscsi_r2t_hdr { u8 reserved0[3]; u8 opcode; @@ -412,13 +446,14 @@ struct iscsi_r2t_hdr { __le32 desired_data_trns_len; }; +/* iSCSI NOP-out PDU header */ struct iscsi_nop_out_hdr { __le16 reserved1; u8 flags_attr; -#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 u8 opcode; __le32 reserved2; struct regpair lun; @@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr { __le32 reserved6; }; +/* iSCSI NOP-in PDU header */ struct iscsi_nop_in_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F -#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 -#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 -#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 +#define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F +#define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 +#define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 +#define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr { __le32 reserved7; }; +/* iSCSI Login Response PDU header */ struct iscsi_login_response_hdr { u8 version_active; u8 version_max; u8 flags_attr; -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 -#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 -#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 -#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 +#define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 +#define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 +#define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define 
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 isid_tabc; __le16 tsih; __le16 isid_d; @@ -490,16 +527,17 @@ struct iscsi_login_response_hdr { __le32 reserved4[2]; }; +/* iSCSI Logout Response PDU header */ struct iscsi_logout_response_hdr { u8 reserved1; u8 response; u8 flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 __le32 reserved2[2]; __le32 itt; __le32 reserved3; @@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr { __le32 reserved5[1]; }; +/* iSCSI Text Request PDU header */ struct iscsi_text_request_hdr { __le16 reserved0; u8 flags_attr; -#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 -#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 -#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 +#define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 +#define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 +#define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -535,21 +574,22 @@ struct iscsi_text_request_hdr { __le32 reserved4[4]; }; +/* iSCSI Text Response PDU header */ struct iscsi_text_response_hdr { __le16 reserved1; u8 flags; -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F -#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 -#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 -#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F +#define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 +#define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 +#define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 ttt; @@ -559,15 +599,16 @@ struct iscsi_text_response_hdr { 
__le32 reserved4[3]; }; +/* iSCSI TMF Request PDU header */ struct iscsi_tmf_request_hdr { __le16 reserved0; u8 function; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 rtt; @@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr { u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 itt; __le32 reserved1; @@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr { __le32 reserved4[3]; }; +/* iSCSI Response PDU header */ struct iscsi_response_hdr { u8 hdr_status; u8 hdr_response; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair lun; __le32 itt; __le32 snack_tag; @@ -618,16 +660,17 @@ struct iscsi_response_hdr { __le32 residual_count; }; +/* iSCSI Reject PDU header */ struct iscsi_reject_hdr { u8 reserved4; u8 hdr_reason; u8 hdr_flags; u8 opcode; __le32 hdr_second_dword; -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF -#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF -#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 struct regpair reserved0; __le32 all_ones; __le32 reserved2; @@ -638,6 +681,35 @@ struct iscsi_reject_hdr { __le32 reserved3[2]; }; +/* iSCSI Asynchronous Message PDU header */ +struct iscsi_async_msg_hdr { + __le16 reserved0; + u8 flags_attr; +#define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F +#define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 +#define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 + u8 opcode; + __le32 hdr_second_dword; +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF +#define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF +#define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 + struct regpair lun; + __le32 all_ones; + __le32 reserved1; + __le32 stat_sn; + __le32 exp_cmd_sn; + __le32 max_cmd_sn; + __le16 param1_rsrv; + u8 async_vcode; + u8 async_event; + __le16 param3_rsrv; + __le16 param2_rsrv; + __le32 reserved7; +}; + +/* PDU header part of Ystorm task context */ union iscsi_task_hdr { struct iscsi_common_hdr 
common; struct data_hdr data; @@ -661,6 +733,348 @@ union iscsi_task_hdr { struct iscsi_async_msg_hdr async_msg; }; +/* The iscsi storm task context of Ystorm */ +struct ystorm_iscsi_task_st_ctx { + struct ystorm_iscsi_task_state state; + struct ystorm_iscsi_task_rxmit_opt rxmit_opt; + union iscsi_task_hdr pdu_hdr; +}; + +struct e4_ystorm_iscsi_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 word0; + u8 flags0; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 TTT; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct e4_mstorm_iscsi_task_ag_ctx { + u8 cdu_validation; + u8 byte1; + __le16 task_cid; + u8 flags0; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 + u8 flags1; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 +#define 
E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 byte2; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word1; +}; + +struct e4_ustorm_iscsi_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 + u8 flags1; +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 + u8 flags3; +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define 
E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 rcv_cont_len; + __le32 exp_cont_len; + __le32 total_data_acked; + __le32 exp_data_acked; + u8 next_tid_valid; + u8 byte3; + __le16 word1; + __le16 next_tid; + __le16 word3; + __le32 hdr_residual_count; + __le32 exp_r2t_sn; +}; + +/* The iscsi storm task context of Mstorm */ +struct mstorm_iscsi_task_st_ctx { + struct scsi_cached_sges data_desc; + struct scsi_sgl_params sgl_params; + __le32 rem_task_size; + __le32 data_buffer_offset; + u8 task_type; + struct iscsi_dif_flags dif_flags; + __le16 dif_task_icid; + struct regpair sense_db; + __le32 expected_itt; + __le32 reserved1; +}; + +struct iscsi_reg1 { + __le32 reg1_map; +#define ISCSI_REG1_NUM_SGES_MASK 0xF +#define ISCSI_REG1_NUM_SGES_SHIFT 0 +#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF +#define ISCSI_REG1_RESERVED1_SHIFT 4 +}; + +struct tqe_opaque { + __le16 opaque[2]; +}; + +/* The iscsi storm task context of Ustorm */ +struct ustorm_iscsi_task_st_ctx { + __le32 rem_rcv_len; + __le32 exp_data_transfer_len; + __le32 exp_data_sn; + struct regpair lun; + struct iscsi_reg1 reg1; + u8 flags2; +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 + struct iscsi_dif_flags dif_flags; + __le16 reserved3; + struct tqe_opaque tqe_opaque_list; + __le32 reserved5; + __le32 reserved6; + __le32 reserved7; + u8 task_type; + u8 error_flags; +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 + u8 flags; +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 +#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 +#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 + u8 cq_rss_number; +}; + +/* iscsi task context */ +struct e4_iscsi_task_context { + struct ystorm_iscsi_task_st_ctx ystorm_st_context; + struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; + struct regpair ystorm_ag_padding[2]; + struct tdif_task_context tdif_context; + struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; + struct regpair mstorm_ag_padding[2]; + struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; + struct mstorm_iscsi_task_st_ctx mstorm_st_context; + struct ustorm_iscsi_task_st_ctx ustorm_st_context; + struct rdif_task_context rdif_context; +}; + +/* iSCSI connection offload params passed by driver to FW in ISCSI offload + * ramrod. 
+ */ +struct iscsi_conn_offload_params { + struct regpair sq_pbl_addr; + struct regpair r2tq_pbl_addr; + struct regpair xhq_pbl_addr; + struct regpair uhq_pbl_addr; + __le32 initial_ack; + __le16 physical_q0; + __le16 physical_q1; + u8 flags; +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F +#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 + u8 pbl_page_size_log; + u8 pbe_page_size_log; + u8 default_cq; + __le32 stat_sn; +}; + +/* iSCSI connection statistics */ +struct iscsi_conn_stats_params { + struct regpair iscsi_tcp_tx_packets_cnt; + struct regpair iscsi_tcp_tx_bytes_cnt; + struct regpair iscsi_tcp_tx_rxmit_cnt; + struct regpair iscsi_tcp_rx_packets_cnt; + struct regpair iscsi_tcp_rx_bytes_cnt; + struct regpair iscsi_tcp_rx_dup_ack_cnt; + __le32 iscsi_tcp_rx_chksum_err_cnt; + __le32 reserved; +}; + +/* spe message header */ +struct iscsi_slow_path_hdr { + u8 op_code; + u8 flags; +#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF +#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 +#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 +#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 +}; + +/* iSCSI connection update params passed by driver to FW in ISCSI update + * ramrod. + */ +struct iscsi_conn_update_ramrod_params { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 flags; +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1 +#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7 + u8 reserved0[3]; + __le32 max_seq_size; + __le32 max_send_pdu_length; + __le32 max_recv_pdu_length; + __le32 first_seq_length; + __le32 exp_stat_sn; + union dif_configuration_params dif_on_imme_params; +}; + +/* iSCSI CQ element */ struct iscsi_cqe_common { __le16 conn_id; u8 cqe_type; @@ -669,6 +1083,7 @@ struct iscsi_cqe_common { union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ struct iscsi_cqe_solicited { __le16 conn_id; u8 cqe_type; @@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited { u8 fw_dbg_field; u8 caused_conn_err; u8 reserved0[3]; - __le32 reserved1[1]; + __le32 data_truncated_bytes; union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ struct iscsi_cqe_unsolicited { __le16 conn_id; u8 cqe_type; @@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited { __le16 reserved0; u8 
reserved1; u8 unsol_cqe_type; - struct regpair rqe_opaque; + __le16 rqe_opaque; + __le16 reserved2[3]; union iscsi_task_hdr iscsi_hdr; }; +/* iSCSI CQ element */ union iscsi_cqe { struct iscsi_cqe_common cqe_common; struct iscsi_cqe_solicited cqe_solicited; struct iscsi_cqe_unsolicited cqe_unsolicited; }; +/* iSCSI CQE type */ enum iscsi_cqes_type { ISCSI_CQE_TYPE_SOLICITED = 1, ISCSI_CQE_TYPE_UNSOLICITED, @@ -708,6 +1127,7 @@ enum iscsi_cqes_type { MAX_ISCSI_CQES_TYPE }; +/* iSCSI CQE unsolicited type */ enum iscsi_cqe_unsolicited_type { ISCSI_CQE_UNSOLICITED_NONE, ISCSI_CQE_UNSOLICITED_SINGLE, @@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type { MAX_ISCSI_CQE_UNSOLICITED_TYPE }; - +/* iscsi debug modes */ struct iscsi_debug_modes { u8 flags; -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 -#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 -}; - -struct iscsi_dif_flags { - u8 flags; -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF -#define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 -#define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 -#define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 -}; - +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 +#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7 +}; + +/* iSCSI kernel completion queue IDs */ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_INIT_FUNC = 0, ISCSI_EVENT_TYPE_DESTROY_FUNC, @@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode { ISCSI_EVENT_TYPE_CLEAR_SQ, ISCSI_EVENT_TYPE_TERMINATE_CONN, ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, + ISCSI_EVENT_TYPE_COLLECT_STATS_CONN, ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, - RESERVED9, ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, @@ -772,6 +1183,7 @@ 
MAX_ISCSI_EQE_OPCODE }; +/* iSCSI EQE and CQE completion status */ enum iscsi_error_types { ISCSI_STATUS_NONE = 0, ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, @@ -823,7 +1235,7 @@ enum iscsi_error_types { MAX_ISCSI_ERROR_TYPES }; - +/* iSCSI Ramrod Command IDs */ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_UNUSED = 0, ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, @@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id { ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, + ISCSI_RAMROD_CMD_ID_CONN_STATS = 8, MAX_ISCSI_RAMROD_CMD_ID }; -struct iscsi_reg1 { - __le32 reg1_map; -#define ISCSI_REG1_NUM_SGES_MASK 0xF -#define ISCSI_REG1_NUM_SGES_SHIFT 0 -#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF -#define ISCSI_REG1_RESERVED1_SHIFT 4 -}; - -union iscsi_seq_num { - __le16 data_sn; - __le16 r2t_sn; -}; - +/* iSCSI connection MAC update request */ struct iscsi_spe_conn_mac_update { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update { u8 reserved0[2]; }; +/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in + * iSCSI offload ramrod. + */ struct iscsi_spe_conn_offload { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload { struct tcp_offload_params tcp; }; +/* iSCSI and TCP connection (Option 2) offload params passed by driver to FW in + * iSCSI offload ramrod. + */ struct iscsi_spe_conn_offload_option2 { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 { struct tcp_offload_params_opt2 tcp; }; +/* iSCSI collect connection statistics request */ +struct iscsi_spe_conn_statistics { + struct iscsi_slow_path_hdr hdr; + __le16 conn_id; + __le32 fw_cid; + u8 reset_stats; + u8 reserved0[7]; + struct regpair stats_cnts_addr; +}; + +/* iSCSI connection termination request */ struct iscsi_spe_conn_termination { struct iscsi_slow_path_hdr hdr; __le16 conn_id; @@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination { struct regpair query_params_addr; }; +/* iSCSI firmware function destroy parameters */ struct iscsi_spe_func_dstry { struct iscsi_slow_path_hdr hdr; __le16 reserved0; __le32 reserved1; }; +/* iSCSI firmware function init parameters */ struct iscsi_spe_func_init { struct iscsi_slow_path_hdr hdr; __le16 half_way_close_timeout; @@ -898,283 +1318,19 @@ struct iscsi_spe_func_init { u8 num_r2tq_pages_in_ring; u8 num_uhq_pages_in_ring; u8 ll2_rx_queue_id; - u8 ooo_enable; + u8 flags; +#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 +#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 +#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F +#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1 struct iscsi_debug_modes debug_mode; __le16 reserved1; __le32 reserved2; - __le32 reserved3; - __le32 reserved4; struct scsi_init_func_params func_params; struct scsi_init_func_queues q_params; }; -struct ystorm_iscsi_task_state { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 exp_r2t_sn; - __le32 buffer_offset; - union iscsi_seq_num seq_num; - struct iscsi_dif_flags dif_flags; - u8 flags; -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 -#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F -#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2 -}; - -struct ystorm_iscsi_task_rxmit_opt { - __le32 fast_rxmit_sge_offset; 
- __le32 scan_start_buffer_offset; - __le32 fast_rxmit_buffer_offset; - u8 scan_start_sgl_index; - u8 fast_rxmit_sgl_index; - __le16 reserved; -}; - -struct ystorm_iscsi_task_st_ctx { - struct ystorm_iscsi_task_state state; - struct ystorm_iscsi_task_rxmit_opt rxmit_opt; - union iscsi_task_hdr pdu_hdr; -}; - -struct ystorm_iscsi_task_ag_ctx { - u8 reserved; - u8 byte1; - __le16 word0; - u8 flags0; -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 - u8 flags1; -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 -#define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 TTT; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct mstorm_iscsi_task_ag_ctx { - u8 cdu_validation; - u8 byte1; - __le16 task_cid; - u8 flags0; -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 - u8 flags1; -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 - u8 flags2; -#define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define 
MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 - u8 byte2; - __le32 reg0; - u8 byte3; - u8 byte4; - __le16 word1; -}; - -struct ustorm_iscsi_task_ag_ctx { - u8 reserved; - u8 state; - __le16 icid; - u8 flags0; -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 - u8 flags1; -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 - u8 flags2; -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 - u8 flags3; -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 -#define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF -#define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 - __le32 dif_err_intervals; - __le32 dif_error_1st_interval; - __le32 rcv_cont_len; - __le32 exp_cont_len; - __le32 total_data_acked; - __le32 exp_data_acked; - u8 next_tid_valid; - u8 byte3; - __le16 word1; - __le16 next_tid; - __le16 word3; - __le32 hdr_residual_count; - __le32 exp_r2t_sn; -}; - -struct 
mstorm_iscsi_task_st_ctx { - struct scsi_cached_sges data_desc; - struct scsi_sgl_params sgl_params; - __le32 rem_task_size; - __le32 data_buffer_offset; - u8 task_type; - struct iscsi_dif_flags dif_flags; - u8 reserved0[2]; - struct regpair sense_db; - __le32 expected_itt; - __le32 reserved1; -}; - -struct ustorm_iscsi_task_st_ctx { - __le32 rem_rcv_len; - __le32 exp_data_transfer_len; - __le32 exp_data_sn; - struct regpair lun; - struct iscsi_reg1 reg1; - u8 flags2; -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 - struct iscsi_dif_flags dif_flags; - __le16 reserved3; - __le32 reserved4; - __le32 reserved5; - __le32 reserved6; - __le32 reserved7; - u8 task_type; - u8 error_flags; -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 - u8 flags; -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 -#define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 -#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 -#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 - u8 cq_rss_number; -}; - -struct iscsi_task_context { - struct ystorm_iscsi_task_st_ctx ystorm_st_context; - struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; - struct regpair ystorm_ag_padding[2]; - struct tdif_task_context tdif_context; - struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; - struct regpair mstorm_ag_padding[2]; - struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; - struct mstorm_iscsi_task_st_ctx mstorm_st_context; - struct ustorm_iscsi_task_st_ctx ustorm_st_context; - struct rdif_task_context rdif_context; -}; - +/* iSCSI task type */ enum iscsi_task_type { ISCSI_TASK_TYPE_INITIATOR_WRITE, ISCSI_TASK_TYPE_INITIATOR_READ, @@ -1186,53 +1342,57 @@ enum iscsi_task_type { ISCSI_TASK_TYPE_TARGET_READ, ISCSI_TASK_TYPE_TARGET_RESPONSE, ISCSI_TASK_TYPE_LOGIN_RESPONSE, + ISCSI_TASK_TYPE_TARGET_IMM_W_DIF, MAX_ISCSI_TASK_TYPE }; +/* iSCSI DesiredDataTransferLength/ttt union */ union iscsi_ttt_txlen_union { __le32 desired_tx_len; __le32 ttt; }; +/* iSCSI uHQ element */ struct iscsi_uhqe { __le32 reg1; -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF -#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 -#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 -#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 -#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 -#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 -#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 -#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 -#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 -#define 
ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 -#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF +#define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 +#define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 +#define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 +#define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 +#define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 +#define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 +#define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 +#define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 +#define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 __le32 reg2; -#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF -#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 -#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF -#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 +#define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF +#define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 +#define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF +#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 }; - +/* iSCSI WQ element */ struct iscsi_wqe { __le16 task_id; u8 flags; -#define ISCSI_WQE_WQE_TYPE_MASK 0x7 -#define ISCSI_WQE_WQE_TYPE_SHIFT 0 -#define ISCSI_WQE_NUM_SGES_MASK 0xF -#define ISCSI_WQE_NUM_SGES_SHIFT 3 -#define ISCSI_WQE_RESPONSE_MASK 0x1 -#define ISCSI_WQE_RESPONSE_SHIFT 7 +#define ISCSI_WQE_WQE_TYPE_MASK 0x7 +#define ISCSI_WQE_WQE_TYPE_SHIFT 0 +#define ISCSI_WQE_NUM_SGES_MASK 0xF +#define ISCSI_WQE_NUM_SGES_SHIFT 3 +#define ISCSI_WQE_RESPONSE_MASK 0x1 +#define ISCSI_WQE_RESPONSE_SHIFT 7 struct iscsi_dif_flags prot_flags; __le32 contlen_cdbsize; -#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF -#define ISCSI_WQE_CONT_LEN_SHIFT 0 -#define ISCSI_WQE_CDB_SIZE_MASK 0xFF -#define ISCSI_WQE_CDB_SIZE_SHIFT 24 +#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF +#define ISCSI_WQE_CONT_LEN_SHIFT 0 +#define ISCSI_WQE_CDB_SIZE_MASK 0xFF +#define ISCSI_WQE_CDB_SIZE_SHIFT 24 }; +/* iSCSI wqe type */ enum iscsi_wqe_type { ISCSI_WQE_TYPE_NORMAL, ISCSI_WQE_TYPE_TASK_CLEANUP, @@ -1244,6 +1404,7 @@ enum iscsi_wqe_type { MAX_ISCSI_WQE_TYPE }; +/* iSCSI xHQ element */ struct iscsi_xhqe { union iscsi_ttt_txlen_union ttt_or_txlen; __le32 exp_stat_sn; @@ -1251,120 +1412,134 @@ struct iscsi_xhqe { u8 total_ahs_length; u8 opcode; u8 flags; -#define ISCSI_XHQE_FINAL_MASK 0x1 -#define ISCSI_XHQE_FINAL_SHIFT 0 -#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 -#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 -#define ISCSI_XHQE_NUM_SGES_MASK 0xF -#define ISCSI_XHQE_NUM_SGES_SHIFT 2 -#define ISCSI_XHQE_RESERVED0_MASK 0x3 -#define ISCSI_XHQE_RESERVED0_SHIFT 6 +#define ISCSI_XHQE_FINAL_MASK 0x1 +#define ISCSI_XHQE_FINAL_SHIFT 0 +#define ISCSI_XHQE_STATUS_BIT_MASK 0x1 +#define ISCSI_XHQE_STATUS_BIT_SHIFT 1 +#define ISCSI_XHQE_NUM_SGES_MASK 0xF +#define ISCSI_XHQE_NUM_SGES_SHIFT 2 +#define ISCSI_XHQE_RESERVED0_MASK 0x3 +#define ISCSI_XHQE_RESERVED0_SHIFT 6 union iscsi_seq_num seq_num; __le16 reserved1; }; +/* Per PF iSCSI receive path statistics - mStorm RAM structure */ struct mstorm_iscsi_stats_drv { struct regpair iscsi_rx_dropped_pdus_task_not_valid; + struct regpair iscsi_rx_dup_ack_cnt; }; +/* Per PF iSCSI transmit path statistics - pStorm RAM structure */ struct pstorm_iscsi_stats_drv { struct regpair iscsi_tx_bytes_cnt; struct regpair iscsi_tx_packet_cnt; }; +/* Per PF iSCSI receive path statistics - tStorm RAM structure */ struct tstorm_iscsi_stats_drv { struct regpair iscsi_rx_bytes_cnt; struct regpair iscsi_rx_packet_cnt; struct regpair iscsi_rx_new_ooo_isle_events_cnt; + struct regpair iscsi_rx_tcp_payload_bytes_cnt; + struct regpair iscsi_rx_tcp_pkt_cnt; + 
struct regpair iscsi_rx_pure_ack_cnt; __le32 iscsi_cmdq_threshold_cnt; __le32 iscsi_rq_threshold_cnt; __le32 iscsi_immq_threshold_cnt; }; +/* Per PF iSCSI receive path statistics - uStorm RAM structure */ struct ustorm_iscsi_stats_drv { struct regpair iscsi_rx_data_pdu_cnt; struct regpair iscsi_rx_r2t_pdu_cnt; struct regpair iscsi_rx_total_pdu_cnt; }; +/* Per PF iSCSI transmit path statistics - xStorm RAM structure */ struct xstorm_iscsi_stats_drv { struct regpair iscsi_tx_go_to_slow_start_event_cnt; struct regpair iscsi_tx_fast_retransmit_event_cnt; + struct regpair iscsi_tx_pure_ack_cnt; + struct regpair iscsi_tx_delayed_ack_cnt; }; +/* Per PF iSCSI transmit path statistics - yStorm RAM structure */ struct ystorm_iscsi_stats_drv { struct regpair iscsi_tx_data_pdu_cnt; struct regpair iscsi_tx_r2t_pdu_cnt; struct regpair iscsi_tx_total_pdu_cnt; + struct regpair iscsi_tx_tcp_payload_bytes_cnt; + struct regpair iscsi_tx_tcp_pkt_cnt; }; -struct tstorm_iscsi_task_ag_ctx { +struct e4_tstorm_iscsi_task_ag_ctx { u8 byte0; u8 byte1; __le16 word0; u8 flags0; -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF -#define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 u8 flags1; -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 u8 flags2; -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 +#define 
E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 u8 flags3; -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 u8 flags4; -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 -#define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 u8 byte2; __le16 word1; __le32 reg0; @@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx { __le32 reg1; __le32 reg2; }; + +/* iSCSI doorbell data */ struct iscsi_db_data { u8 params; -#define 
ISCSI_DB_DATA_DEST_MASK 0x3 -#define ISCSI_DB_DATA_DEST_SHIFT 0 -#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 -#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 -#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 -#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 -#define ISCSI_DB_DATA_RESERVED_MASK 0x1 -#define ISCSI_DB_DATA_RESERVED_SHIFT 5 -#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 -#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 +#define ISCSI_DB_DATA_DEST_MASK 0x3 +#define ISCSI_DB_DATA_DEST_SHIFT 0 +#define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 +#define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 +#define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 +#define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 +#define ISCSI_DB_DATA_RESERVED_MASK 0x1 +#define ISCSI_DB_DATA_RESERVED_SHIFT 5 +#define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 u8 agg_flags; __le16 sq_prod; }; diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index b8b3e1cfae90..c6cfd39cd910 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -29,9 +29,12 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ + #ifndef __IWARP_COMMON__ #define __IWARP_COMMON__ + #include <linux/qed/rdma_common.h> + /************************/ /* IWARP FW CONSTANTS */ /************************/ @@ -40,14 +43,14 @@ #define IWARP_PASSIVE_MODE 1 #define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) -#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) -#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) -#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) -#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) +#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) +#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) -#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) -#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) +#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) +#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) -#define IWARP_MAX_QPS (64 * 1024) +#define IWARP_MAX_QPS (64 * 1024) #endif /* __IWARP_COMMON__ */ diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d60de4a39810..147d08ccf813 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -61,6 +61,35 @@ struct qed_txq_start_ret_params { void *p_handle; }; +enum qed_filter_config_mode { + QED_FILTER_CONFIG_MODE_DISABLE, + QED_FILTER_CONFIG_MODE_5_TUPLE, + QED_FILTER_CONFIG_MODE_L4_PORT, + QED_FILTER_CONFIG_MODE_IP_DEST, +}; + +struct qed_ntuple_filter_params { + /* Physically mapped address containing header of buffer to be used + * as filter. + */ + dma_addr_t addr; + + /* Length of header in bytes */ + u16 length; + + /* Relative queue-id to receive classified packet */ +#define QED_RFS_NTUPLE_QID_RSS ((u16)-1) + u16 qid; + + /* Identifier can either be according to vport-id or vfid */ + bool b_is_vf; + u8 vport_id; + u8 vf_id; + + /* true iff this filter is to be added. 
Else to be removed */ + bool b_is_add; +}; + struct qed_dev_eth_info { struct qed_dev_info common; @@ -316,13 +345,12 @@ struct qed_eth_ops { int (*tunn_config)(struct qed_dev *cdev, struct qed_tunn_params *params); - int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie, - dma_addr_t mapping, u16 length, - u16 vport_id, u16 rx_queue_id, - bool add_filter); + int (*ntuple_filter_config)(struct qed_dev *cdev, + void *cookie, + struct qed_ntuple_filter_params *params); int (*configure_arfs_searcher)(struct qed_dev *cdev, - bool en_searcher); + enum qed_filter_config_mode mode); int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); }; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index cc646ca97974..15e398c7230e 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -244,16 +244,11 @@ struct qed_fcoe_pf_params { /* Most of the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; - u64 bdq_pbl_base_addr[2]; - u32 max_cwnd; + u64 bdq_pbl_base_addr[3]; u16 cq_num_entries; u16 cmdq_num_entries; u32 two_msl_timer; - u16 dup_ack_threshold; u16 tx_sws_timer; - u16 min_rto; - u16 min_rto_rt; - u16 max_rto; /* The following parameters are used during HW-init * and these parameters need to be passed as arguments @@ -264,8 +259,8 @@ struct qed_iscsi_pf_params { /* The following parameters are used during protocol-init */ u16 half_way_close_timeout; - u16 bdq_xoff_threshold[2]; - u16 bdq_xon_threshold[2]; + u16 bdq_xoff_threshold[3]; + u16 bdq_xon_threshold[3]; u16 cmdq_xoff_threshold; u16 cmdq_xon_threshold; u16 rq_buffer_size; @@ -281,10 +276,11 @@ u8 gl_cmd_pi; u8 debug_mode; u8 ll2_ooo_queue_id; - u8 ooo_enable; u8 is_target; - u8 bdq_pbl_num_entries[2]; + u8 is_soc_en; + u8 soc_num_of_blocks_log; + u8 bdq_pbl_num_entries[3]; }; struct qed_rdma_pf_params { @@ -316,16 +312,16 @@ enum qed_int_mode { }; struct qed_sb_info { - struct status_block *sb_virt; - dma_addr_t sb_phys; - u32 sb_ack; /* Last given ack */ - u16 igu_sb_id; - void __iomem *igu_addr; - u8 flags; -#define QED_SB_INFO_INIT 0x1 -#define QED_SB_INFO_SETUP 0x2 + struct status_block_e4 *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void __iomem *igu_addr; + u8 flags; +#define QED_SB_INFO_INIT 0x1 +#define QED_SB_INFO_SETUP 0x2 - struct qed_dev *cdev; + struct qed_dev *cdev; }; enum qed_dev_type { @@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) u16 rc = 0; prod = le32_to_cpu(sb_info->sb_virt->prod_index) & - STATUS_BLOCK_PROD_INDEX_MASK; + STATUS_BLOCK_E4_PROD_INDEX_MASK; if (sb_info->sb_ack != prod) { sb_info->sb_ack = prod; rc |= QED_SB_IDX; diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 111e606a74c8..d0df1bec5357 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -102,7 +102,6 @@ struct qed_iscsi_params_offload { u32 ss_thresh; u16 srtt; u16 rtt_var; - u32 ts_time; u32 ts_recent; u32 ts_recent_age; u32 total_rt; @@ -124,7 +123,6 @@ struct qed_iscsi_params_offload { u16 mss; u8 snd_wnd_scale; u8 rcv_wnd_scale; - u32 ts_ticks_per_second; u16 da_timeout_value; u8 ack_frequency; }; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index e755954d85fd..266c1fb45387 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data { u32 
opaque_data_1; /* GSI only */ - u32 gid_dst[4]; + u32 src_qp; u16 qp_id; union { diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index a9b3050f469c..c1a446ebe362 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -32,28 +32,29 @@ #ifndef __RDMA_COMMON__ #define __RDMA_COMMON__ + /************************/ /* RDMA FW CONSTANTS */ /************************/ -#define RDMA_RESERVED_LKEY (0) -#define RDMA_RING_PAGE_SIZE (0x1000) +#define RDMA_RESERVED_LKEY (0) +#define RDMA_RING_PAGE_SIZE (0x1000) -#define RDMA_MAX_SGE_PER_SQ_WQE (4) -#define RDMA_MAX_SGE_PER_RQ_WQE (4) +#define RDMA_MAX_SGE_PER_SQ_WQE (4) +#define RDMA_MAX_SGE_PER_RQ_WQE (4) #define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) -#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) -#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) +#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) +#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) -#define RDMA_MAX_CQS (64 * 1024) -#define RDMA_MAX_TIDS (128 * 1024 - 1) -#define RDMA_MAX_PDS (64 * 1024) +#define RDMA_MAX_CQS (64 * 1024) +#define RDMA_MAX_TIDS (128 * 1024 - 1) +#define RDMA_MAX_PDS (64 * 1024) -#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 -#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB +#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS +#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 +#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index fe6a33e45977..e15e0da71240 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -33,13 +33,18 @@ #ifndef __ROCE_COMMON__ #define __ROCE_COMMON__ -#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) -#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) +/************************/ +/* ROCE FW CONSTANTS */ +/************************/ -#define ROCE_MAX_QPS (32 * 1024) -#define ROCE_DCQCN_NP_MAX_QPS (64) -#define ROCE_DCQCN_RP_MAX_QPS (64) +#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) +#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) +#define ROCE_MAX_QPS (32 * 1024) +#define ROCE_DCQCN_NP_MAX_QPS (64) +#define ROCE_DCQCN_RP_MAX_QPS (64) + +/* Affiliated asynchronous events / errors enumeration */ enum roce_async_events_type { ROCE_ASYNC_EVENT_NONE = 0, ROCE_ASYNC_EVENT_COMM_EST = 1, diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 08df82a096b6..505c0b48a761 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -33,43 +33,77 @@ #ifndef __STORAGE_COMMON__ #define __STORAGE_COMMON__ -#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) -#define BDQ_NUM_RESOURCES (4) - -#define BDQ_ID_RQ (0) -#define BDQ_ID_IMM_DATA (1) -#define BDQ_NUM_IDS (2) - -#define SCSI_NUM_SGES_SLOW_SGL_THR 8 +/*********************/ +/* SCSI CONSTANTS */ +/*********************/ + +#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2) +#define BDQ_NUM_RESOURCES (4) + +#define BDQ_ID_RQ (0) +#define BDQ_ID_IMM_DATA (1) +#define BDQ_ID_TQ (2) +#define BDQ_NUM_IDS (3) + +#define SCSI_NUM_SGES_SLOW_SGL_THR 8 + +#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) + +/* SCSI op codes */ +#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89) +#define SCSI_OPCODE_READ_10 (0x28) +#define SCSI_OPCODE_WRITE_6 (0x0A) +#define SCSI_OPCODE_WRITE_10 (0x2A) +#define SCSI_OPCODE_WRITE_12 (0xAA) +#define SCSI_OPCODE_WRITE_16 (0x8A) +#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E) 
+#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE) +#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E) + +/* iSCSI Drv opaque */ +struct iscsi_drv_opaque { + __le16 reserved_zero[3]; + __le16 opaque; +}; -#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) +/* Scsi 2B/8B opaque union */ +union scsi_opaque { + struct regpair fcoe_opaque; + struct iscsi_drv_opaque iscsi_opaque; +}; +/* SCSI buffer descriptor */ struct scsi_bd { struct regpair address; - struct regpair opaque; + union scsi_opaque opaque; }; +/* Scsi Drv BDQ struct */ struct scsi_bdq_ram_drv_data { __le16 external_producer; __le16 reserved0[3]; }; +/* SCSI SGE entry */ struct scsi_sge { struct regpair sge_addr; __le32 sge_len; __le32 reserved; }; +/* Cached SGEs section */ struct scsi_cached_sges { struct scsi_sge sge[4]; }; +/* Scsi Drv CMDQ struct */ struct scsi_drv_cmdq { __le16 cmdq_cons; __le16 reserved0; __le32 reserved1; }; +/* Common SCSI init params passed by driver to FW in function init ramrod */ struct scsi_init_func_params { __le16 num_tasks; u8 log_page_size; @@ -77,6 +111,7 @@ struct scsi_init_func_params { u8 reserved2[12]; }; +/* SCSI RQ/CQ/CMDQ firmware function init parameters */ struct scsi_init_func_queues { struct regpair glbl_q_params_addr; __le16 rq_buffer_size; @@ -84,39 +119,45 @@ struct scsi_init_func_queues { __le16 cmdq_num_entries; u8 bdq_resource_id; u8 q_validity; -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 -#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F -#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 +#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 +#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5 + __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS]; u8 num_queues; u8 queue_relative_offset; u8 cq_sb_pi; u8 cmdq_sb_pi; - __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS]; - __le16 reserved0; u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; + u8 reserved1; struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; - __le16 bdq_xon_threshold[BDQ_NUM_IDS]; __le16 cmdq_xoff_threshold; + __le16 bdq_xon_threshold[BDQ_NUM_IDS]; __le16 cmdq_xon_threshold; - __le32 reserved1; }; +/* Scsi Drv BDQ Data struct (3 BDQ IDs: 0 - RQ, 1 - Immediate Data, 2 - TQ) */ struct scsi_ram_per_bdq_resource_drv_data { struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; }; +/* SCSI SGL types */ enum scsi_sgl_mode { SCSI_TX_SLOW_SGL, SCSI_FAST_SGL, MAX_SCSI_SGL_MODE }; +/* SCSI SGL parameters */ struct scsi_sgl_params { struct regpair sgl_addr; __le32 sgl_total_length; @@ -126,10 +167,16 @@ struct scsi_sgl_params { u8 reserved; }; +/* SCSI terminate connection params */ struct scsi_terminate_extra_params { __le16 unsolicited_cq_count; __le16 cmdq_count; u8 reserved[4]; }; +/* SCSI Task Queue Element */ +struct scsi_tqe { + __le16 itid; +}; + 
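The storage_common.h changes above follow the same convention as the iSCSI headers in this patch: every packed field is described by a paired _MASK/_SHIFT define instead of a C bitfield, which keeps the little-endian firmware layout explicit. A minimal sketch of the usual access pattern for such pairs follows, using the q_validity byte of struct scsi_init_func_queues as the example. The GET_FIELD/SET_FIELD helpers are written out locally here purely for illustration, patterned on the accessor macros the qed driver provides for itself; they are an assumption, not part of this patch.

/*
 * Illustrative sketch only -- not from this patch. Mask/shift values are
 * copied from the scsi_init_func_queues defines above; the helper macros
 * are local stand-ins for the qed driver's own field accessors.
 */
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK	0x1
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT	0
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK	0x1
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT	3

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, field)					\
	((value) = ((value) & ~((name##_MASK) << (name##_SHIFT))) |	\
		   (((field) & (name##_MASK)) << (name##_SHIFT)))

static unsigned char scsi_q_validity_sketch(void)
{
	unsigned char q_validity = 0;

	/* Mark the RQ and the new TQ as valid; bits 1-2 stay clear */
	SET_FIELD(q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	SET_FIELD(q_validity, SCSI_INIT_FUNC_QUEUES_TQ_VALID, 1);

	/* q_validity is now 0x09; reads back the RQ valid bit (1) */
	return GET_FIELD(q_validity, SCSI_INIT_FUNC_QUEUES_RQ_VALID);
}

With BDQ_ID_TQ added, BDQ_NUM_IDS grows from 2 to 3, so every bdq_*[BDQ_NUM_IDS] array in the function-init structures picks up a third slot for the task queue.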
#endif /* __STORAGE_COMMON__ */ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index dbf7a43c3e1f..4a4845193539 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -33,8 +33,13 @@ #ifndef __TCP_COMMON__ #define __TCP_COMMON__ -#define TCP_INVALID_TIMEOUT_VAL -1 +/********************/ +/* TCP FW CONSTANTS */ +/********************/ +#define TCP_INVALID_TIMEOUT_VAL -1 + +/* OOO opaque data received from LL2 */ struct ooo_opaque { __le32 cid; u8 drop_isle; @@ -43,25 +48,29 @@ struct ooo_opaque { u8 ooo_isle; }; +/* tcp connect mode enum */ enum tcp_connect_mode { TCP_CONNECT_ACTIVE, TCP_CONNECT_PASSIVE, MAX_TCP_CONNECT_MODE }; +/* tcp function init parameters */ struct tcp_init_params { __le32 two_msl_timer; __le16 tx_sws_timer; - u8 maxfinrt; + u8 max_fin_rt; u8 reserved[9]; }; +/* tcp IPv4/IPv6 enum */ enum tcp_ip_version { TCP_IPV4, TCP_IPV6, MAX_TCP_IP_VERSION }; +/* tcp offload parameters */ struct tcp_offload_params { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -70,24 +79,29 @@ struct tcp_offload_params { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - u8 flags; -#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 -#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 + __le16 flags; +#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8 +#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F +#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9 u8 ip_version; + u8 reserved0[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -99,17 +113,21 @@ struct tcp_offload_params { u8 rcv_wnd_scale; u8 connect_mode; __le16 srtt; - __le32 cwnd; __le32 ss_thresh; - __le16 reserved1; + __le32 rcv_wnd; + __le32 cwnd; u8 ka_max_probe_cnt; u8 dup_ack_theshold; + __le16 reserved1; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 initial_rcv_wnd; __le32 rcv_next; __le32 snd_una; __le32 snd_next; __le32 snd_max; __le32 snd_wnd; - __le32 rcv_wnd; __le32 snd_wl1; __le32 ts_recent; __le32 ts_recent_age; @@ -122,16 +140,13 @@ struct tcp_offload_params { u8 rt_cnt; __le16 rtt_var; __le16 fw_internal; - __le32 ka_timeout; - 
__le32 ka_interval; - __le32 max_rt_time; - __le32 initial_rcv_wnd; u8 snd_wnd_scale; u8 ack_frequency; __le16 da_timeout_value; - __le32 reserved3[2]; + __le32 reserved3; }; +/* tcp offload parameters, option 2 */ struct tcp_offload_params_opt2 { __le16 local_mac_addr_lo; __le16 local_mac_addr_mid; @@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 { __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; __le16 vlan_id; - u8 flags; -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 -#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 -#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F -#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 + __le16 flags; +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 +#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3 +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF +#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4 u8 ip_version; + u8 reserved1[3]; __le32 remote_ip[4]; __le32 local_ip[4]; __le32 flow_label; @@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 { __le16 syn_ip_payload_length; __le32 syn_phy_addr_lo; __le32 syn_phy_addr_hi; - __le32 reserved1[22]; + __le32 cwnd; + u8 ka_max_probe_cnt; + u8 reserved2[3]; + __le32 ka_timeout; + __le32 ka_interval; + __le32 max_rt_time; + __le32 reserved3[16]; }; +/* tcp segment placement event enum */ enum tcp_seg_placement_event { TCP_EVENT_ADD_PEN, TCP_EVENT_ADD_NEW_ISLE, @@ -177,40 +202,41 @@ enum tcp_seg_placement_event { MAX_TCP_SEG_PLACEMENT_EVENT }; +/* tcp update parameters */ struct tcp_update_params { __le16 flags; -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 -#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 -#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 -#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 -#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 -#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 -#define 
TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 -#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 +#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 +#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 +#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 +#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 +#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 __le16 remote_mac_addr_lo; __le16 remote_mac_addr_mid; __le16 remote_mac_addr_hi; @@ -226,6 +252,7 @@ struct tcp_update_params { u8 reserved1[7]; }; +/* toe upload parameters */ struct tcp_upload_params { __le32 rcv_next; __le32 snd_una; diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 0c5c5f6ae1ec..e724d5a3dd80 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -165,7 +165,41 @@ struct sfp_eeprom_base { char vendor_rev[4]; union { __be16 optical_wavelength; - u8 cable_spec; + __be16 cable_compliance; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_2:6; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 reserved60_2:6; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed passive; + struct { +#if defined __BIG_ENDIAN_BITFIELD + u8 reserved60_4:4; + u8 fc_pi_4_lim:1; + u8 sff8431_lim:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_app_e:1; + u8 reserved61:8; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 sff8431_app_e:1; + u8 fc_pi_4_app_h:1; + u8 sff8431_lim:1; + u8 fc_pi_4_lim:1; + u8 reserved60_4:4; + u8 reserved61:8; +#else +#error Unknown Endian +#endif + } __packed active; } __packed; u8 reserved62; u8 cc_base; diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index ff3642d267f7..6dfda97a6c1a 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h @@ -5,19 +5,15 @@ #include <linux/phy.h> #include <linux/if_ether.h> -enum {EDMAC_LITTLE_ENDIAN}; - struct sh_eth_plat_data { int phy; int phy_irq; - int edmac_endian; phy_interface_t phy_interface; void (*set_mdio_gate)(void *addr); unsigned char mac_addr[ETH_ALEN]; unsigned no_ether_link:1; unsigned ether_link_active_low:1; - unsigned 
needs_init:1; }; #endif diff --git a/include/linux/tick.h b/include/linux/tick.h index f442d1a42025..7cc35921218e 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -119,6 +119,7 @@ extern void tick_nohz_idle_exit(void); extern void tick_nohz_irq_exit(void); extern ktime_t tick_nohz_get_sleep_length(void); extern unsigned long tick_nohz_get_idle_calls(void); +extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu); extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time); #else /* !CONFIG_NO_HZ_COMMON */ diff --git a/include/linux/timer.h b/include/linux/timer.h index 04af640ea95b..2448f9cc48a3 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -207,9 +207,11 @@ unsigned long round_jiffies_up(unsigned long j); unsigned long round_jiffies_up_relative(unsigned long j); #ifdef CONFIG_HOTPLUG_CPU +int timers_prepare_cpu(unsigned int cpu); int timers_dead_cpu(unsigned int cpu); #else -#define timers_dead_cpu NULL +#define timers_prepare_cpu NULL +#define timers_dead_cpu NULL #endif #endif diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index cb4d92b79cd9..ab30a22ef4fd 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1773,6 +1773,8 @@ enum cfg80211_signal_type { * by %parent_bssid. * @parent_bssid: the BSS according to which %parent_tsf is set. This is set to * the BSS that requested the scan in which the beacon/probe was received. + * @chains: bitmask for filled values in @chain_signal. + * @chain_signal: per-chain signal strength of last received BSS in dBm. */ struct cfg80211_inform_bss { struct ieee80211_channel *chan; @@ -1781,6 +1783,8 @@ struct cfg80211_inform_bss { u64 boottime_ns; u64 parent_tsf; u8 parent_bssid[ETH_ALEN] __aligned(2); + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; }; /** @@ -1824,6 +1828,8 @@ struct cfg80211_bss_ies { * that holds the beacon data. @beacon_ies is still valid, of course, and * points to the same data as hidden_beacon_bss->beacon_ies in that case. * @signal: signal strength value (type depends on the wiphy's signal_type) + * @chains: bitmask for filled values in @chain_signal. + * @chain_signal: per-chain signal strength of last received BSS in dBm. * @priv: private area for driver use, has at least wiphy->bss_priv_size bytes */ struct cfg80211_bss { @@ -1842,6 +1848,8 @@ struct cfg80211_bss { u16 capability; u8 bssid[ETH_ALEN]; + u8 chains; + s8 chain_signal[IEEE80211_MAX_CHAINS]; u8 priv[0] __aligned(sizeof(void *)); }; @@ -2021,6 +2029,9 @@ struct cfg80211_disassoc_request { * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask * will be used in ht_capa. Un-supported values will be ignored. * @ht_capa_mask: The bits of ht_capa which are to be used. 
+ * @wep_keys: static WEP keys, if not NULL points to an array of + * CFG80211_MAX_WEP_KEYS WEP keys + * @wep_tx_key: key index (0..3) of the default TX static WEP key */ struct cfg80211_ibss_params { const u8 *ssid; @@ -2037,6 +2048,8 @@ struct cfg80211_ibss_params { int mcast_rate[NUM_NL80211_BANDS]; struct ieee80211_ht_cap ht_capa; struct ieee80211_ht_cap ht_capa_mask; + struct key_params *wep_keys; + int wep_tx_key; }; /** @@ -5575,7 +5588,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * cfg80211_rx_mgmt - notification of received, unprocessed management frame * @wdev: wireless device receiving the frame * @freq: Frequency on which the frame was received in MHz - * @sig_dbm: signal strength in mBm, or 0 if unknown + * @sig_dbm: signal strength in dBm, or 0 if unknown * @buf: Management frame (header + body) * @len: length of the frame data * @flags: flags, as defined in enum nl80211_rxmgmt_flags @@ -5754,7 +5767,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, * @frame: the frame * @len: length of the frame * @freq: frequency the frame was received on - * @sig_dbm: signal strength in mBm, or 0 if unknown + * @sig_dbm: signal strength in dBm, or 0 if unknown * * Use this function to report to userspace when a beacon was * received. It is not useful to call this when there is no diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 44d96a91e745..34ec321d6a03 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -149,6 +149,7 @@ struct rt6_info { */ struct list_head rt6i_siblings; unsigned int rt6i_nsiblings; + atomic_t rt6i_nh_upper_bound; atomic_t rt6i_ref; @@ -170,10 +171,12 @@ struct rt6_info { u32 rt6i_metric; u32 rt6i_pmtu; /* more non-fragment space at head required */ + int rt6i_nh_weight; unsigned short rt6i_nfheader_len; u8 rt6i_protocol; u8 exception_bucket_flushed:1, - unused:7; + should_flush:1, + unused:6; }; #define for_each_fib6_node_rt_rcu(fn) \ @@ -404,6 +407,7 @@ unsigned int fib6_tables_seq_read(struct net *net); int fib6_tables_dump(struct net *net, struct notifier_block *nb); void fib6_update_sernum(struct rt6_info *rt); +void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt); #ifdef CONFIG_IPV6_MULTIPLE_TABLES int fib6_rules_init(void); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 18e442ea93d8..27d23a65f3cd 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } +static inline bool rt6_qualify_for_ecmp(const struct rt6_info *rt) +{ + return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == + RTF_GATEWAY; +} + void ip6_route_input(struct sk_buff *skb); struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, @@ -165,10 +171,13 @@ struct rt6_rtnl_dump_arg { }; int rt6_dump_route(struct rt6_info *rt, void *p_arg); -void rt6_ifdown(struct net *net, struct net_device *dev); void rt6_mtu_change(struct net_device *dev, unsigned int mtu); void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); +void rt6_sync_up(struct net_device *dev, unsigned int nh_flags); +void rt6_disable_ip(struct net_device *dev, unsigned long event); +void rt6_sync_down_dev(struct net_device *dev, unsigned long event); +void rt6_multipath_rebalance(struct rt6_info *rt); static inline const struct rt6_info *skb_rt6_info(const struct 
sk_buff *skb) { diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index ff68cf288f9b..eb0bec043c96 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -69,8 +69,7 @@ struct ip_vs_iphdr { }; static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset, - int len, void *buffer, - const struct ip_vs_iphdr *ipvsh) + int len, void *buffer) { return skb_header_pointer(skb, offset, len, buffer); } diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 25be4715578c..9dc1230d789c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -969,6 +969,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk) &inet6_sk(sk)->cork); } +unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst); + int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6); struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index eec143cca1c0..906e90223066 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1552,6 +1552,9 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif); * @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the * driver for a key to indicate that sufficient tailroom must always * be reserved for ICV or MIC, even when HW encryption is enabled. + * @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for + * a TKIP key if it only requires MIC space. Do not set together with + * @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key. */ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0), @@ -1562,6 +1565,7 @@ enum ieee80211_key_flags { IEEE80211_KEY_FLAG_PUT_IV_SPACE = BIT(5), IEEE80211_KEY_FLAG_RX_MGMT = BIT(6), IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7), + IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8), }; /** @@ -1593,8 +1597,8 @@ struct ieee80211_key_conf { u8 icv_len; u8 iv_len; u8 hw_key_idx; - u8 flags; s8 keyidx; + u16 flags; u8 keylen; u8 key[0]; }; @@ -2056,6 +2060,9 @@ struct ieee80211_txq { * The stack will not do fragmentation. * The callback for @set_frag_threshold should be set as well. * + * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on + * TDLS links. 
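 *	Drivers advertise this like any other hardware flag, e.g. with
 *	ieee80211_hw_set(hw, SUPPORTS_TDLS_BUFFER_STA), and it can be
 *	tested with ieee80211_hw_check(hw, SUPPORTS_TDLS_BUFFER_STA)
 *	(usage sketch; those are the existing hw-flag helpers, not part
 *	of this change).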
+ * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2098,6 +2105,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_TX_FRAG_LIST, IEEE80211_HW_REPORTS_LOW_ACK, IEEE80211_HW_SUPPORTS_TX_FRAG, + IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h index 4ed1040bbe4a..73f825732326 100644 --- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h +++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h @@ -13,17 +13,17 @@ const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp; #ifdef CONFIG_NF_CT_PROTO_DCCP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4; #endif #ifdef CONFIG_NF_CT_PROTO_SCTP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4; #endif int nf_conntrack_ipv4_compat_init(void); diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h index 9cd55be95853..effa8dfba68c 100644 --- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h +++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h @@ -4,17 +4,17 @@ extern const struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6; #ifdef CONFIG_NF_CT_PROTO_DCCP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6; #endif #ifdef CONFIG_NF_CT_PROTO_SCTP -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6; #endif #ifdef CONFIG_NF_CT_PROTO_UDPLITE -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6; #endif #include <linux/sysctl.h> diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h new file mode 100644 index 000000000000..adf8db44cf86 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_count.h @@ -0,0 +1,17 @@ +#ifndef _NF_CONNTRACK_COUNT_H +#define _NF_CONNTRACK_COUNT_H + +struct nf_conncount_data; + +struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, + unsigned int keylen); +void nf_conncount_destroy(struct net *net, unsigned int family, + struct nf_conncount_data *data); + +unsigned int nf_conncount_count(struct net *net, + struct nf_conncount_data *data, 
+ const u32 *key, + unsigned int family, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone); +#endif diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 7ef56c13698a..a7220eef9aee 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -27,6 +27,9 @@ struct nf_conntrack_l4proto { /* Resolve clashes on insertion races. */ bool allow_clash; + /* protoinfo nlattr size, closes a hole */ + u16 nlattr_size; + /* Try to fill in the third arg: dataoff is offset past network protocol hdr. Return true if possible. */ bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int dataoff, @@ -66,8 +69,6 @@ struct nf_conntrack_l4proto { /* convert protoinfo to nfnetlink attributes */ int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla, struct nf_conn *ct); - /* Calculate protoinfo nlattr size */ - int (*nlattr_size)(void); /* convert nfnetlink attributes to protoinfo */ int (*from_nlattr)(struct nlattr *tb[], struct nf_conn *ct); @@ -80,8 +81,6 @@ struct nf_conntrack_l4proto { struct nf_conntrack_tuple *t); const struct nla_policy *nla_policy; - size_t nla_size; - #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) struct { int (*nlattr_to_obj)(struct nlattr *tb[], @@ -109,7 +108,7 @@ struct nf_conntrack_l4proto { }; /* Existing built-in generic protocol */ -extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; +extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic; #define MAX_NF_CT_PROTO 256 @@ -126,18 +125,18 @@ int nf_ct_l4proto_pernet_register_one(struct net *net, void nf_ct_l4proto_pernet_unregister_one(struct net *net, const struct nf_conntrack_l4proto *proto); int nf_ct_l4proto_pernet_register(struct net *net, - struct nf_conntrack_l4proto *const proto[], + const struct nf_conntrack_l4proto *const proto[], unsigned int num_proto); void nf_ct_l4proto_pernet_unregister(struct net *net, - struct nf_conntrack_l4proto *const proto[], + const struct nf_conntrack_l4proto *const proto[], unsigned int num_proto); /* Protocol global registration. 
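 * With the constified signatures below, a registration call now looks
 * like this (illustrative sketch; the array name and choice of trackers
 * are invented):
 *
 *	static const struct nf_conntrack_l4proto *const my_protos[] = {
 *		&nf_conntrack_l4proto_tcp4,
 *		&nf_conntrack_l4proto_udp4,
 *	};
 *
 *	err = nf_ct_l4proto_register(my_protos, ARRAY_SIZE(my_protos));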
*/ -int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto); +int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto); void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto); -int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[], +int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[], unsigned int num_proto); -void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[], +void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[], unsigned int num_proto); /* Generic netlink helpers */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h new file mode 100644 index 000000000000..b22b22082733 --- /dev/null +++ b/include/net/netfilter/nf_flow_table.h @@ -0,0 +1,122 @@ +#ifndef _NF_FLOW_TABLE_H +#define _NF_FLOW_TABLE_H + +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/netdevice.h> +#include <linux/rhashtable.h> +#include <linux/rcupdate.h> +#include <net/dst.h> + +struct nf_flowtable; + +struct nf_flowtable_type { + struct list_head list; + int family; + void (*gc)(struct work_struct *work); + const struct rhashtable_params *params; + nf_hookfn *hook; + struct module *owner; +}; + +struct nf_flowtable { + struct rhashtable rhashtable; + const struct nf_flowtable_type *type; + struct delayed_work gc_work; +}; + +enum flow_offload_tuple_dir { + FLOW_OFFLOAD_DIR_ORIGINAL, + FLOW_OFFLOAD_DIR_REPLY, + __FLOW_OFFLOAD_DIR_MAX = FLOW_OFFLOAD_DIR_REPLY, +}; +#define FLOW_OFFLOAD_DIR_MAX (__FLOW_OFFLOAD_DIR_MAX + 1) + +struct flow_offload_tuple { + union { + struct in_addr src_v4; + struct in6_addr src_v6; + }; + union { + struct in_addr dst_v4; + struct in6_addr dst_v6; + }; + struct { + __be16 src_port; + __be16 dst_port; + }; + + int iifidx; + + u8 l3proto; + u8 l4proto; + u8 dir; + + int oifidx; + + struct dst_entry *dst_cache; +}; + +struct flow_offload_tuple_rhash { + struct rhash_head node; + struct flow_offload_tuple tuple; +}; + +#define FLOW_OFFLOAD_SNAT 0x1 +#define FLOW_OFFLOAD_DNAT 0x2 +#define FLOW_OFFLOAD_DYING 0x4 + +struct flow_offload { + struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX]; + u32 flags; + union { + /* Your private driver data here. 
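 * (In the software flow table the union's "timeout" member below carries
 * the flow's expiry stamp, refreshed against NF_FLOW_TIMEOUT; a driver
 * offloading to hardware would overlay its own state here. This is an
 * illustrative reading, not spelled out by the patch.)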
*/ + u32 timeout; + }; +}; + +#define NF_FLOW_TIMEOUT (30 * HZ) + +struct nf_flow_route { + struct { + struct dst_entry *dst; + int ifindex; + } tuple[FLOW_OFFLOAD_DIR_MAX]; +}; + +struct flow_offload *flow_offload_alloc(struct nf_conn *ct, + struct nf_flow_route *route); +void flow_offload_free(struct flow_offload *flow); + +int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow); +void flow_offload_del(struct nf_flowtable *flow_table, struct flow_offload *flow); +struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table, + struct flow_offload_tuple *tuple); +int nf_flow_table_iterate(struct nf_flowtable *flow_table, + void (*iter)(struct flow_offload *flow, void *data), + void *data); +void nf_flow_offload_work_gc(struct work_struct *work); +extern const struct rhashtable_params nf_flow_offload_rhash_params; + +void flow_offload_dead(struct flow_offload *flow); + +int nf_flow_snat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir); +int nf_flow_dnat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir); + +struct flow_ports { + __be16 source, dest; +}; + +unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); +unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); + +#define MODULE_ALIAS_NF_FLOWTABLE(family) \ + MODULE_ALIAS("nf-flowtable-" __stringify(family)) + +#endif /* _NF_FLOW_TABLE_H */ diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 814058d0f167..a50a69f5334c 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -25,7 +25,7 @@ struct nf_queue_entry { struct nf_queue_handler { int (*outfn)(struct nf_queue_entry *entry, unsigned int queuenum); - unsigned int (*nf_hook_drop)(struct net *net); + void (*nf_hook_drop)(struct net *net); }; void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index fecc6112c768..dd238950df81 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -9,6 +9,7 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter/nf_tables.h> #include <linux/u64_stats_sync.h> +#include <net/netfilter/nf_flow_table.h> #include <net/netlink.h> #define NFT_JUMP_STACK_SIZE 16 @@ -54,8 +55,8 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt, pkt->xt.state = state; } -static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt, - struct sk_buff *skb) +static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt, + struct sk_buff *skb) { pkt->tprot_set = false; pkt->tprot = 0; @@ -63,14 +64,6 @@ static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt, pkt->xt.fragoff = 0; } -static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - nft_set_pktinfo(pkt, skb, state); - nft_set_pktinfo_proto_unspec(pkt, skb); -} - /** * struct nft_verdict - nf_tables verdict * @@ -424,6 +417,11 @@ struct nft_set { __attribute__((aligned(__alignof__(u64)))); }; +static inline bool nft_set_is_anonymous(const struct nft_set *set) +{ + return set->flags & NFT_SET_ANONYMOUS; +} + static inline void *nft_set_priv(const struct nft_set *set) { return (void 
*)set->data; @@ -883,7 +881,7 @@ enum nft_chain_type { * @family: address family * @owner: module owner * @hook_mask: mask of valid hooks - * @hooks: hookfn overrides + * @hooks: array of hook functions */ struct nf_chain_type { const char *name; @@ -905,8 +903,6 @@ struct nft_stats { struct u64_stats_sync syncp; }; -#define NFT_HOOK_OPS_MAX 2 - /** * struct nft_base_chain - nf_tables base chain * @@ -918,7 +914,7 @@ struct nft_stats { * @dev_name: device name that this base chain is attached to (if any) */ struct nft_base_chain { - struct nf_hook_ops ops[NFT_HOOK_OPS_MAX]; + struct nf_hook_ops ops; const struct nf_chain_type *type; u8 policy; u8 flags; @@ -948,6 +944,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); * @chains: chains in the table * @sets: sets in the table * @objects: stateful objects in the table + * @flowtables: flow tables in the table * @hgenerator: handle generator state * @use: number of chain references to this table * @flags: table flag (see enum nft_table_flags) @@ -959,6 +956,7 @@ struct nft_table { struct list_head chains; struct list_head sets; struct list_head objects; + struct list_head flowtables; u64 hgenerator; u32 use; u16 flags:14, @@ -979,9 +977,6 @@ enum nft_af_flags { * @owner: module owner * @tables: used internally * @flags: family flags - * @nops: number of hook ops in this family - * @hook_ops_init: initialization function for chain hook ops - * @hooks: hookfn overrides for packet validation */ struct nft_af_info { struct list_head list; @@ -990,10 +985,6 @@ struct nft_af_info { struct module *owner; struct list_head tables; u32 flags; - unsigned int nops; - void (*hook_ops_init)(struct nf_hook_ops *, - unsigned int); - nf_hookfn *hooks[NF_MAX_HOOKS]; }; int nft_register_afinfo(struct net *, struct nft_af_info *); @@ -1097,6 +1088,44 @@ int nft_register_obj(struct nft_object_type *obj_type); void nft_unregister_obj(struct nft_object_type *obj_type); /** + * struct nft_flowtable - nf_tables flow table + * + * @list: flow table list node in table list + * @table: the table the flow table is contained in + * @name: name of this flow table + * @hooknum: hook number + * @priority: hook priority + * @ops_len: number of hooks in array + * @genmask: generation mask + * @use: number of references to this flow table + * @data: rhashtable and garbage collector + * @ops: array of hooks + */ +struct nft_flowtable { + struct list_head list; + struct nft_table *table; + char *name; + int hooknum; + int priority; + int ops_len; + u32 genmask:2, + use:30; + /* runtime data below here */ + struct nf_hook_ops *ops ____cacheline_aligned; + struct nf_flowtable data; +}; + +struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table, + const struct nlattr *nla, + u8 genmask); +void nft_flow_table_iterate(struct net *net, + void (*iter)(struct nf_flowtable *flowtable, void *data), + void *data); + +void nft_register_flowtable_type(struct nf_flowtable_type *type); +void nft_unregister_flowtable_type(struct nf_flowtable_type *type); + +/** * struct nft_traceinfo - nft tracing information and state * * @pkt: pktinfo currently processed @@ -1125,9 +1154,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, void nft_trace_notify(struct nft_traceinfo *info); -#define nft_dereference(p) \ - nfnl_dereference(p, NFNL_SUBSYS_NFTABLES) - #define MODULE_ALIAS_NFT_FAMILY(family) \ MODULE_ALIAS("nft-afinfo-" __stringify(family)) @@ -1332,4 +1358,11 @@ struct nft_trans_obj { #define nft_trans_obj(trans) \ (((struct 
nft_trans_obj *)trans->data)->obj) +struct nft_trans_flowtable { + struct nft_flowtable *flowtable; +}; + +#define nft_trans_flowtable(trans) \ + (((struct nft_trans_flowtable *)trans->data)->flowtable) + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h index f0896ba456c4..ed7b511f0a59 100644 --- a/include/net/netfilter/nf_tables_ipv4.h +++ b/include/net/netfilter/nf_tables_ipv4.h @@ -5,15 +5,11 @@ #include <net/netfilter/nf_tables.h> #include <net/ip.h> -static inline void -nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, + struct sk_buff *skb) { struct iphdr *ip; - nft_set_pktinfo(pkt, skb, state); - ip = ip_hdr(pkt->skb); pkt->tprot_set = true; pkt->tprot = ip->protocol; @@ -21,10 +17,8 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt, pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET; } -static inline int -__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline int __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { struct iphdr *iph, _iph; u32 len, thoff; @@ -52,16 +46,11 @@ __nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, return 0; } -static inline void -nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { - nft_set_pktinfo(pkt, skb, state); - if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0) - nft_set_pktinfo_proto_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv4_validate(pkt, skb) < 0) + nft_set_pktinfo_unspec(pkt, skb); } -extern struct nft_af_info nft_af_ipv4; - #endif diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index b8065b72f56e..dabe6fdb553a 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -5,20 +5,16 @@ #include <linux/netfilter_ipv6/ip6_tables.h> #include <net/ipv6.h> -static inline void -nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, + struct sk_buff *skb) { unsigned int flags = IP6_FH_F_AUTH; int protohdr, thoff = 0; unsigned short frag_off; - nft_set_pktinfo(pkt, skb, state); - protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); if (protohdr < 0) { - nft_set_pktinfo_proto_unspec(pkt, skb); + nft_set_pktinfo_unspec(pkt, skb); return; } @@ -28,10 +24,8 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, pkt->xt.fragoff = frag_off; } -static inline int -__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline int __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { #if IS_ENABLED(CONFIG_IPV6) unsigned int flags = IP6_FH_F_AUTH; @@ -68,16 +62,11 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, #endif } -static inline void -nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, - struct sk_buff *skb, - const struct nf_hook_state *state) +static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, + struct sk_buff *skb) { - nft_set_pktinfo(pkt, skb, state); - if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0) - 
nft_set_pktinfo_proto_unspec(pkt, skb); + if (__nft_set_pktinfo_ipv6_validate(pkt, skb) < 0) + nft_set_pktinfo_unspec(pkt, skb); } -extern struct nft_af_info nft_af_ipv6; - #endif diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index cc00af2ac2d7..ca043342c0eb 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h @@ -17,7 +17,17 @@ struct netns_nf { #ifdef CONFIG_SYSCTL struct ctl_table_header *nf_log_dir_header; #endif - struct nf_hook_entries __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; + struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS]; + struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS]; +#ifdef CONFIG_NETFILTER_FAMILY_ARP + struct nf_hook_entries __rcu *hooks_arp[NF_ARP_NUMHOOKS]; +#endif +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + struct nf_hook_entries __rcu *hooks_bridge[NF_INET_NUMHOOKS]; +#endif +#if IS_ENABLED(CONFIG_DECNET) + struct nf_hook_entries __rcu *hooks_decnet[NF_DN_NUMHOOKS]; +#endif #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) bool defrag_ipv4; #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 31574c958673..0d1343cba84c 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -79,14 +79,16 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, #else static inline int tcf_block_get(struct tcf_block **p_block, - struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q) + struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, + struct netlink_ext_ack *extack) { return 0; } static inline int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, - struct tcf_block_ext_info *ei) + struct tcf_block_ext_info *ei, + struct netlink_ext_ack *extack) { return 0; } @@ -729,6 +731,11 @@ struct tc_cookie { u32 len; }; +struct tc_qopt_offload_stats { + struct gnet_stats_basic_packed *bstats; + struct gnet_stats_queue *qstats; +}; + enum tc_red_command { TC_RED_REPLACE, TC_RED_DESTROY, @@ -742,10 +749,6 @@ struct tc_red_qopt_offload_params { u32 probability; bool is_ecn; }; -struct tc_red_qopt_offload_stats { - struct gnet_stats_basic_packed *bstats; - struct gnet_stats_queue *qstats; -}; struct tc_red_qopt_offload { enum tc_red_command command; @@ -753,7 +756,7 @@ struct tc_red_qopt_offload { u32 parent; union { struct tc_red_qopt_offload_params set; - struct tc_red_qopt_offload_stats stats; + struct tc_qopt_offload_stats stats; struct red_stats *xstats; }; }; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 8ac4d5cdbfed..02369e379d35 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -993,7 +993,7 @@ void sctp_transport_burst_limited(struct sctp_transport *); void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); void sctp_transport_reset(struct sctp_transport *t); -void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); +bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu); void sctp_transport_immediate_rtx(struct sctp_transport *); void sctp_transport_dst_release(struct sctp_transport *t); void sctp_transport_dst_confirm(struct sctp_transport *t); diff --git a/include/net/sock.h b/include/net/sock.h index 6c1db823f8b9..73b7830b0bb8 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -72,6 +72,7 @@ #include <net/tcp_states.h> #include <linux/net_tstamp.h> #include <net/smc.h> +#include <net/l3mdev.h> /* * This structure really needs to be cleaned up. 
@@ -1515,6 +1516,11 @@ static inline bool sock_owned_by_user(const struct sock *sk) return sk->sk_lock.owned; } +static inline bool sock_owned_by_user_nocheck(const struct sock *sk) +{ + return sk->sk_lock.owned; +} + /* no reclassification while locks are held */ static inline bool sock_allow_reclassification(const struct sock *csk) { @@ -2394,4 +2400,23 @@ static inline void sk_pacing_shift_update(struct sock *sk, int val) sk->sk_pacing_shift = val; } +/* if a socket is bound to a device, check that the given device + * index is either the same or that the socket is bound to an L3 + * master device and the given device index is also enslaved to + * that L3 master + */ +static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) +{ + int mdif; + + if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif) + return true; + + mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif); + if (mdif && mdif == sk->sk_bound_dev_if) + return true; + + return false; +} + #endif /* _SOCK_H */ diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 13223396dc64..f96391e84a8a 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -146,7 +146,7 @@ struct vxlanhdr_gpe { np_applied:1, instance_applied:1, version:2, -reserved_flags2:2; + reserved_flags2:2; #elif defined(__BIG_ENDIAN_BITFIELD) u8 reserved_flags2:2, version:2, diff --git a/include/net/xdp.h b/include/net/xdp.h new file mode 100644 index 000000000000..b2362ddfa694 --- /dev/null +++ b/include/net/xdp.h @@ -0,0 +1,48 @@ +/* include/net/xdp.h + * + * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. + * Released under terms in GPL version 2. See COPYING. + */ +#ifndef __LINUX_NET_XDP_H__ +#define __LINUX_NET_XDP_H__ + +/** + * DOC: XDP RX-queue information + * + * The XDP RX-queue info (xdp_rxq_info) is associated with the driver + * level RX-ring queues. It is information that is specific to how + * the driver has configured a given RX-ring queue. + * + * Each xdp_buff frame received in the driver carries a (pointer) + * reference to this xdp_rxq_info structure. This provides the XDP + * data-path read-access to RX-info for both kernel and bpf-side + * (limited subset). + * + * For now, direct access is only safe while running in NAPI/softirq + * context. Contents are read-mostly and must not be updated during + * driver NAPI/softirq poll. + * + * The driver usage API is a register and unregister API. + * + * The struct is not directly tied to the XDP prog. A new XDP prog + * can be attached as long as it doesn't change the underlying + * RX-ring. If the RX-ring does change significantly, the NIC driver + * naturally needs to stop the RX-ring before purging and reallocating + * memory. In that process the driver MUST call unregister (which + * also applies to driver shutdown and unload). The register API is + * also mandatory during RX-ring setup. 
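 *
 * An illustrative driver-side sketch (the ring structure and its field
 * names are hypothetical): during RX-ring setup do
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *			       ring->queue_index);
 *	if (err)
 *		return err;
 *
 * and call xdp_rxq_info_unreg(&ring->xdp_rxq) from the teardown path.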
+ */ + +struct xdp_rxq_info { + struct net_device *dev; + u32 queue_index; + u32 reg_state; +} ____cacheline_aligned; /* perf critical, avoid false-sharing */ + +int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index); +void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); +void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); +bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); + +#endif /* __LINUX_NET_XDP_H__ */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 1ec0c4760646..2e6d4fe6b0ba 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1051,6 +1051,7 @@ struct xfrm_offload { #define XFRM_GSO_SEGMENT 16 #define XFRM_GRO 32 #define XFRM_ESP_NO_TRAILER 64 +#define XFRM_DEV_RESUME 128 __u32 status; #define CRYPTO_SUCCESS 1 @@ -1600,6 +1601,9 @@ int xfrm_init_state(struct xfrm_state *x); int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); int xfrm_input_resume(struct sk_buff *skb, int nexthdr); +int xfrm_trans_queue(struct sk_buff *skb, + int (*finish)(struct net *, struct sock *, + struct sk_buff *)); int xfrm_output_resume(struct sk_buff *skb, int err); int xfrm_output(struct sock *sk, struct sk_buff *skb); int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb); @@ -1874,21 +1878,28 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) { return skb->sp->xvec[skb->sp->len - 1]; } +#endif + static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) { +#ifdef CONFIG_XFRM struct sec_path *sp = skb->sp; if (!sp || !sp->olen || sp->len != sp->olen) return NULL; return &sp->ovec[sp->olen - 1]; -} +#else + return NULL; #endif +} void __net_init xfrm_dev_init(void); #ifdef CONFIG_XFRM_OFFLOAD -int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features); +void xfrm_dev_resume(struct sk_buff *skb); +void xfrm_dev_backlog(struct softnet_data *sd); +struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again); int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo); bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); @@ -1902,6 +1913,8 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) return false; xdst = (struct xfrm_dst *) dst; + if (!x->xso.offload_handle && !xdst->child->xfrm) + return true; if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) return true; @@ -1923,15 +1936,24 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) struct net_device *dev = xso->dev; if (dev && dev->xfrmdev_ops) { - dev->xfrmdev_ops->xdo_dev_state_free(x); + if (dev->xfrmdev_ops->xdo_dev_state_free) + dev->xfrmdev_ops->xdo_dev_state_free(x); xso->dev = NULL; dev_put(dev); } } #else -static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) +static inline void xfrm_dev_resume(struct sk_buff *skb) { - return 0; +} + +static inline void xfrm_dev_backlog(struct softnet_data *sd) +{ +} + +static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again) +{ + return skb; } static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo) diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h index 758607226bfd..2cd449328aee 100644 --- a/include/trace/events/clk.h +++ b/include/trace/events/clk.h @@ -134,12 +134,12 @@ 
DECLARE_EVENT_CLASS(clk_parent, TP_STRUCT__entry( __string( name, core->name ) - __string( pname, parent->name ) + __string( pname, parent ? parent->name : "none" ) ), TP_fast_assign( __assign_str(name, core->name); - __assign_str(pname, parent->name); + __assign_str(pname, parent ? parent->name : "none"); ), TP_printk("%s %s", __get_str(name), __get_str(pname)) diff --git a/include/trace/events/net_probe_common.h b/include/trace/events/net_probe_common.h new file mode 100644 index 000000000000..3930119cab08 --- /dev/null +++ b/include/trace/events/net_probe_common.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#if !defined(_TRACE_NET_PROBE_COMMON_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NET_PROBE_COMMON_H + +#define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk) \ + do { \ + struct sockaddr_in *v4 = (void *)__entry->saddr; \ + \ + v4->sin_family = AF_INET; \ + v4->sin_port = inet->inet_sport; \ + v4->sin_addr.s_addr = inet->inet_saddr; \ + v4 = (void *)__entry->daddr; \ + v4->sin_family = AF_INET; \ + v4->sin_port = inet->inet_dport; \ + v4->sin_addr.s_addr = inet->inet_daddr; \ + } while (0) + +#if IS_ENABLED(CONFIG_IPV6) + +#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \ + do { \ + if (sk->sk_family == AF_INET6) { \ + struct sockaddr_in6 *v6 = (void *)__entry->saddr; \ + \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = inet->inet_sport; \ + v6->sin6_addr = inet6_sk(sk)->saddr; \ + v6 = (void *)__entry->daddr; \ + v6->sin6_family = AF_INET6; \ + v6->sin6_port = inet->inet_dport; \ + v6->sin6_addr = sk->sk_v6_daddr; \ + } else \ + TP_STORE_ADDR_PORTS_V4(__entry, inet, sk); \ + } while (0) + +#else + +#define TP_STORE_ADDR_PORTS(__entry, inet, sk) \ + TP_STORE_ADDR_PORTS_V4(__entry, inet, sk); + +#endif + +#endif diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h new file mode 100644 index 000000000000..7475c7be165a --- /dev/null +++ b/include/trace/events/sctp.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM sctp + +#if !defined(_TRACE_SCTP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_SCTP_H + +#include <net/sctp/structs.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(sctp_probe_path, + + TP_PROTO(struct sctp_transport *sp, + const struct sctp_association *asoc), + + TP_ARGS(sp, asoc), + + TP_STRUCT__entry( + __field(__u64, asoc) + __field(__u32, primary) + __array(__u8, ipaddr, sizeof(union sctp_addr)) + __field(__u32, state) + __field(__u32, cwnd) + __field(__u32, ssthresh) + __field(__u32, flight_size) + __field(__u32, partial_bytes_acked) + __field(__u32, pathmtu) + ), + + TP_fast_assign( + __entry->asoc = (unsigned long)asoc; + __entry->primary = (sp == asoc->peer.primary_path); + memcpy(__entry->ipaddr, &sp->ipaddr, sizeof(union sctp_addr)); + __entry->state = sp->state; + __entry->cwnd = sp->cwnd; + __entry->ssthresh = sp->ssthresh; + __entry->flight_size = sp->flight_size; + __entry->partial_bytes_acked = sp->partial_bytes_acked; + __entry->pathmtu = sp->pathmtu; + ), + + TP_printk("asoc=%#llx%s ipaddr=%pISpc state=%u cwnd=%u ssthresh=%u " + "flight_size=%u partial_bytes_acked=%u pathmtu=%u", + __entry->asoc, __entry->primary ? 
"(*)" : "", + __entry->ipaddr, __entry->state, __entry->cwnd, + __entry->ssthresh, __entry->flight_size, + __entry->partial_bytes_acked, __entry->pathmtu) +); + +TRACE_EVENT(sctp_probe, + + TP_PROTO(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk), + + TP_ARGS(ep, asoc, chunk), + + TP_STRUCT__entry( + __field(__u64, asoc) + __field(__u32, mark) + __field(__u16, bind_port) + __field(__u16, peer_port) + __field(__u32, pathmtu) + __field(__u32, rwnd) + __field(__u16, unack_data) + ), + + TP_fast_assign( + struct sk_buff *skb = chunk->skb; + + __entry->asoc = (unsigned long)asoc; + __entry->mark = skb->mark; + __entry->bind_port = ep->base.bind_addr.port; + __entry->peer_port = asoc->peer.port; + __entry->pathmtu = asoc->pathmtu; + __entry->rwnd = asoc->peer.rwnd; + __entry->unack_data = asoc->unack_data; + + if (trace_sctp_probe_path_enabled()) { + struct sctp_transport *sp; + + list_for_each_entry(sp, &asoc->peer.transport_addr_list, + transports) { + trace_sctp_probe_path(sp, asoc); + } + } + ), + + TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d " + "rwnd=%u unack_data=%d", + __entry->asoc, __entry->mark, __entry->bind_port, + __entry->peer_port, __entry->pathmtu, __entry->rwnd, + __entry->unack_data) +); + +#endif /* _TRACE_SCTP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index 3b9094a07b80..3176a3931107 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -11,7 +11,11 @@ #include <linux/ipv6.h> #include <linux/tcp.h> -/* The protocol traced by sock_set_state */ +#define family_names \ + EM(AF_INET) \ + EMe(AF_INET6) + +/* The protocol traced by inet_sock_set_state */ #define inet_protocol_names \ EM(IPPROTO_TCP) \ EM(IPPROTO_DCCP) \ @@ -37,6 +41,7 @@ #define EM(a) TRACE_DEFINE_ENUM(a); #define EMe(a) TRACE_DEFINE_ENUM(a); +family_names inet_protocol_names tcp_state_names @@ -45,6 +50,9 @@ tcp_state_names #define EM(a) { a, #a }, #define EMe(a) { a, #a } +#define show_family_name(val) \ + __print_symbolic(val, family_names) + #define show_inet_protocol_name(val) \ __print_symbolic(val, inet_protocol_names) @@ -118,6 +126,7 @@ TRACE_EVENT(inet_sock_set_state, __field(int, newstate) __field(__u16, sport) __field(__u16, dport) + __field(__u16, family) __field(__u8, protocol) __array(__u8, saddr, 4) __array(__u8, daddr, 4) @@ -134,6 +143,7 @@ TRACE_EVENT(inet_sock_set_state, __entry->oldstate = oldstate; __entry->newstate = newstate; + __entry->family = sk->sk_family; __entry->protocol = sk->sk_protocol; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); @@ -160,8 +170,8 @@ TRACE_EVENT(inet_sock_set_state, } ), - TP_printk("protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4" - "saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", + TP_printk("family=%s protocol=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", + show_family_name(__entry->family), show_inet_protocol_name(__entry->protocol), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 8e88a1671538..878b2be7ce77 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM tcp @@ -8,6 +9,36 @@ #include <linux/tcp.h> #include <linux/tracepoint.h> #include 
<net/ipv6.h> +#include <net/tcp.h> + +#define TP_STORE_V4MAPPED(__entry, saddr, daddr) \ + do { \ + struct in6_addr *pin6; \ + \ + pin6 = (struct in6_addr *)__entry->saddr_v6; \ + ipv6_addr_set_v4mapped(saddr, pin6); \ + pin6 = (struct in6_addr *)__entry->daddr_v6; \ + ipv6_addr_set_v4mapped(daddr, pin6); \ + } while (0) + +#if IS_ENABLED(CONFIG_IPV6) +#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ + do { \ + if (sk->sk_family == AF_INET6) { \ + struct in6_addr *pin6; \ + \ + pin6 = (struct in6_addr *)__entry->saddr_v6; \ + *pin6 = saddr6; \ + pin6 = (struct in6_addr *)__entry->daddr_v6; \ + *pin6 = daddr6; \ + } else { \ + TP_STORE_V4MAPPED(__entry, saddr, daddr); \ + } \ + } while (0) +#else +#define TP_STORE_ADDRS(__entry, saddr, daddr, saddr6, daddr6) \ + TP_STORE_V4MAPPED(__entry, saddr, daddr) +#endif /* * tcp event with arguments sk and skb @@ -34,7 +65,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, TP_fast_assign( struct inet_sock *inet = inet_sk(sk); - struct in6_addr *pin6; __be32 *p32; __entry->skbaddr = skb; @@ -49,20 +79,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk_skb, p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) { - pin6 = (struct in6_addr *)__entry->saddr_v6; - *pin6 = sk->sk_v6_rcv_saddr; - pin6 = (struct in6_addr *)__entry->daddr_v6; - *pin6 = sk->sk_v6_daddr; - } else -#endif - { - pin6 = (struct in6_addr *)__entry->saddr_v6; - ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); - pin6 = (struct in6_addr *)__entry->daddr_v6; - ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); - } + TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, + sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", @@ -111,7 +129,6 @@ DECLARE_EVENT_CLASS(tcp_event_sk, TP_fast_assign( struct inet_sock *inet = inet_sk(sk); - struct in6_addr *pin6; __be32 *p32; __entry->skaddr = sk; @@ -125,20 +142,8 @@ DECLARE_EVENT_CLASS(tcp_event_sk, p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) { - pin6 = (struct in6_addr *)__entry->saddr_v6; - *pin6 = sk->sk_v6_rcv_saddr; - pin6 = (struct in6_addr *)__entry->daddr_v6; - *pin6 = sk->sk_v6_daddr; - } else -#endif - { - pin6 = (struct in6_addr *)__entry->saddr_v6; - ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); - pin6 = (struct in6_addr *)__entry->daddr_v6; - ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); - } + TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, + sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", @@ -181,7 +186,6 @@ TRACE_EVENT(tcp_set_state, TP_fast_assign( struct inet_sock *inet = inet_sk(sk); - struct in6_addr *pin6; __be32 *p32; __entry->skaddr = sk; @@ -197,20 +201,8 @@ TRACE_EVENT(tcp_set_state, p32 = (__be32 *) __entry->daddr; *p32 = inet->inet_daddr; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) { - pin6 = (struct in6_addr *)__entry->saddr_v6; - *pin6 = sk->sk_v6_rcv_saddr; - pin6 = (struct in6_addr *)__entry->daddr_v6; - *pin6 = sk->sk_v6_daddr; - } else -#endif - { - pin6 = (struct in6_addr *)__entry->saddr_v6; - ipv6_addr_set_v4mapped(inet->inet_saddr, pin6); - pin6 = (struct in6_addr *)__entry->daddr_v6; - ipv6_addr_set_v4mapped(inet->inet_daddr, pin6); - } + TP_STORE_ADDRS(__entry, inet->inet_saddr, inet->inet_daddr, + sk->sk_v6_rcv_saddr, sk->sk_v6_daddr); ), TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 
saddrv6=%pI6c daddrv6=%pI6c oldstate=%s newstate=%s", @@ -240,7 +232,6 @@ TRACE_EVENT(tcp_retransmit_synack, TP_fast_assign( struct inet_request_sock *ireq = inet_rsk(req); - struct in6_addr *pin6; __be32 *p32; __entry->skaddr = sk; @@ -255,20 +246,8 @@ TRACE_EVENT(tcp_retransmit_synack, p32 = (__be32 *) __entry->daddr; *p32 = ireq->ir_rmt_addr; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) { - pin6 = (struct in6_addr *)__entry->saddr_v6; - *pin6 = ireq->ir_v6_loc_addr; - pin6 = (struct in6_addr *)__entry->daddr_v6; - *pin6 = ireq->ir_v6_rmt_addr; - } else -#endif - { - pin6 = (struct in6_addr *)__entry->saddr_v6; - ipv6_addr_set_v4mapped(ireq->ir_loc_addr, pin6); - pin6 = (struct in6_addr *)__entry->daddr_v6; - ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, pin6); - } + TP_STORE_ADDRS(__entry, ireq->ir_loc_addr, ireq->ir_rmt_addr, + ireq->ir_v6_loc_addr, ireq->ir_v6_rmt_addr); ), TP_printk("sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 saddrv6=%pI6c daddrv6=%pI6c", @@ -277,6 +256,64 @@ TRACE_EVENT(tcp_retransmit_synack, __entry->saddr_v6, __entry->daddr_v6) ); +#include <trace/events/net_probe_common.h> + +TRACE_EVENT(tcp_probe, + + TP_PROTO(struct sock *sk, struct sk_buff *skb), + + TP_ARGS(sk, skb), + + TP_STRUCT__entry( + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + __field(__u16, sport) + __field(__u16, dport) + __field(__u32, mark) + __field(__u16, length) + __field(__u32, snd_nxt) + __field(__u32, snd_una) + __field(__u32, snd_cwnd) + __field(__u32, ssthresh) + __field(__u32, snd_wnd) + __field(__u32, srtt) + __field(__u32, rcv_wnd) + ), + + TP_fast_assign( + const struct tcp_sock *tp = tcp_sk(sk); + const struct inet_sock *inet = inet_sk(sk); + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + + TP_STORE_ADDR_PORTS(__entry, inet, sk); + + /* For filtering use */ + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + __entry->mark = skb->mark; + + __entry->length = skb->len; + __entry->snd_nxt = tp->snd_nxt; + __entry->snd_una = tp->snd_una; + __entry->snd_cwnd = tp->snd_cwnd; + __entry->snd_wnd = tp->snd_wnd; + __entry->rcv_wnd = tp->rcv_wnd; + __entry->ssthresh = tcp_current_ssthresh(sk); + __entry->srtt = tp->srtt_us >> 3; + ), + + TP_printk("src=%pISpc dest=%pISpc mark=%#x length=%d snd_nxt=%#x " "snd_una=%#x snd_cwnd=%u ssthresh=%u snd_wnd=%u srtt=%u " "rcv_wnd=%u", + __entry->saddr, __entry->daddr, __entry->mark, + __entry->length, __entry->snd_nxt, __entry->snd_una, + __entry->snd_cwnd, __entry->ssthresh, __entry->snd_wnd, + __entry->srtt, __entry->rcv_wnd) +); + #endif /* _TRACE_TCP_H */ /* This part must be outside protection */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d01f1cb3cfc0..405317f9c064 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -899,6 +899,9 @@ struct xdp_md { __u32 data; __u32 data_end; __u32 data_meta; + /* Below access go through struct xdp_rxq_info */ + __u32 ingress_ifindex; /* rxq->dev->ifindex */ + __u32 rx_queue_index; /* rxq->queue_index */ }; enum sk_action { @@ -921,6 +924,9 @@ struct bpf_prog_info { __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; + __u32 ifindex; + __u64 netns_dev; + __u64 netns_ino; } __attribute__((aligned(8))); struct bpf_map_info { @@ -1012,7 +1018,8 @@ struct bpf_perf_event_value { #define BPF_DEVCG_DEV_CHAR (1ULL << 1) struct
bpf_cgroup_dev_ctx { - __u32 access_type; /* (access << 16) | type */ + /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ + __u32 access_type; __u32 major; __u32 minor; }; diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 87b7529fcdfe..f8cb5760ea4f 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -23,6 +23,7 @@ #define _UAPI_LINUX_IF_ETHER_H #include <linux/types.h> +#include <linux/libc-compat.h> /* * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble @@ -150,11 +151,13 @@ * This is an Ethernet frame header. */ +#if __UAPI_DEF_ETHHDR struct ethhdr { unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ unsigned char h_source[ETH_ALEN]; /* source ether addr */ __be16 h_proto; /* packet type ID field */ } __attribute__((packed)); +#endif #endif /* _UAPI_LINUX_IF_ETHER_H */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 19fc02660e0c..f8f04fed6186 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -732,6 +732,8 @@ enum { IFLA_VF_STATS_BROADCAST, IFLA_VF_STATS_MULTICAST, IFLA_VF_STATS_PAD, + IFLA_VF_STATS_RX_DROPPED, + IFLA_VF_STATS_TX_DROPPED, __IFLA_VF_STATS_MAX, }; diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h index 719d243471f4..2e522835a4af 100644 --- a/include/uapi/linux/if_macsec.h +++ b/include/uapi/linux/if_macsec.h @@ -18,12 +18,17 @@ #define MACSEC_GENL_NAME "macsec" #define MACSEC_GENL_VERSION 1 -#define MACSEC_MAX_KEY_LEN 128 +#define MACSEC_MAX_KEY_LEN 256 #define MACSEC_KEYID_LEN 16 -#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL -#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL +/* cipher IDs as per IEEE802.1AEbn-2011 */ +#define MACSEC_CIPHER_ID_GCM_AES_128 0x0080C20001000001ULL +#define MACSEC_CIPHER_ID_GCM_AES_256 0x0080C20001000002ULL + +#define MACSEC_DEFAULT_CIPHER_ID MACSEC_CIPHER_ID_GCM_AES_128 +/* deprecated cipher ID for GCM-AES-128 */ +#define MACSEC_DEFAULT_CIPHER_ALT 0x0080020001000001ULL #define MACSEC_MIN_ICV_LEN 8 #define MACSEC_MAX_ICV_LEN 32 diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 817d807e9481..14565d703291 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -92,6 +92,8 @@ enum { INET_DIAG_BC_D_COND, INET_DIAG_BC_DEV_COND, /* u32 ifindex */ INET_DIAG_BC_MARK_COND, + INET_DIAG_BC_S_EQ, + INET_DIAG_BC_D_EQ, }; struct inet_diag_hostcond { diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h index d84ce5c1c9aa..71e62795104d 100644 --- a/include/uapi/linux/l2tp.h +++ b/include/uapi/linux/l2tp.h @@ -65,7 +65,7 @@ struct sockaddr_l2tpip6 { * TUNNEL_MODIFY - CONN_ID, udpcsum * TUNNEL_GETSTATS - CONN_ID, (stats) * TUNNEL_GET - CONN_ID, (...) - * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, offset, l2spec + * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec * SESSION_DELETE - SESSION_ID * SESSION_MODIFY - SESSION_ID, data_seq * SESSION_GET - SESSION_ID, (...) 
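Aside: the bpf_cgroup_dev_ctx hunk above repacks access_type as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_*, and a later kernel/bpf/cgroup.c hunk in this series permits narrow loads of that field. A minimal sketch of how a device-cgroup program might decode it; the function name and the sample policy are illustrative, not part of the patch:

#include <linux/bpf.h>

/* Hypothetical BPF_PROG_TYPE_CGROUP_DEVICE program body (illustrative). */
static int dev_ctl(struct bpf_cgroup_dev_ctx *ctx)
{
	unsigned short type = ctx->access_type & 0xFFFF;	/* BPF_DEVCG_DEV_* */
	unsigned short access = ctx->access_type >> 16;		/* BPF_DEVCG_ACC_* */

	/* Sample policy: character devices may only be read. */
	if (type == BPF_DEVCG_DEV_CHAR && !(access & ~BPF_DEVCG_ACC_READ))
		return 1;	/* allow */
	return 0;		/* deny */
}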
@@ -94,7 +94,7 @@ enum { L2TP_ATTR_NONE, /* no data */ L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */ L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */ - L2TP_ATTR_OFFSET, /* u16 */ + L2TP_ATTR_OFFSET, /* u16 (not used) */ L2TP_ATTR_DATA_SEQ, /* u16 */ L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */ L2TP_ATTR_L2SPEC_LEN, /* u8, enum l2tp_l2spec_type */ diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h index 282875cf8056..fc29efaa918c 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h @@ -168,47 +168,106 @@ /* If we did not see any headers from any supported C libraries, * or we are being included in the kernel, then define everything - * that we need. */ + * that we need. Check for previous __UAPI_* definitions to give + * unsupported C libraries a way to opt out of any kernel definition. */ #else /* !defined(__GLIBC__) */ /* Definitions for if.h */ +#ifndef __UAPI_DEF_IF_IFCONF #define __UAPI_DEF_IF_IFCONF 1 +#endif +#ifndef __UAPI_DEF_IF_IFMAP #define __UAPI_DEF_IF_IFMAP 1 +#endif +#ifndef __UAPI_DEF_IF_IFNAMSIZ #define __UAPI_DEF_IF_IFNAMSIZ 1 +#endif +#ifndef __UAPI_DEF_IF_IFREQ #define __UAPI_DEF_IF_IFREQ 1 +#endif /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */ +#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1 +#endif /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */ +#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1 +#endif /* Definitions for in.h */ +#ifndef __UAPI_DEF_IN_ADDR #define __UAPI_DEF_IN_ADDR 1 +#endif +#ifndef __UAPI_DEF_IN_IPPROTO #define __UAPI_DEF_IN_IPPROTO 1 +#endif +#ifndef __UAPI_DEF_IN_PKTINFO #define __UAPI_DEF_IN_PKTINFO 1 +#endif +#ifndef __UAPI_DEF_IP_MREQ #define __UAPI_DEF_IP_MREQ 1 +#endif +#ifndef __UAPI_DEF_SOCKADDR_IN #define __UAPI_DEF_SOCKADDR_IN 1 +#endif +#ifndef __UAPI_DEF_IN_CLASS #define __UAPI_DEF_IN_CLASS 1 +#endif /* Definitions for in6.h */ +#ifndef __UAPI_DEF_IN6_ADDR #define __UAPI_DEF_IN6_ADDR 1 +#endif +#ifndef __UAPI_DEF_IN6_ADDR_ALT #define __UAPI_DEF_IN6_ADDR_ALT 1 +#endif +#ifndef __UAPI_DEF_SOCKADDR_IN6 #define __UAPI_DEF_SOCKADDR_IN6 1 +#endif +#ifndef __UAPI_DEF_IPV6_MREQ #define __UAPI_DEF_IPV6_MREQ 1 +#endif +#ifndef __UAPI_DEF_IPPROTO_V6 #define __UAPI_DEF_IPPROTO_V6 1 +#endif +#ifndef __UAPI_DEF_IPV6_OPTIONS #define __UAPI_DEF_IPV6_OPTIONS 1 +#endif +#ifndef __UAPI_DEF_IN6_PKTINFO #define __UAPI_DEF_IN6_PKTINFO 1 +#endif +#ifndef __UAPI_DEF_IP6_MTUINFO #define __UAPI_DEF_IP6_MTUINFO 1 +#endif /* Definitions for ipx.h */ +#ifndef __UAPI_DEF_SOCKADDR_IPX #define __UAPI_DEF_SOCKADDR_IPX 1 +#endif +#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION #define __UAPI_DEF_IPX_ROUTE_DEFINITION 1 +#endif +#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION #define __UAPI_DEF_IPX_INTERFACE_DEFINITION 1 +#endif +#ifndef __UAPI_DEF_IPX_CONFIG_DATA #define __UAPI_DEF_IPX_CONFIG_DATA 1 +#endif +#ifndef __UAPI_DEF_IPX_ROUTE_DEF #define __UAPI_DEF_IPX_ROUTE_DEF 1 +#endif /* Definitions for xattr.h */ +#ifndef __UAPI_DEF_XATTR #define __UAPI_DEF_XATTR 1 +#endif #endif /* __GLIBC__ */ +/* Definitions for if_ether.h */ +/* allow libcs like musl to deactivate this, glibc does not implement this. 
*/ +#ifndef __UAPI_DEF_ETHHDR +#define __UAPI_DEF_ETHHDR 1 +#endif + #endif /* _UAPI_LIBC_COMPAT_H */ diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index 3fea7709a441..9574bd40870b 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -36,7 +36,7 @@ enum ip_conntrack_info { #define NF_CT_STATE_INVALID_BIT (1 << 0) #define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1)) -#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1)) +#define NF_CT_STATE_UNTRACKED_BIT (1 << 6) /* Bitset representing status of connection. */ enum ip_conntrack_status { @@ -101,12 +101,16 @@ enum ip_conntrack_status { IPS_HELPER_BIT = 13, IPS_HELPER = (1 << IPS_HELPER_BIT), + /* Conntrack has been offloaded to flow table. */ + IPS_OFFLOAD_BIT = 14, + IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT), + /* Be careful here, modifying these bits can make things messy, * so don't let users modify them directly. */ IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK | IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | - IPS_SEQ_ADJUST | IPS_TEMPLATE), + IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD), __IPS_MAX_BIT = 14, }; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index a3ee277b17a1..53e8dd2a3a03 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -92,6 +92,9 @@ enum nft_verdicts { * @NFT_MSG_GETOBJ: get a stateful object (enum nft_obj_attributes) * @NFT_MSG_DELOBJ: delete a stateful object (enum nft_obj_attributes) * @NFT_MSG_GETOBJ_RESET: get and reset a stateful object (enum nft_obj_attributes) + * @NFT_MSG_NEWFLOWTABLE: add new flow table (enum nft_flowtable_attributes) + * @NFT_MSG_GETFLOWTABLE: get flow table (enum nft_flowtable_attributes) + * @NFT_MSG_DELFLOWTABLE: delete flow table (enum nft_flowtable_attributes) */ enum nf_tables_msg_types { NFT_MSG_NEWTABLE, @@ -116,6 +119,9 @@ enum nf_tables_msg_types { NFT_MSG_GETOBJ, NFT_MSG_DELOBJ, NFT_MSG_GETOBJ_RESET, + NFT_MSG_NEWFLOWTABLE, + NFT_MSG_GETFLOWTABLE, + NFT_MSG_DELFLOWTABLE, NFT_MSG_MAX, }; @@ -777,6 +783,7 @@ enum nft_exthdr_attributes { * @NFT_META_OIFGROUP: packet output interface group * @NFT_META_CGROUP: socket control group (skb->sk->sk_classid) * @NFT_META_PRANDOM: a 32bit pseudo-random number + * @NFT_META_SECPATH: boolean, secpath_exists (!!skb->sp) */ enum nft_meta_keys { NFT_META_LEN, @@ -804,6 +811,7 @@ enum nft_meta_keys { NFT_META_OIFGROUP, NFT_META_CGROUP, NFT_META_PRANDOM, + NFT_META_SECPATH, }; /** @@ -949,6 +957,17 @@ enum nft_ct_attributes { }; #define NFTA_CT_MAX (__NFTA_CT_MAX - 1) +/** + * enum nft_flow_attributes - ct offload expression attributes + * @NFTA_FLOW_TABLE_NAME: flow table name (NLA_STRING) + */ +enum nft_offload_attributes { + NFTA_FLOW_UNSPEC, + NFTA_FLOW_TABLE_NAME, + __NFTA_FLOW_MAX, +}; +#define NFTA_FLOW_MAX (__NFTA_FLOW_MAX - 1) + enum nft_limit_type { NFT_LIMIT_PKTS, NFT_LIMIT_PKT_BYTES @@ -1308,6 +1327,53 @@ enum nft_object_attributes { #define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1) /** + * enum nft_flowtable_attributes - nf_tables flow table netlink attributes + * + * @NFTA_FLOWTABLE_TABLE: name of the table containing the expression (NLA_STRING) + * @NFTA_FLOWTABLE_NAME: name of this flow table (NLA_STRING) + * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32) + * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32) + */ +enum 
nft_flowtable_attributes { + NFTA_FLOWTABLE_UNSPEC, + NFTA_FLOWTABLE_TABLE, + NFTA_FLOWTABLE_NAME, + NFTA_FLOWTABLE_HOOK, + NFTA_FLOWTABLE_USE, + __NFTA_FLOWTABLE_MAX +}; +#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1) + +/** + * enum nft_flowtable_hook_attributes - nf_tables flow table hook netlink attributes + * + * @NFTA_FLOWTABLE_HOOK_NUM: netfilter hook number (NLA_U32) + * @NFTA_FLOWTABLE_HOOK_PRIORITY: netfilter hook priority (NLA_U32) + * @NFTA_FLOWTABLE_HOOK_DEVS: input devices this flow table is bound to (NLA_NESTED) + */ +enum nft_flowtable_hook_attributes { + NFTA_FLOWTABLE_HOOK_UNSPEC, + NFTA_FLOWTABLE_HOOK_NUM, + NFTA_FLOWTABLE_HOOK_PRIORITY, + NFTA_FLOWTABLE_HOOK_DEVS, + __NFTA_FLOWTABLE_HOOK_MAX +}; +#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1) + +/** + * enum nft_device_attributes - nf_tables device netlink attributes + * + * @NFTA_DEVICE_NAME: name of this device (NLA_STRING) + */ +enum nft_devices_attributes { + NFTA_DEVICE_UNSPEC, + NFTA_DEVICE_NAME, + __NFTA_DEVICE_MAX +}; +#define NFTA_DEVICE_MAX (__NFTA_DEVICE_MAX - 1) + + +/** * enum nft_trace_attributes - nf_tables trace netlink attributes * * @NFTA_TRACE_TABLE: name of the table (NLA_STRING) diff --git a/include/uapi/linux/netfilter/xt_connlimit.h b/include/uapi/linux/netfilter/xt_connlimit.h index 07e5e9d47882..d4d1943dcd11 100644 --- a/include/uapi/linux/netfilter/xt_connlimit.h +++ b/include/uapi/linux/netfilter/xt_connlimit.h @@ -27,7 +27,7 @@ struct xt_connlimit_info { __u32 flags; /* Used internally by the kernel */ - struct xt_connlimit_data *data __attribute__((aligned(8))); + struct nf_conncount_data *data __attribute__((aligned(8))); }; #endif /* _XT_CONNLIMIT_H */ diff --git a/include/uapi/linux/netfilter_arp.h b/include/uapi/linux/netfilter_arp.h index 81b6a4cbcb72..791dfc5ae907 100644 --- a/include/uapi/linux/netfilter_arp.h +++ b/include/uapi/linux/netfilter_arp.h @@ -15,6 +15,9 @@ #define NF_ARP_IN 0 #define NF_ARP_OUT 1 #define NF_ARP_FORWARD 2 + +#ifndef __KERNEL__ #define NF_ARP_NUMHOOKS 3 +#endif #endif /* __LINUX_ARP_NETFILTER_H */ diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h index 9089c38f6abe..61f1c7dfd033 100644 --- a/include/uapi/linux/netfilter_decnet.h +++ b/include/uapi/linux/netfilter_decnet.h @@ -24,6 +24,9 @@ #define NFC_DN_IF_IN 0x0004 /* Output device. */ #define NFC_DN_IF_OUT 0x0008 + +/* kernel define is in netfilter_defs.h */ +#define NF_DN_NUMHOOKS 7 #endif /* ! __KERNEL__ */ /* DECnet Hooks */ @@ -41,7 +44,6 @@ #define NF_DN_HELLO 5 /* Input Routing Packets */ #define NF_DN_ROUTE 6 -#define NF_DN_NUMHOOKS 7 enum nf_dn_hook_priorities { NF_DN_PRI_FIRST = INT_MIN, diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f882fe1f9709..c587a61c32bf 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3862,6 +3862,9 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_PARENT_BSSID. (u64). * @NL80211_BSS_PARENT_BSSID: the BSS according to which @NL80211_BSS_PARENT_TSF * is set. + * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update. + * Contains a nested array of signal strength attributes (u8, dBm), + * using the nesting index as the antenna number. 
* @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -3885,6 +3888,7 @@ enum nl80211_bss { NL80211_BSS_PAD, NL80211_BSS_PARENT_TSF, NL80211_BSS_PARENT_BSSID, + NL80211_BSS_CHAIN_SIGNAL, /* keep last */ __NL80211_BSS_AFTER_LAST, diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h index 35f79d1f8c3a..14bacc7e6cef 100644 --- a/include/uapi/linux/tipc.h +++ b/include/uapi/linux/tipc.h @@ -117,10 +117,9 @@ static inline unsigned int tipc_node(__u32 addr) /* * Publication scopes when binding port names and port name sequences */ - -#define TIPC_ZONE_SCOPE 1 -#define TIPC_CLUSTER_SCOPE 2 -#define TIPC_NODE_SCOPE 3 +#define TIPC_ZONE_SCOPE 1 +#define TIPC_CLUSTER_SCOPE 2 +#define TIPC_NODE_SCOPE 3 /* * Limiting values for messages diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index fc353b518288..5de6ed37695b 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -57,6 +57,8 @@ * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ +#define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */ + #ifndef VIRTIO_NET_NO_LEGACY #define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ #endif /* VIRTIO_NET_NO_LEGACY */ @@ -76,6 +78,17 @@ struct virtio_net_config { __u16 max_virtqueue_pairs; /* Default maximum transmit unit advice */ __u16 mtu; + /* + * speed, in units of 1Mb. All values 0 to INT_MAX are legal. + * Any other value stands for unknown. + */ + __u32 speed; + /* + * 0x00 - half duplex + * 0x01 - full duplex + * Any other value stands for unknown. + */ + __u8 duplex; } __attribute__((packed)); /* diff --git a/include/xen/balloon.h b/include/xen/balloon.h index 4914b93a23f2..61f410fd74e4 100644 --- a/include/xen/balloon.h +++ b/include/xen/balloon.h @@ -44,3 +44,8 @@ static inline void xen_balloon_init(void) { } #endif + +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +struct resource; +void arch_xen_balloon_init(struct resource *hostmem_resource); +#endif diff --git a/init/Kconfig b/init/Kconfig index 2934249fba46..690a381adee0 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -461,10 +461,14 @@ endmenu # "CPU/Task time and stats accounting" config CPU_ISOLATION bool "CPU isolation" + default y help Make sure that CPUs running critical tasks are not disturbed by any source of "noise" such as unbound workqueues, timers, kthreads... - Unbound jobs get offloaded to housekeeping CPUs. + Unbound jobs get offloaded to housekeeping CPUs. This is driven by + the "isolcpus=" boot parameter. + + Say Y if unsure. source "kernel/rcu/Kconfig" diff --git a/init/main.c b/init/main.c index e96e3a14533c..a8100b954839 100644 --- a/init/main.c +++ b/init/main.c @@ -75,6 +75,7 @@ #include <linux/slab.h> #include <linux/perf_event.h> #include <linux/ptrace.h> +#include <linux/pti.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/sched_clock.h> @@ -504,6 +505,10 @@ static void __init mm_init(void) pgtable_init(); vmalloc_init(); ioremap_huge_init(); + /* Should be run before the first non-init thread is created */ + init_espfix_bsp(); + /* Should be run after espfix64 is set up. 
*/ + pti_init(); } asmlinkage __visible void __init start_kernel(void) @@ -679,10 +684,6 @@ asmlinkage __visible void __init start_kernel(void) if (efi_enabled(EFI_RUNTIME_SERVICES)) efi_enter_virtual_mode(); #endif -#ifdef CONFIG_X86_ESPFIX64 - /* Should be run before the first non-init thread is created */ - init_espfix_bsp(); -#endif thread_stack_cache_init(); cred_init(); fork_init(); diff --git a/kernel/acct.c b/kernel/acct.c index d15c0ee4d955..addf7732fb56 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -102,7 +102,7 @@ static int check_free_space(struct bsd_acct_struct *acct) { struct kstatfs sbuf; - if (time_is_before_jiffies(acct->needcheck)) + if (time_is_after_jiffies(acct->needcheck)) goto out; /* May block */ diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index e691da0b3bab..a713fd23ec88 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -9,9 +9,11 @@ obj-$(CONFIG_BPF_SYSCALL) += devmap.o obj-$(CONFIG_BPF_SYSCALL) += cpumap.o obj-$(CONFIG_BPF_SYSCALL) += offload.o ifeq ($(CONFIG_STREAM_PARSER),y) +ifeq ($(CONFIG_INET),y) obj-$(CONFIG_BPF_SYSCALL) += sockmap.o endif endif +endif ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o endif diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index b789ab78d28f..c1c0b60d3f2f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -568,6 +568,8 @@ static bool cgroup_dev_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { + const int size_default = sizeof(__u32); + if (type == BPF_WRITE) return false; @@ -576,8 +578,17 @@ static bool cgroup_dev_is_valid_access(int off, int size, /* The verifier guarantees that size > 0. */ if (off % size != 0) return false; - if (size != sizeof(__u32)) - return false; + + switch (off) { + case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type): + bpf_ctx_record_field_size(info, size_default); + if (!bpf_ctx_narrow_access_ok(off, size, size_default)) + return false; + break; + default: + if (size != size_default) + return false; + } return true; } diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 768e0a02d8c8..70a534549cd3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -771,7 +771,9 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) /* Base function for offset calculation. Needs to go into .text section, * therefore keeping it non-static as well; will also be used by JITs - * anyway later on, so do not let the compiler omit it. + * anyway later on, so do not let the compiler omit it. This also needs + * to go into kallsyms for correlation from e.g. bpftool, so naming + * must not change. 
*/ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c index 883f88fa5bfc..8740406df2cd 100644 --- a/kernel/bpf/disasm.c +++ b/kernel/bpf/disasm.c @@ -21,10 +21,39 @@ static const char * const func_id_str[] = { }; #undef __BPF_FUNC_STR_FN -const char *func_id_name(int id) +static const char *__func_get_name(const struct bpf_insn_cbs *cbs, + const struct bpf_insn *insn, + char *buff, size_t len) { BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); + if (insn->src_reg != BPF_PSEUDO_CALL && + insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID && + func_id_str[insn->imm]) + return func_id_str[insn->imm]; + + if (cbs && cbs->cb_call) + return cbs->cb_call(cbs->private_data, insn); + + if (insn->src_reg == BPF_PSEUDO_CALL) + snprintf(buff, len, "%+d", insn->imm); + + return buff; +} + +static const char *__func_imm_name(const struct bpf_insn_cbs *cbs, + const struct bpf_insn *insn, + u64 full_imm, char *buff, size_t len) +{ + if (cbs && cbs->cb_imm) + return cbs->cb_imm(cbs->private_data, insn, full_imm); + + snprintf(buff, len, "0x%llx", (unsigned long long)full_imm); + return buff; +} + +const char *func_id_name(int id) +{ if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) return func_id_str[id]; else @@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = { [BPF_EXIT >> 4] = "exit", }; -static void print_bpf_end_insn(bpf_insn_print_cb verbose, +static void print_bpf_end_insn(bpf_insn_print_t verbose, struct bpf_verifier_env *env, const struct bpf_insn *insn) { @@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose, insn->imm, insn->dst_reg); } -void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env, - const struct bpf_insn *insn, bool allow_ptr_leaks) +void print_bpf_insn(const struct bpf_insn_cbs *cbs, + struct bpf_verifier_env *env, + const struct bpf_insn *insn, + bool allow_ptr_leaks) { + const bpf_insn_print_t verbose = cbs->cb_print; u8 class = BPF_CLASS(insn->code); if (class == BPF_ALU || class == BPF_ALU64) { @@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env, */ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD; + char tmp[64]; if (map_ptr && !allow_ptr_leaks) imm = 0; - verbose(env, "(%02x) r%d = 0x%llx\n", insn->code, - insn->dst_reg, (unsigned long long)imm); + verbose(env, "(%02x) r%d = %s\n", + insn->code, insn->dst_reg, + __func_imm_name(cbs, insn, imm, + tmp, sizeof(tmp))); } else { verbose(env, "BUG_ld_%02x\n", insn->code); return; @@ -189,12 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env, u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { - if (insn->src_reg == BPF_PSEUDO_CALL) - verbose(env, "(%02x) call pc%+d\n", insn->code, - insn->imm); - else + char tmp[64]; + + if (insn->src_reg == BPF_PSEUDO_CALL) { + verbose(env, "(%02x) call pc%s\n", + insn->code, + __func_get_name(cbs, insn, + tmp, sizeof(tmp))); + } else { + strcpy(tmp, "unknown"); verbose(env, "(%02x) call %s#%d\n", insn->code, - func_id_name(insn->imm), insn->imm); + __func_get_name(cbs, insn, + tmp, sizeof(tmp)), + insn->imm); + } } else if (insn->code == (BPF_JMP | BPF_JA)) { verbose(env, "(%02x) goto pc%+d\n", insn->code, insn->off); diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h index 8de977e420b6..e0857d016f89 100644 --- a/kernel/bpf/disasm.h +++ b/kernel/bpf/disasm.h @@ -17,16 +17,35 @@ #include <linux/bpf.h> 
#include <linux/kernel.h> #include <linux/stringify.h> +#ifndef __KERNEL__ +#include <stdio.h> +#include <string.h> +#endif + +struct bpf_verifier_env; extern const char *const bpf_alu_string[16]; extern const char *const bpf_class_string[8]; const char *func_id_name(int id); -struct bpf_verifier_env; -typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env, - const char *, ...); -void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env, - const struct bpf_insn *insn, bool allow_ptr_leaks); +typedef void (*bpf_insn_print_t)(struct bpf_verifier_env *env, + const char *, ...); +typedef const char *(*bpf_insn_revmap_call_t)(void *private_data, + const struct bpf_insn *insn); +typedef const char *(*bpf_insn_print_imm_t)(void *private_data, + const struct bpf_insn *insn, + __u64 full_imm); + +struct bpf_insn_cbs { + bpf_insn_print_t cb_print; + bpf_insn_revmap_call_t cb_call; + bpf_insn_print_imm_t cb_imm; + void *private_data; +}; +void print_bpf_insn(const struct bpf_insn_cbs *cbs, + struct bpf_verifier_env *env, + const struct bpf_insn *insn, + bool allow_ptr_leaks); #endif diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 01aaef1a77c5..5bb5e49ef4c3 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -368,7 +368,45 @@ out: putname(pname); return ret; } -EXPORT_SYMBOL_GPL(bpf_obj_get_user); + +static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type) +{ + struct bpf_prog *prog; + int ret = inode_permission(inode, MAY_READ | MAY_WRITE); + if (ret) + return ERR_PTR(ret); + + if (inode->i_op == &bpf_map_iops) + return ERR_PTR(-EINVAL); + if (inode->i_op != &bpf_prog_iops) + return ERR_PTR(-EACCES); + + prog = inode->i_private; + + ret = security_bpf_prog(prog); + if (ret < 0) + return ERR_PTR(ret); + + if (!bpf_prog_get_ok(prog, &type, false)) + return ERR_PTR(-EINVAL); + + return bpf_prog_inc(prog); +} + +struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) +{ + struct bpf_prog *prog; + struct path path; + int ret = kern_path(name, LOOKUP_FOLLOW, &path); + if (ret) + return ERR_PTR(ret); + prog = __get_prog_inode(d_backing_inode(path.dentry), type); + if (!IS_ERR(prog)) + touch_atime(&path); + path_put(&path); + return prog; +} +EXPORT_SYMBOL(bpf_prog_get_type_path); static void bpf_evict_inode(struct inode *inode) { diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 8455b89d1bbf..040d4e0edf3f 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -16,17 +16,22 @@ #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/bug.h> +#include <linux/kdev_t.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/printk.h> +#include <linux/proc_ns.h> #include <linux/rtnetlink.h> +#include <linux/rwsem.h> -/* protected by RTNL */ +/* Protects bpf_prog_offload_devs and offload members of all progs. + * RTNL lock cannot be taken when holding this lock. 
+ */ +static DECLARE_RWSEM(bpf_devs_lock); static LIST_HEAD(bpf_prog_offload_devs); int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) { - struct net *net = current->nsproxy->net_ns; struct bpf_dev_offload *offload; if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && @@ -41,32 +46,40 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) return -ENOMEM; offload->prog = prog; - init_waitqueue_head(&offload->verifier_done); - rtnl_lock(); - offload->netdev = __dev_get_by_index(net, attr->prog_ifindex); - if (!offload->netdev) { - rtnl_unlock(); - kfree(offload); - return -EINVAL; - } + offload->netdev = dev_get_by_index(current->nsproxy->net_ns, + attr->prog_ifindex); + if (!offload->netdev) + goto err_free; + down_write(&bpf_devs_lock); + if (offload->netdev->reg_state != NETREG_REGISTERED) + goto err_unlock; prog->aux->offload = offload; list_add_tail(&offload->offloads, &bpf_prog_offload_devs); - rtnl_unlock(); + dev_put(offload->netdev); + up_write(&bpf_devs_lock); return 0; +err_unlock: + up_write(&bpf_devs_lock); + dev_put(offload->netdev); +err_free: + kfree(offload); + return -EINVAL; } static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd, struct netdev_bpf *data) { - struct net_device *netdev = prog->aux->offload->netdev; + struct bpf_dev_offload *offload = prog->aux->offload; + struct net_device *netdev; ASSERT_RTNL(); - if (!netdev) + if (!offload) return -ENODEV; + netdev = offload->netdev; if (!netdev->netdev_ops->ndo_bpf) return -EOPNOTSUPP; @@ -87,62 +100,63 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) if (err) goto exit_unlock; - env->dev_ops = data.verifier.ops; - + env->prog->aux->offload->dev_ops = data.verifier.ops; env->prog->aux->offload->dev_state = true; - env->prog->aux->offload->verifier_running = true; exit_unlock: rtnl_unlock(); return err; } +int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, + int insn_idx, int prev_insn_idx) +{ + struct bpf_dev_offload *offload; + int ret = -ENODEV; + + down_read(&bpf_devs_lock); + offload = env->prog->aux->offload; + if (offload) + ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); + up_read(&bpf_devs_lock); + + return ret; +} + static void __bpf_prog_offload_destroy(struct bpf_prog *prog) { struct bpf_dev_offload *offload = prog->aux->offload; struct netdev_bpf data = {}; - /* Caution - if netdev is destroyed before the program, this function - * will be called twice. 
- */ - data.offload.prog = prog; - if (offload->verifier_running) - wait_event(offload->verifier_done, !offload->verifier_running); - if (offload->dev_state) WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data)); - offload->dev_state = false; + /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */ + bpf_prog_free_id(prog, true); + list_del_init(&offload->offloads); - offload->netdev = NULL; + kfree(offload); + prog->aux->offload = NULL; } void bpf_prog_offload_destroy(struct bpf_prog *prog) { - struct bpf_dev_offload *offload = prog->aux->offload; - - offload->verifier_running = false; - wake_up(&offload->verifier_done); - rtnl_lock(); - __bpf_prog_offload_destroy(prog); + down_write(&bpf_devs_lock); + if (prog->aux->offload) + __bpf_prog_offload_destroy(prog); + up_write(&bpf_devs_lock); rtnl_unlock(); - - kfree(offload); } static int bpf_prog_offload_translate(struct bpf_prog *prog) { - struct bpf_dev_offload *offload = prog->aux->offload; struct netdev_bpf data = {}; int ret; data.offload.prog = prog; - offload->verifier_running = false; - wake_up(&offload->verifier_done); - rtnl_lock(); ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data); rtnl_unlock(); @@ -164,6 +178,63 @@ int bpf_prog_offload_compile(struct bpf_prog *prog) return bpf_prog_offload_translate(prog); } +struct ns_get_path_bpf_prog_args { + struct bpf_prog *prog; + struct bpf_prog_info *info; +}; + +static struct ns_common *bpf_prog_offload_info_fill_ns(void *private_data) +{ + struct ns_get_path_bpf_prog_args *args = private_data; + struct bpf_prog_aux *aux = args->prog->aux; + struct ns_common *ns; + struct net *net; + + rtnl_lock(); + down_read(&bpf_devs_lock); + + if (aux->offload) { + args->info->ifindex = aux->offload->netdev->ifindex; + net = dev_net(aux->offload->netdev); + get_net(net); + ns = &net->ns; + } else { + args->info->ifindex = 0; + ns = NULL; + } + + up_read(&bpf_devs_lock); + rtnl_unlock(); + + return ns; +} + +int bpf_prog_offload_info_fill(struct bpf_prog_info *info, + struct bpf_prog *prog) +{ + struct ns_get_path_bpf_prog_args args = { + .prog = prog, + .info = info, + }; + struct inode *ns_inode; + struct path ns_path; + void *res; + + res = ns_get_path_cb(&ns_path, bpf_prog_offload_info_fill_ns, &args); + if (IS_ERR(res)) { + if (!info->ifindex) + return -ENODEV; + return PTR_ERR(res); + } + + ns_inode = ns_path.dentry->d_inode; + info->netns_dev = new_encode_dev(ns_inode->i_sb->s_dev); + info->netns_ino = ns_inode->i_ino; + path_put(&ns_path); + + return 0; +} + const struct bpf_prog_ops bpf_offload_prog_ops = { }; @@ -181,11 +252,13 @@ static int bpf_offload_notification(struct notifier_block *notifier, if (netdev->reg_state != NETREG_UNREGISTERING) break; + down_write(&bpf_devs_lock); list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads) { if (offload->netdev == netdev) __bpf_prog_offload_destroy(offload->prog); } + up_write(&bpf_devs_lock); break; default: break; diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 5ee2e41893d9..3f662ee23a34 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -96,14 +96,6 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk) return rcu_dereference_sk_user_data(sk); } -/* compute the linear packet data range [data, data_end) for skb when - * sk_skb type programs are in use. 
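Aside: bpf_prog_offload_info_fill() above exports the offload device's network namespace through the netns_dev/netns_ino fields added to bpf_prog_info in the uapi hunk earlier. A hedged userspace sketch of consuming them; the helper name, and the assumption that the new_encode_dev() value compares directly against stat(2)'s st_dev for /proc/<pid>/ns/net, are the editor's, not the patch's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>

/* Does the program's reported netns match the netns of a given pid? */
static bool prog_in_pid_netns(uint64_t netns_dev, uint64_t netns_ino, int pid)
{
	struct stat st;
	char path[64];

	snprintf(path, sizeof(path), "/proc/%d/ns/net", pid);
	if (stat(path, &st) < 0)
		return false;	/* no such process, or no permission */

	return netns_dev == (uint64_t)st.st_dev &&
	       netns_ino == (uint64_t)st.st_ino;
}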
- */ -static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) -{ - TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); -} - enum __sk_action { __SK_DROP = 0, __SK_PASS, diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index a15bc636cc98..6c63c2222ea8 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -226,9 +226,33 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) return 0; } -static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key) +static int stack_map_get_next_key(struct bpf_map *map, void *key, + void *next_key) { - return -EINVAL; + struct bpf_stack_map *smap = container_of(map, + struct bpf_stack_map, map); + u32 id; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + if (!key) { + id = 0; + } else { + id = *(u32 *)key; + if (id >= smap->n_buckets || !smap->buckets[id]) + id = 0; + else + id++; + } + + while (id < smap->n_buckets && !smap->buckets[id]) + id++; + + if (id >= smap->n_buckets) + return -ENOENT; + + *(u32 *)next_key = id; + return 0; } static int stack_map_update_elem(struct bpf_map *map, void *key, void *value, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index e2e1c78ce1dc..2bac0dc8baba 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -905,9 +905,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog) return id > 0 ? 0 : id; } -static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) +void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) { - /* cBPF to eBPF migrations are currently not in the idr store. */ + /* cBPF to eBPF migrations are currently not in the idr store. + * Offloaded programs are removed from the store when their device + * disappears - even if someone grabs an fd to them they are unusable, + * simply waiting for refcnt to drop to be freed. + */ if (!prog->aux->id) return; @@ -917,6 +921,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) __acquire(&prog_idr_lock); idr_remove(&prog_idr, prog->aux->id); + prog->aux->id = 0; if (do_idr_lock) spin_unlock_bh(&prog_idr_lock); @@ -937,10 +942,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu) static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) { if (atomic_dec_and_test(&prog->aux->refcnt)) { + int i; + trace_bpf_prog_put_rcu(prog); /* bpf_prog_free_id() must be called first */ bpf_prog_free_id(prog, do_idr_lock); + + for (i = 0; i < prog->aux->func_cnt; i++) + bpf_prog_kallsyms_del(prog->aux->func[i]); bpf_prog_kallsyms_del(prog); + call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); } } @@ -1057,7 +1068,7 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) } EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); -static bool bpf_prog_get_ok(struct bpf_prog *prog, +bool bpf_prog_get_ok(struct bpf_prog *prog, enum bpf_prog_type *attach_type, bool attach_drv) { /* not an attachment, just a refcount inc, always allow */ @@ -1151,6 +1162,8 @@ static int bpf_prog_load(union bpf_attr *attr) if (!prog) return -ENOMEM; + prog->aux->offload_requested = !!attr->prog_ifindex; + err = security_bpf_prog_alloc(prog->aux); if (err) goto free_prog_nouncharge; @@ -1172,7 +1185,7 @@ static int bpf_prog_load(union bpf_attr *attr) atomic_set(&prog->aux->refcnt, 1); prog->gpl_compatible = is_gpl ? 
1 : 0; - if (attr->prog_ifindex) { + if (bpf_prog_is_dev_bound(prog->aux)) { err = bpf_prog_offload_init(prog, attr); if (err) goto free_prog; @@ -1552,6 +1565,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr) return fd; } +static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, + unsigned long addr) +{ + int i; + + for (i = 0; i < prog->aux->used_map_cnt; i++) + if (prog->aux->used_maps[i] == (void *)addr) + return prog->aux->used_maps[i]; + return NULL; +} + +static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog) +{ + const struct bpf_map *map; + struct bpf_insn *insns; + u64 imm; + int i; + + insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), + GFP_USER); + if (!insns) + return insns; + + for (i = 0; i < prog->len; i++) { + if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { + insns[i].code = BPF_JMP | BPF_CALL; + insns[i].imm = BPF_FUNC_tail_call; + /* fall-through */ + } + if (insns[i].code == (BPF_JMP | BPF_CALL) || + insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { + if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) + insns[i].code = BPF_JMP | BPF_CALL; + if (!bpf_dump_raw_ok()) + insns[i].imm = 0; + continue; + } + + if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) + continue; + + imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; + map = bpf_map_from_imm(prog, imm); + if (map) { + insns[i].src_reg = BPF_PSEUDO_MAP_FD; + insns[i].imm = map->id; + insns[i + 1].imm = 0; + continue; + } + + if (!bpf_dump_raw_ok() && + imm == (unsigned long)prog->aux) { + insns[i].imm = 0; + insns[i + 1].imm = 0; + continue; + } + } + + return insns; +} + static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, const union bpf_attr *attr, union bpf_attr __user *uattr) @@ -1602,21 +1676,43 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, ulen = info.jited_prog_len; info.jited_prog_len = prog->jited_len; if (info.jited_prog_len && ulen) { - uinsns = u64_to_user_ptr(info.jited_prog_insns); - ulen = min_t(u32, info.jited_prog_len, ulen); - if (copy_to_user(uinsns, prog->bpf_func, ulen)) - return -EFAULT; + if (bpf_dump_raw_ok()) { + uinsns = u64_to_user_ptr(info.jited_prog_insns); + ulen = min_t(u32, info.jited_prog_len, ulen); + if (copy_to_user(uinsns, prog->bpf_func, ulen)) + return -EFAULT; + } else { + info.jited_prog_insns = 0; + } } ulen = info.xlated_prog_len; info.xlated_prog_len = bpf_prog_insn_size(prog); if (info.xlated_prog_len && ulen) { + struct bpf_insn *insns_sanitized; + bool fault; + + if (prog->blinded && !bpf_dump_raw_ok()) { + info.xlated_prog_insns = 0; + goto done; + } + insns_sanitized = bpf_insn_prepare_dump(prog); + if (!insns_sanitized) + return -ENOMEM; uinsns = u64_to_user_ptr(info.xlated_prog_insns); ulen = min_t(u32, info.xlated_prog_len, ulen); - if (copy_to_user(uinsns, prog->insnsi, ulen)) + fault = copy_to_user(uinsns, insns_sanitized, ulen); + kfree(insns_sanitized); + if (fault) return -EFAULT; } + if (bpf_prog_is_dev_bound(prog->aux)) { + err = bpf_prog_offload_info_fill(&info, prog); + if (err) + return err; + } + done: if (copy_to_user(uinfo, &info, info_len) || put_user(info_len, &uattr->info.info_len)) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 1cd2c2d28fc3..a2b211262c25 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -772,7 +772,7 @@ static int check_subprogs(struct bpf_verifier_env *env) return -EPERM; } if (bpf_prog_is_dev_bound(env->prog->aux)) { - verbose(env, "funcation calls in offloaded programs are not supported yet\n"); + verbose(env, 
"function calls in offloaded programs are not supported yet\n"); return -EINVAL; } ret = add_subprog(env, i + insn[i].imm + 1); @@ -823,6 +823,7 @@ next: return 0; } +static struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env, const struct bpf_verifier_state *state, struct bpf_verifier_state *parent, @@ -867,7 +868,7 @@ bug: verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp); verbose(env, "regno %d parent frame %d current frame %d\n", regno, parent->curframe, state->curframe); - return 0; + return NULL; } static int mark_reg_read(struct bpf_verifier_env *env, @@ -1434,33 +1435,80 @@ static int update_stack_depth(struct bpf_verifier_env *env, const struct bpf_func_state *func, int off) { - u16 stack = env->subprog_stack_depth[func->subprogno], total = 0; - struct bpf_verifier_state *cur = env->cur_state; - int i; + u16 stack = env->subprog_stack_depth[func->subprogno]; if (stack >= -off) return 0; /* update known max for given subprogram */ env->subprog_stack_depth[func->subprogno] = -off; + return 0; +} - /* compute the total for current call chain */ - for (i = 0; i <= cur->curframe; i++) { - u32 depth = env->subprog_stack_depth[cur->frame[i]->subprogno]; - - /* round up to 32-bytes, since this is granularity - * of interpreter stack sizes - */ - depth = round_up(depth, 32); - total += depth; - } +/* starting from main bpf function walk all instructions of the function + * and recursively walk all callees that given function can call. + * Ignore jump and exit insns. + * Since recursion is prevented by check_cfg() this algorithm + * only needs a local stack of MAX_CALL_FRAMES to remember callsites + */ +static int check_max_stack_depth(struct bpf_verifier_env *env) +{ + int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end; + struct bpf_insn *insn = env->prog->insnsi; + int insn_cnt = env->prog->len; + int ret_insn[MAX_CALL_FRAMES]; + int ret_prog[MAX_CALL_FRAMES]; - if (total > MAX_BPF_STACK) { +process_func: + /* round up to 32-bytes, since this is granularity + * of interpreter stack size + */ + depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32); + if (depth > MAX_BPF_STACK) { verbose(env, "combined stack size of %d calls is %d. Too large\n", - cur->curframe, total); + frame + 1, depth); return -EACCES; } - return 0; +continue_func: + if (env->subprog_cnt == subprog) + subprog_end = insn_cnt; + else + subprog_end = env->subprog_starts[subprog]; + for (; i < subprog_end; i++) { + if (insn[i].code != (BPF_JMP | BPF_CALL)) + continue; + if (insn[i].src_reg != BPF_PSEUDO_CALL) + continue; + /* remember insn and function to return to */ + ret_insn[frame] = i + 1; + ret_prog[frame] = subprog; + + /* find the callee */ + i = i + insn[i].imm + 1; + subprog = find_subprog(env, i); + if (subprog < 0) { + WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", + i); + return -EFAULT; + } + subprog++; + frame++; + if (frame >= MAX_CALL_FRAMES) { + WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); + return -EFAULT; + } + goto process_func; + } + /* end of for() loop means the last insn of the 'subprog' + * was reached. 
Doesn't matter whether it was JA or EXIT + */ + if (frame == 0) + return 0; + depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32); + frame--; + i = ret_insn[frame]; + subprog = ret_prog[frame]; + goto continue_func; } static int get_callee_stack_depth(struct bpf_verifier_env *env, @@ -2105,9 +2153,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_func_state *caller, *callee; int i, subprog, target_insn; - if (state->curframe >= MAX_CALL_FRAMES) { + if (state->curframe + 1 >= MAX_CALL_FRAMES) { verbose(env, "the call stack of %d frames is too deep\n", - state->curframe); + state->curframe + 2); return -E2BIG; } @@ -4155,7 +4203,7 @@ static bool stacksafe(struct bpf_func_state *old, if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) /* explored state didn't use this */ - return true; + continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; @@ -4390,15 +4438,6 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return 0; } -static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, - int insn_idx, int prev_insn_idx) -{ - if (env->dev_ops && env->dev_ops->insn_hook) - return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); - - return 0; -} - static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; @@ -4475,14 +4514,20 @@ static int do_check(struct bpf_verifier_env *env) } if (env->log.level) { + const struct bpf_insn_cbs cbs = { + .cb_print = verbose, + }; + verbose(env, "%d: ", insn_idx); - print_bpf_insn(verbose, env, insn, - env->allow_ptr_leaks); + print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks); } - err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); - if (err) - return err; + if (bpf_prog_is_dev_bound(env->prog->aux)) { + err = bpf_prog_offload_verify_insn(env, insn_idx, + prev_insn_idx); + if (err) + return err; + } regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; @@ -5065,14 +5110,14 @@ static int jit_subprogs(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog, **func, *tmp; int i, j, subprog_start, subprog_end = 0, len, subprog; - struct bpf_insn *insn = prog->insnsi; + struct bpf_insn *insn; void *old_bpf_func; int err = -ENOMEM; if (env->subprog_cnt == 0) return 0; - for (i = 0; i < prog->len; i++, insn++) { + for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; @@ -5111,7 +5156,10 @@ static int jit_subprogs(struct bpf_verifier_env *env) goto out_free; memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], len * sizeof(struct bpf_insn)); + func[i]->type = prog->type; func[i]->len = len; + if (bpf_prog_calc_tag(func[i])) + goto out_free; func[i]->is_func = 1; /* Use bpf_prog_F_tag to indicate functions in stack traces. * Long term would need debug info to populate names @@ -5161,6 +5209,25 @@ static int jit_subprogs(struct bpf_verifier_env *env) bpf_prog_lock_ro(func[i]); bpf_prog_kallsyms_add(func[i]); } + + /* Last step: make now unused interpreter insns from main + * prog consistent for later dump requests, so they can + * later look the same as if they were interpreted only. 
+ */ + for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { + unsigned long addr; + + if (insn->code != (BPF_JMP | BPF_CALL) || + insn->src_reg != BPF_PSEUDO_CALL) + continue; + insn->off = env->insn_aux_data[i].call_imm; + subprog = find_subprog(env, i + insn->off + 1); + addr = (unsigned long)func[subprog + 1]->bpf_func; + addr &= PAGE_MASK; + insn->imm = (u64 (*)(u64, u64, u64, u64, u64)) + addr - __bpf_call_base; + } + prog->jited = 1; prog->bpf_func = func[0]->bpf_func; prog->aux->func = func; @@ -5390,7 +5457,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; - if (env->prog->aux->offload) { + if (bpf_prog_is_dev_bound(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto err_unlock; @@ -5427,6 +5494,9 @@ skip_full_check: sanitize_dead_code(env); if (ret == 0) + ret = check_max_stack_depth(env); + + if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 024085daab1a..a2c05d2476ac 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -123,7 +123,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from) */ do { css_task_iter_start(&from->self, 0, &it); - task = css_task_iter_next(&it); + + do { + task = css_task_iter_next(&it); + } while (task && (task->flags & PF_EXITING)); + if (task) get_task_struct(task); css_task_iter_end(&it); diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 0b1ffe147f24..2cf06c274e4c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -1397,7 +1397,7 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name, cft->name); else - strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX); + strlcpy(buf, cft->name, CGROUP_FILE_NAME_MAX); return buf; } @@ -1864,9 +1864,9 @@ void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts) root->flags = opts->flags; if (opts->release_agent) - strcpy(root->release_agent_path, opts->release_agent); + strlcpy(root->release_agent_path, opts->release_agent, PATH_MAX); if (opts->name) - strcpy(root->name, opts->name); + strlcpy(root->name, opts->name, MAX_CGROUP_ROOT_NAMELEN); if (opts->cpuset_clone_children) set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags); } @@ -4125,26 +4125,24 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it) static void css_task_iter_advance(struct css_task_iter *it) { - struct list_head *l = it->task_pos; + struct list_head *next; lockdep_assert_held(&css_set_lock); - WARN_ON_ONCE(!l); - repeat: /* * Advance iterator to find next entry. cset->tasks is consumed * first and then ->mg_tasks. After ->mg_tasks, we move onto the * next cset. 
*/ - l = l->next; + next = it->task_pos->next; - if (l == it->tasks_head) - l = it->mg_tasks_head->next; + if (next == it->tasks_head) + next = it->mg_tasks_head->next; - if (l == it->mg_tasks_head) + if (next == it->mg_tasks_head) css_task_iter_advance_css_set(it); else - it->task_pos = l; + it->task_pos = next; /* if PROCS, skip over tasks which aren't group leaders */ if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos && diff --git a/kernel/cpu.c b/kernel/cpu.c index 41376c3ac93b..53f7dc65f9a3 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -80,19 +80,19 @@ static struct lockdep_map cpuhp_state_down_map = STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map); -static void inline cpuhp_lock_acquire(bool bringup) +static inline void cpuhp_lock_acquire(bool bringup) { lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); } -static void inline cpuhp_lock_release(bool bringup) +static inline void cpuhp_lock_release(bool bringup) { lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map); } #else -static void inline cpuhp_lock_acquire(bool bringup) { } -static void inline cpuhp_lock_release(bool bringup) { } +static inline void cpuhp_lock_acquire(bool bringup) { } +static inline void cpuhp_lock_release(bool bringup) { } #endif @@ -1277,9 +1277,9 @@ static struct cpuhp_step cpuhp_bp_states[] = { * before blk_mq_queue_reinit_notify() from notify_dead(), * otherwise a RCU stall occurs. */ - [CPUHP_TIMERS_DEAD] = { + [CPUHP_TIMERS_PREPARE] = { .name = "timers:dead", - .startup.single = NULL, + .startup.single = timers_prepare_cpu, .teardown.single = timers_dead_cpu, }, /* Kicks the plugged cpu into life */ diff --git a/kernel/exit.c b/kernel/exit.c index df0c91d5606c..995453d9fb55 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1763,3 +1763,4 @@ __weak void abort(void) /* if that doesn't kill us, halt */ panic("Oops failed to kill thread"); } +EXPORT_SYMBOL(abort); diff --git a/kernel/fork.c b/kernel/fork.c index 432eadf6b58c..2295fc69717f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -721,8 +721,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, goto out; } /* a new mm has just been created */ - arch_dup_mmap(oldmm, mm); - retval = 0; + retval = arch_dup_mmap(oldmm, mm); out: up_write(&mm->mmap_sem); flush_tlb_mm(oldmm); diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h index 17f05ef8f575..e4d3819a91cc 100644 --- a/kernel/irq/debug.h +++ b/kernel/irq/debug.h @@ -12,6 +12,11 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) { + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); + + if (!__ratelimit(&ratelimit)) + return; + printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); printk("->handle_irq(): %p, ", desc->handle_irq); diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index 7f608ac39653..acfaaef8672a 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c @@ -113,6 +113,7 @@ static const struct irq_bit_descr irqdata_states[] = { BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING), BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED), BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN), + BIT_MASK_DESCR(IRQD_CAN_RESERVE), BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU), diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index c26c5bb6b491..508c03dfef25 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c @@ -364,10 +364,11 @@ irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq) 
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip); /* - * Separate lockdep class for interrupt chip which can nest irq_desc - * lock. + * Separate lockdep classes for interrupt chip which can nest irq_desc + * lock and request mutex. */ static struct lock_class_key irq_nested_lock_class; +static struct lock_class_key irq_nested_request_class; /* * irq_map_generic_chip - Map a generic chip for an irq domain @@ -409,7 +410,8 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, set_bit(idx, &gc->installed); if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK) - irq_set_lockdep_class(virq, &irq_nested_lock_class); + irq_set_lockdep_class(virq, &irq_nested_lock_class, + &irq_nested_request_class); if (chip->irq_calc_mask) chip->irq_calc_mask(data); @@ -479,7 +481,8 @@ void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, continue; if (flags & IRQ_GC_INIT_NESTED_LOCK) - irq_set_lockdep_class(i, &irq_nested_lock_class); + irq_set_lockdep_class(i, &irq_nested_lock_class, + &irq_nested_request_class); if (!(flags & IRQ_GC_NO_MASK)) { struct irq_data *d = irq_get_irq_data(i); diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 07d08ca701ec..ab19371eab9b 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -440,7 +440,7 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) #endif /* !CONFIG_GENERIC_PENDING_IRQ */ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) -static inline int irq_domain_activate_irq(struct irq_data *data, bool early) +static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve) { irqd_set_activated(data); return 0; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 4f4f60015e8a..62068ad46930 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -1693,7 +1693,7 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data) } } -static int __irq_domain_activate_irq(struct irq_data *irqd, bool early) +static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve) { int ret = 0; @@ -1702,9 +1702,9 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early) if (irqd->parent_data) ret = __irq_domain_activate_irq(irqd->parent_data, - early); + reserve); if (!ret && domain->ops->activate) { - ret = domain->ops->activate(domain, irqd, early); + ret = domain->ops->activate(domain, irqd, reserve); /* Rollback in case of error */ if (ret && irqd->parent_data) __irq_domain_deactivate_irq(irqd->parent_data); @@ -1716,17 +1716,18 @@ static int __irq_domain_activate_irq(struct irq_data *irqd, bool early) /** * irq_domain_activate_irq - Call domain_ops->activate recursively to activate * interrupt - * @irq_data: outermost irq_data associated with interrupt + * @irq_data: Outermost irq_data associated with interrupt + * @reserve: If set only reserve an interrupt vector instead of assigning one * * This is the second step to call domain_ops->activate to program interrupt * controllers, so the interrupt could actually get delivered. 
*/ -int irq_domain_activate_irq(struct irq_data *irq_data, bool early) +int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve) { int ret = 0; if (!irqd_is_activated(irq_data)) - ret = __irq_domain_activate_irq(irq_data, early); + ret = __irq_domain_activate_irq(irq_data, reserve); if (!ret) irqd_set_activated(irq_data); return ret; diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index edb987b2c58d..2f3c4f5382cc 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -339,6 +339,40 @@ int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev, return ret; } +/* + * Carefully check whether the device can use reservation mode. If + * reservation mode is enabled then the early activation will assign a + * dummy vector to the device. If the PCI/MSI device does not support + * masking of the entry then this can result in spurious interrupts when + * the device driver is not absolutely careful. But even then a malfunction + * of the hardware could result in a spurious interrupt on the dummy vector + * and render the device unusable. If the entry can be masked then the core + * logic will prevent the spurious interrupt and reservation mode can be + * used. For now reservation mode is restricted to PCI/MSI. + */ +static bool msi_check_reservation_mode(struct irq_domain *domain, + struct msi_domain_info *info, + struct device *dev) +{ + struct msi_desc *desc; + + if (domain->bus_token != DOMAIN_BUS_PCI_MSI) + return false; + + if (!(info->flags & MSI_FLAG_MUST_REACTIVATE)) + return false; + + if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask) + return false; + + /* + * Checking the first MSI descriptor is sufficient. MSIX supports + * masking and MSI does so when the maskbit is set. + */ + desc = first_msi_entry(dev); + return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit; +} + /** * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain * @domain: The domain to allocate from @@ -353,9 +387,11 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, { struct msi_domain_info *info = domain->host_data; struct msi_domain_ops *ops = info->ops; - msi_alloc_info_t arg; + struct irq_data *irq_data; struct msi_desc *desc; + msi_alloc_info_t arg; int i, ret, virq; + bool can_reserve; ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg); if (ret) @@ -385,6 +421,8 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, if (ops->msi_finish) ops->msi_finish(&arg, 0); + can_reserve = msi_check_reservation_mode(domain, info, dev); + for_each_msi_entry(desc, dev) { virq = desc->irq; if (desc->nvec_used == 1) @@ -397,15 +435,25 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, * the MSI entries before the PCI layer enables MSI in the * card. Otherwise the card latches a random msi message. */ - if (info->flags & MSI_FLAG_ACTIVATE_EARLY) { - struct irq_data *irq_data; + if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY)) + continue; + irq_data = irq_domain_get_irq_data(domain, desc->irq); + if (!can_reserve) + irqd_clr_can_reserve(irq_data); + ret = irq_domain_activate_irq(irq_data, can_reserve); + if (ret) + goto cleanup; + } + + /* + * If these interrupts use reservation mode, clear the activated bit + * so request_irq() will assign the final vector. 
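+ * + * In short, the two-phase flow as wired up above: irq_domain_activate_irq(irq_data, true) reserves a vector during msi_domain_alloc_irqs(), irqd_clr_activated() then drops the activated bit, and the real vector is assigned when request_irq() activates the interrupt again with reserve == false.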
+ */ + if (can_reserve) { + for_each_msi_entry(desc, dev) { irq_data = irq_domain_get_irq_data(domain, desc->irq); - ret = irq_domain_activate_irq(irq_data, true); - if (ret) - goto cleanup; - if (info->flags & MSI_FLAG_MUST_REACTIVATE) - irqd_clr_activated(irq_data); + irqd_clr_activated(irq_data); } } return 0; diff --git a/kernel/pid.c b/kernel/pid.c index b13b624e2c49..1e8bb6550ec4 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -193,10 +193,8 @@ struct pid *alloc_pid(struct pid_namespace *ns) } if (unlikely(is_child_reaper(pid))) { - if (pid_ns_prepare_proc(ns)) { - disable_pid_allocation(ns); + if (pid_ns_prepare_proc(ns)) goto out_free; - } } get_pid_ns(ns); @@ -226,6 +224,10 @@ out_free: while (++i <= ns->level) idr_remove(&ns->idr, (pid->numbers + i)->nr); + /* On failure to allocate the first pid, reset the state */ + if (ns->pid_allocated == PIDNS_ADDING) + idr_set_cursor(&ns->idr, 0); + spin_unlock_irq(&pidmap_lock); kmem_cache_free(ns->pid_cachep, pid); diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 2f52ec0f1539..d6717a3331a1 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -244,7 +244,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util, #ifdef CONFIG_NO_HZ_COMMON static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { - unsigned long idle_calls = tick_nohz_get_idle_calls(); + unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu); bool ret = idle_calls == sg_cpu->saved_idle_calls; sg_cpu->saved_idle_calls = idle_calls; diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig index e776fc8cc1df..f6b5f19223d6 100644 --- a/kernel/time/Kconfig +++ b/kernel/time/Kconfig @@ -95,6 +95,7 @@ config NO_HZ_FULL select RCU_NOCB_CPU select VIRT_CPU_ACCOUNTING_GEN select IRQ_WORK + select CPU_ISOLATION help Adaptively try to shutdown the tick whenever possible, even when the CPU is running tasks. Typically this requires running a single diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 99578f06c8d4..f7cc7abfcf25 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -650,6 +650,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) ts->next_tick = 0; } +static inline bool local_timer_softirq_pending(void) +{ + return local_softirq_pending() & BIT(TIMER_SOFTIRQ); +} + static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, ktime_t now, int cpu) { @@ -666,8 +671,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, } while (read_seqretry(&jiffies_lock, seq)); ts->last_jiffies = basejiff; - if (rcu_needs_cpu(basemono, &next_rcu) || - arch_needs_cpu() || irq_work_needs_cpu()) { + /* + * Keep the periodic tick when RCU, the architecture or irq_work + * requests it. + * Aside from that, check whether the local timer softirq is + * pending. If so, it's a bad idea to call get_next_timer_interrupt() + * because there is an already expired timer, so it will request + * immediate expiry, which rearms the hardware timer with a + * minimal delta which brings us back to this place + * immediately. Lather, rinse and repeat... + */ + if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() || + irq_work_needs_cpu() || local_timer_softirq_pending()) { next_tick = basemono + TICK_NSEC; } else { /* @@ -986,6 +1001,19 @@ ktime_t tick_nohz_get_sleep_length(void) } /** + * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value + * for a particular CPU.
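+ * @cpu: CPU number for which the idle calls counter is queried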
+ * + * Called from the schedutil frequency scaling governor in scheduler context. + */ +unsigned long tick_nohz_get_idle_calls_cpu(int cpu) +{ + struct tick_sched *ts = tick_get_tick_sched(cpu); + + return ts->idle_calls; +} + +/** * tick_nohz_get_idle_calls - return the current idle calls counter value * * Called from the schedutil frequency scaling governor in scheduler context. diff --git a/kernel/time/timer.c b/kernel/time/timer.c index ffebcf878fba..89a9e1b4264a 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -823,11 +823,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu) struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu); /* - * If the timer is deferrable and nohz is active then we need to use - * the deferrable base. + * If the timer is deferrable and NO_HZ_COMMON is set then we need + * to use the deferrable base. */ - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && - (tflags & TIMER_DEFERRABLE)) + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu); return base; } @@ -837,11 +836,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags) struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]); /* - * If the timer is deferrable and nohz is active then we need to use - * the deferrable base. + * If the timer is deferrable and NO_HZ_COMMON is set then we need + * to use the deferrable base. */ - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active && - (tflags & TIMER_DEFERRABLE)) + if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE)) base = this_cpu_ptr(&timer_bases[BASE_DEF]); return base; } @@ -1009,8 +1007,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option if (!ret && (options & MOD_TIMER_PENDING_ONLY)) goto out_unlock; - debug_activate(timer, expires); - new_base = get_target_base(base, timer->flags); if (base != new_base) { @@ -1034,6 +1030,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option } } + debug_activate(timer, expires); + timer->expires = expires; /* * If 'idx' was calculated above and the base time did not advance @@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) base->must_forward_clk = false; __run_timers(base); - if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) + if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); } @@ -1855,6 +1853,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h } } +int timers_prepare_cpu(unsigned int cpu) +{ + struct timer_base *base; + int b; + + for (b = 0; b < NR_BASES; b++) { + base = per_cpu_ptr(&timer_bases[b], cpu); + base->clk = jiffies; + base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA; + base->is_idle = false; + base->must_forward_clk = true; + } + return 0; +} + int timers_dead_cpu(unsigned int cpu) { struct timer_base *old_base; diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index c87766c1c204..9ab18995ff1e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); /* Missed count stored at end */ #define RB_MISSED_STORED (1 << 30) +#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED) + struct buffer_data_page { u64 time_stamp; /* page time stamp */ local_t commit; /* write committed index */ @@ -331,7 +333,9 @@ static void rb_init_page(struct 
buffer_data_page *bpage) */ size_t ring_buffer_page_len(void *page) { - return local_read(&((struct buffer_data_page *)page)->commit) + struct buffer_data_page *bpage = page; + + return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS) + BUF_PAGE_HDR_SIZE; } @@ -4400,8 +4404,13 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; struct buffer_data_page *bpage = data; + struct page *page = virt_to_page(bpage); unsigned long flags; + /* If the page is still in use someplace else, we can't reuse it */ + if (page_ref_count(page) > 1) + goto out; + local_irq_save(flags); arch_spin_lock(&cpu_buffer->lock); @@ -4413,6 +4422,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data) arch_spin_unlock(&cpu_buffer->lock); local_irq_restore(flags); + out: free_page((unsigned long)bpage); } EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 59518b8126d0..2a8d8a294345 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6769,7 +6769,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; - int entries, size, i; + int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE @@ -6823,14 +6823,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, break; } - /* - * zero out any left over data, this is going to - * user land. - */ - size = ring_buffer_page_len(ref->page); - if (size < PAGE_SIZE) - memset(ref->page + size, 0, PAGE_SIZE - size); - page = virt_to_page(ref->page); spd.pages[i] = page; @@ -7588,6 +7580,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); + buf->buffer = NULL; return -ENOMEM; } @@ -7611,7 +7604,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size) allocate_snapshot ? 
size : 1); if (WARN_ON(ret)) { ring_buffer_free(tr->trace_buffer.buffer); + tr->trace_buffer.buffer = NULL; free_percpu(tr->trace_buffer.data); + tr->trace_buffer.data = NULL; return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index c3e84edc47c9..2615074d3de5 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -346,7 +346,8 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj, static void zap_modalias_env(struct kobj_uevent_env *env) { static const char modalias_prefix[] = "MODALIAS="; - int i; + size_t len; + int i, j; for (i = 0; i < env->envp_idx;) { if (strncmp(env->envp[i], modalias_prefix, @@ -355,11 +356,18 @@ static void zap_modalias_env(struct kobj_uevent_env *env) continue; } - if (i != env->envp_idx - 1) - memmove(&env->envp[i], &env->envp[i + 1], - sizeof(env->envp[i]) * env->envp_idx - 1); + len = strlen(env->envp[i]) + 1; + + if (i != env->envp_idx - 1) { + memmove(env->envp[i], env->envp[i + 1], + env->buflen - len); + + for (j = i; j < env->envp_idx - 1; j++) + env->envp[j] = env->envp[j + 1] - len; + } env->envp_idx--; + env->buflen -= len; } } diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index 57fd45ab7af1..08c60d10747f 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -671,7 +671,23 @@ do { \ ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 -#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) +#if defined(__mips_isa_rev) && __mips_isa_rev >= 6 +/* + * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C + * code below, so we special case MIPS64r6 until the compiler can do better. + */ +#define umul_ppmm(w1, w0, u, v) \ +do { \ + __asm__ ("dmulu %0,%1,%2" \ + : "=d" ((UDItype)(w0)) \ + : "d" ((UDItype)(u)), \ + "d" ((UDItype)(v))); \ + __asm__ ("dmuhu %0,%1,%2" \ + : "=d" ((UDItype)(w1)) \ + : "d" ((UDItype)(u)), \ + "d" ((UDItype)(v))); \ +} while (0) +#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4) #define umul_ppmm(w1, w0, u, v) \ do { \ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ diff --git a/lib/timerqueue.c b/lib/timerqueue.c index 4a720ed4fdaf..0d54bcbc8170 100644 --- a/lib/timerqueue.c +++ b/lib/timerqueue.c @@ -33,8 +33,9 @@ * @head: head of timerqueue * @node: timer node to be added * - * Adds the timer node to the timerqueue, sorted by the - * node's expires value. + * Adds the timer node to the timerqueue, sorted by the node's expires + * value. Returns true if the newly added timer is the first expiring timer in + * the queue. */ bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) { @@ -70,7 +71,8 @@ EXPORT_SYMBOL_GPL(timerqueue_add); * @head: head of timerqueue * @node: timer node to be removed * - * Removes the timer node from the timerqueue. + * Removes the timer node from the timerqueue. Returns true if the queue is + * not empty after the remove. */ bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) { diff --git a/mm/debug.c b/mm/debug.c index d947f3e03b0d..56e2d9125ea5 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -50,7 +50,7 @@ void __dump_page(struct page *page, const char *reason) */ int mapcount = PageSlab(page) ? 
0 : page_mapcount(page); - pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx", + pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx", page, page_ref_count(page), mapcount, page->mapping, page_to_pgoff(page)); if (PageCompound(page)) @@ -69,7 +69,7 @@ void __dump_page(struct page *page, const char *reason) #ifdef CONFIG_MEMCG if (page->mem_cgroup) - pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); + pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup); #endif } @@ -84,10 +84,10 @@ EXPORT_SYMBOL(dump_page); void dump_vma(const struct vm_area_struct *vma) { - pr_emerg("vma %p start %p end %p\n" - "next %p prev %p mm %p\n" - "prot %lx anon_vma %p vm_ops %p\n" - "pgoff %lx file %p private_data %p\n" + pr_emerg("vma %px start %px end %px\n" + "next %px prev %px mm %px\n" + "prot %lx anon_vma %px vm_ops %px\n" + "pgoff %lx file %px private_data %px\n" "flags: %#lx(%pGv)\n", vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, vma->vm_prev, vma->vm_mm, @@ -100,27 +100,27 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n" + pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" #ifdef CONFIG_MMU - "get_unmapped_area %p\n" + "get_unmapped_area %px\n" #endif "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" - "pgd %p mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" + "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" "pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n" "start_code %lx end_code %lx start_data %lx end_data %lx\n" "start_brk %lx brk %lx start_stack %lx\n" "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" - "binfmt %p flags %lx core_state %p\n" + "binfmt %px flags %lx core_state %px\n" #ifdef CONFIG_AIO - "ioctx_table %p\n" + "ioctx_table %px\n" #endif #ifdef CONFIG_MEMCG - "owner %p " + "owner %px " #endif - "exe_file %p\n" + "exe_file %px\n" #ifdef CONFIG_MMU_NOTIFIER - "mmu_notifier_mm %p\n" + "mmu_notifier_mm %px\n" #endif #ifdef CONFIG_NUMA_BALANCING "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" diff --git a/mm/mprotect.c b/mm/mprotect.c index ec39f730a0bf..58b629bb70de 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -166,7 +166,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, next = pmd_addr_end(addr, end); if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd) && pmd_none_or_clear_bad(pmd)) - continue; + goto next; /* invoke the mmu notifier if the pmd is populated */ if (!mni_start) { @@ -188,7 +188,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, } /* huge pmd was handled */ - continue; + goto next; } } /* fall through, the trans huge pmd just split */ @@ -196,6 +196,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, this_pages = change_pte_range(vma, pmd, addr, next, newprot, dirty_accountable, prot_numa); pages += this_pages; +next: + cond_resched(); } while (pmd++, addr = next, addr != end); if (mni_start) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7e5e775e97f4..76c9688b6a0a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6260,6 +6260,8 @@ void __paginginit zero_resv_unavail(void) pgcnt = 0; for_each_resv_unavail_range(i, &start, &end) { for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) { + if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) + continue; mm_zero_struct_page(pfn_to_page(pfn)); pgcnt++; } diff --git a/mm/sparse.c 
b/mm/sparse.c index 7a5dacaa06e3..2609aba121e8 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -211,7 +211,7 @@ void __init memory_present(int nid, unsigned long start, unsigned long end) if (unlikely(!mem_section)) { unsigned long size, align; - size = sizeof(struct mem_section) * NR_SECTION_ROOTS; + size = sizeof(struct mem_section*) * NR_SECTION_ROOTS; align = 1 << (INTERNODE_CACHE_SHIFT); mem_section = memblock_virt_alloc(size, align); } diff --git a/mm/vmscan.c b/mm/vmscan.c index c02c850ea349..47d5ced51f2d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -297,10 +297,13 @@ EXPORT_SYMBOL(register_shrinker); */ void unregister_shrinker(struct shrinker *shrinker) { + if (!shrinker->nr_deferred) + return; down_write(&shrinker_rwsem); list_del(&shrinker->list); up_write(&shrinker_rwsem); kfree(shrinker->nr_deferred); + shrinker->nr_deferred = NULL; } EXPORT_SYMBOL(unregister_shrinker); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 685049a9048d..683c0651098c 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -53,6 +53,7 @@ #include <linux/mount.h> #include <linux/migrate.h> #include <linux/pagemap.h> +#include <linux/fs.h> #define ZSPAGE_MAGIC 0x58 diff --git a/net/Kconfig b/net/Kconfig index 9dba2715919d..37ec8e67af57 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -182,6 +182,7 @@ config BRIDGE_NETFILTER depends on BRIDGE depends on NETFILTER && INET depends on NETFILTER_ADVANCED + select NETFILTER_FAMILY_BRIDGE default m ---help--- Enabling this option will let arptables resp. iptables see bridged @@ -336,23 +337,6 @@ config NET_PKTGEN To compile this code as a module, choose M here: the module will be called pktgen. -config NET_TCPPROBE - tristate "TCP connection probing" - depends on INET && PROC_FS && KPROBES - ---help--- - This module allows for capturing the changes to TCP connection - state in response to incoming packets. It is used for debugging - TCP congestion avoidance modules. If you don't understand - what was just said, you don't need it: say N. - - Documentation on how to use TCP connection probing can be found - at: - - http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe - - To compile this code as a module, choose M here: the - module will be called tcp_probe. - config NET_DROP_MONITOR tristate "Network packet drop alerting service" depends on INET && TRACEPOINTS diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index c2eea1b8737a..27f1d4f2114a 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -991,7 +991,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, unsigned int i; int ret; - e = rcu_dereference(net->nf.hooks[NFPROTO_BRIDGE][hook]); + e = rcu_dereference(net->nf.hooks_bridge[hook]); if (!e) return okfn(net, sk, skb); diff --git a/net/bridge/netfilter/Kconfig b/net/bridge/netfilter/Kconfig index e7ef1a1ef3a6..225d1668dfdd 100644 --- a/net/bridge/netfilter/Kconfig +++ b/net/bridge/netfilter/Kconfig @@ -4,6 +4,7 @@ # menuconfig NF_TABLES_BRIDGE depends on BRIDGE && NETFILTER && NF_TABLES + select NETFILTER_FAMILY_BRIDGE tristate "Ethernet Bridge nf_tables support" if NF_TABLES_BRIDGE @@ -29,6 +30,7 @@ endif # NF_TABLES_BRIDGE menuconfig BRIDGE_NF_EBTABLES tristate "Ethernet Bridge tables (ebtables) support" depends on BRIDGE && NETFILTER && NETFILTER_XTABLES + select NETFILTER_FAMILY_BRIDGE help ebtables is a general, extensible frame/packet identification framework. 
Say 'Y' or 'M' here if you want to do Ethernet diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index 97afdc0744e6..86774b5c3b73 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c @@ -25,15 +25,17 @@ nft_do_chain_bridge(void *priv, { struct nft_pktinfo pkt; + nft_set_pktinfo(&pkt, skb, state); + switch (eth_hdr(skb)->h_proto) { case htons(ETH_P_IP): - nft_set_pktinfo_ipv4_validate(&pkt, skb, state); + nft_set_pktinfo_ipv4_validate(&pkt, skb); break; case htons(ETH_P_IPV6): - nft_set_pktinfo_ipv6_validate(&pkt, skb, state); + nft_set_pktinfo_ipv6_validate(&pkt, skb); break; default: - nft_set_pktinfo_unspec(&pkt, skb, state); + nft_set_pktinfo_unspec(&pkt, skb); break; } @@ -44,14 +46,6 @@ static struct nft_af_info nft_af_bridge __read_mostly = { .family = NFPROTO_BRIDGE, .nhooks = NF_BR_NUMHOOKS, .owner = THIS_MODULE, - .nops = 1, - .hooks = { - [NF_BR_PRE_ROUTING] = nft_do_chain_bridge, - [NF_BR_LOCAL_IN] = nft_do_chain_bridge, - [NF_BR_FORWARD] = nft_do_chain_bridge, - [NF_BR_LOCAL_OUT] = nft_do_chain_bridge, - [NF_BR_POST_ROUTING] = nft_do_chain_bridge, - }, }; static int nf_tables_bridge_init_net(struct net *net) @@ -92,67 +86,32 @@ static const struct nf_chain_type filter_bridge = { (1 << NF_BR_FORWARD) | (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_POST_ROUTING), -}; - -static void nf_br_saveroute(const struct sk_buff *skb, - struct nf_queue_entry *entry) -{ -} - -static int nf_br_reroute(struct net *net, struct sk_buff *skb, - const struct nf_queue_entry *entry) -{ - return 0; -} - -static __sum16 nf_br_checksum(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, u_int8_t protocol) -{ - return 0; -} - -static __sum16 nf_br_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol) -{ - return 0; -} - -static int nf_br_route(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict __always_unused) -{ - return 0; -} - -static const struct nf_afinfo nf_br_afinfo = { - .family = AF_BRIDGE, - .checksum = nf_br_checksum, - .checksum_partial = nf_br_checksum_partial, - .route = nf_br_route, - .saveroute = nf_br_saveroute, - .reroute = nf_br_reroute, - .route_key_size = 0, + .hooks = { + [NF_BR_PRE_ROUTING] = nft_do_chain_bridge, + [NF_BR_LOCAL_IN] = nft_do_chain_bridge, + [NF_BR_FORWARD] = nft_do_chain_bridge, + [NF_BR_LOCAL_OUT] = nft_do_chain_bridge, + [NF_BR_POST_ROUTING] = nft_do_chain_bridge, + }, }; static int __init nf_tables_bridge_init(void) { int ret; - nf_register_afinfo(&nf_br_afinfo); ret = nft_register_chain_type(&filter_bridge); if (ret < 0) - goto err1; + return ret; ret = register_pernet_subsys(&nf_tables_bridge_net_ops); if (ret < 0) - goto err2; + goto err_register_subsys; return ret; -err2: +err_register_subsys: nft_unregister_chain_type(&filter_bridge); -err1: - nf_unregister_afinfo(&nf_br_afinfo); + return ret; } @@ -160,7 +119,6 @@ static void __exit nf_tables_bridge_exit(void) { unregister_pernet_subsys(&nf_tables_bridge_net_ops); nft_unregister_chain_type(&filter_bridge); - nf_unregister_afinfo(&nf_br_afinfo); } module_init(nf_tables_bridge_init); diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 71b6ab240dea..38c2b7a890dd 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -8,7 +8,6 @@ #include <linux/string.h> #include <linux/skbuff.h> -#include <linux/hardirq.h> #include <linux/export.h> #include <net/caif/cfpkt.h> diff --git a/net/caif/chnl_net.c 
b/net/caif/chnl_net.c index 922ac1d605b3..53ecda10b790 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -8,7 +8,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/fs.h> -#include <linux/hardirq.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netdevice.h> diff --git a/net/core/Makefile b/net/core/Makefile index 1fd0a9c88b1b..6dbbba8c57ae 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \ - fib_notifier.o + fib_notifier.o xdp.o obj-y += net-sysfs.o obj-$(CONFIG_PROC_FS) += net-procfs.o diff --git a/net/core/dev.c b/net/core/dev.c index 59ead3910ab7..5cb782f074d7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1146,7 +1146,19 @@ EXPORT_SYMBOL(dev_alloc_name); int dev_get_valid_name(struct net *net, struct net_device *dev, const char *name) { - return dev_alloc_name_ns(net, dev, name); + BUG_ON(!net); + + if (!dev_valid_name(name)) + return -EINVAL; + + if (strchr(name, '%')) + return dev_alloc_name_ns(net, dev, name); + else if (__dev_get_by_name(net, name)) + return -EEXIST; + else if (dev->name != name) + strlcpy(dev->name, name, IFNAMSIZ); + + return 0; } EXPORT_SYMBOL(dev_get_valid_name); @@ -3059,7 +3071,7 @@ int skb_csum_hwoffload_help(struct sk_buff *skb, } EXPORT_SYMBOL(skb_csum_hwoffload_help); -static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev) +static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev, bool *again) { netdev_features_t features; @@ -3083,9 +3095,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device __skb_linearize(skb)) goto out_kfree_skb; - if (validate_xmit_xfrm(skb, features)) - goto out_kfree_skb; - /* If packet is not checksummed and device does not * support checksumming for this protocol, complete * checksumming here. 
@@ -3102,6 +3111,8 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device } } + skb = validate_xmit_xfrm(skb, features, again); + return skb; out_kfree_skb: @@ -3111,7 +3122,7 @@ out_null: return NULL; } -struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev) +struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again) { struct sk_buff *next, *head = NULL, *tail; @@ -3122,7 +3133,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d /* in case skb wont be segmented, point to itself */ skb->prev = skb; - skb = validate_xmit_skb(skb, dev); + skb = validate_xmit_skb(skb, dev, again); if (!skb) continue; @@ -3449,6 +3460,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) struct netdev_queue *txq; struct Qdisc *q; int rc = -ENOMEM; + bool again = false; skb_reset_mac_header(skb); @@ -3510,7 +3522,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) XMIT_RECURSION_LIMIT)) goto recursion_alert; - skb = validate_xmit_skb(skb, dev); + skb = validate_xmit_skb(skb, dev, &again); if (!skb) goto out; @@ -3906,9 +3918,33 @@ drop: return NET_RX_DROP; } +static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + struct netdev_rx_queue *rxqueue; + + rxqueue = dev->_rx; + + if (skb_rx_queue_recorded(skb)) { + u16 index = skb_get_rx_queue(skb); + + if (unlikely(index >= dev->real_num_rx_queues)) { + WARN_ONCE(dev->real_num_rx_queues > 1, + "%s received packet on queue %u, but number " + "of RX queues is %u\n", + dev->name, index, dev->real_num_rx_queues); + + return rxqueue; /* Return first rxqueue */ + } + rxqueue += index; + } + return rxqueue; +} + static u32 netif_receive_generic_xdp(struct sk_buff *skb, struct bpf_prog *xdp_prog) { + struct netdev_rx_queue *rxqueue; u32 metalen, act = XDP_DROP; struct xdp_buff xdp; void *orig_data; @@ -3952,6 +3988,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, xdp.data_hard_start = skb->data - skb_headroom(skb); orig_data = xdp.data; + rxqueue = netif_get_rxqueue(skb); + xdp.rxq = &rxqueue->xdp_rxq; + act = bpf_prog_run_xdp(xdp_prog, &xdp); off = xdp.data - orig_data; @@ -4194,6 +4233,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h) spin_unlock(root_lock); } } + + xfrm_dev_backlog(sd); } #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_ATM_LANE) @@ -7587,12 +7628,12 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev, } EXPORT_SYMBOL(netif_stacked_transfer_operstate); -#ifdef CONFIG_SYSFS static int netif_alloc_rx_queues(struct net_device *dev) { unsigned int i, count = dev->num_rx_queues; struct netdev_rx_queue *rx; size_t sz = count * sizeof(*rx); + int err = 0; BUG_ON(count < 1); @@ -7602,11 +7643,39 @@ static int netif_alloc_rx_queues(struct net_device *dev) dev->_rx = rx; - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { rx[i].dev = dev; + + /* XDP RX-queue setup */ + err = xdp_rxq_info_reg(&rx[i].xdp_rxq, dev, i); + if (err < 0) + goto err_rxq_info; + } return 0; + +err_rxq_info: + /* Rollback successful reg's and free other resources */ + while (i--) + xdp_rxq_info_unreg(&rx[i].xdp_rxq); + kfree(dev->_rx); + dev->_rx = NULL; + return err; +} + +static void netif_free_rx_queues(struct net_device *dev) +{ + unsigned int i, count = dev->num_rx_queues; + struct netdev_rx_queue *rx; + + /* netif_alloc_rx_queues alloc failed, resources have been unreg'ed */ + if (!dev->_rx) + 
return; + + rx = dev->_rx; + + for (i = 0; i < count; i++) + xdp_rxq_info_unreg(&rx[i].xdp_rxq); } -#endif static void netdev_init_one_queue(struct net_device *dev, struct netdev_queue *queue, void *_unused) @@ -8167,12 +8236,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, return NULL; } -#ifdef CONFIG_SYSFS if (rxqs < 1) { pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n"); return NULL; } -#endif alloc_size = sizeof(struct net_device); if (sizeof_priv) { @@ -8229,12 +8296,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, if (netif_alloc_netdev_queues(dev)) goto free_all; -#ifdef CONFIG_SYSFS dev->num_rx_queues = rxqs; dev->real_num_rx_queues = rxqs; if (netif_alloc_rx_queues(dev)) goto free_all; -#endif strcpy(dev->name, name); dev->name_assign_type = name_assign_type; @@ -8273,9 +8338,7 @@ void free_netdev(struct net_device *dev) might_sleep(); netif_free_tx_queues(dev); -#ifdef CONFIG_SYSFS - kvfree(dev->_rx); -#endif + netif_free_rx_queues(dev); kfree(rcu_dereference_protected(dev->ingress_queue, 1)); @@ -8875,6 +8938,9 @@ static int __init net_dev_init(void) skb_queue_head_init(&sd->input_pkt_queue); skb_queue_head_init(&sd->process_queue); +#ifdef CONFIG_XFRM_OFFLOAD + skb_queue_head_init(&sd->xfrm_backlog); +#endif INIT_LIST_HEAD(&sd->poll_list); sd->output_queue_tailp = &sd->output_queue; #ifdef CONFIG_RPS diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 50a79203043b..107b122c8969 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -771,15 +771,6 @@ static int ethtool_set_link_ksettings(struct net_device *dev, return dev->ethtool_ops->set_link_ksettings(dev, &link_ksettings); } -static void -warn_incomplete_ethtool_legacy_settings_conversion(const char *details) -{ - char name[sizeof(current->comm)]; - - pr_info_once("warning: `%s' uses legacy ethtool link settings API, %s\n", - get_task_comm(name, current), details); -} - /* Query device for its ethtool_cmd settings. 
* * Backward compatibility note: for compatibility with legacy ethtool, @@ -806,10 +797,8 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) &link_ksettings); if (err < 0) return err; - if (!convert_link_ksettings_to_legacy_settings(&cmd, - &link_ksettings)) - warn_incomplete_ethtool_legacy_settings_conversion( - "link modes are only partially reported"); + convert_link_ksettings_to_legacy_settings(&cmd, + &link_ksettings); /* send a sensible cmd tag back to user */ cmd.cmd = ETHTOOL_GSET; @@ -1704,14 +1693,23 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) { - struct ethtool_ringparam ringparam; + struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM }; - if (!dev->ethtool_ops->set_ringparam) + if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam) return -EOPNOTSUPP; if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) return -EFAULT; + dev->ethtool_ops->get_ringparam(dev, &max); + + /* ensure new ring parameters are within the maximums */ + if (ringparam.rx_pending > max.rx_max_pending || + ringparam.rx_mini_pending > max.rx_mini_max_pending || + ringparam.rx_jumbo_pending > max.rx_jumbo_max_pending || + ringparam.tx_pending > max.tx_max_pending) + return -EINVAL; + return dev->ethtool_ops->set_ringparam(dev, &ringparam); } diff --git a/net/core/filter.c b/net/core/filter.c index 754abe1041b7..acdb94c0e97f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2684,8 +2684,9 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd) return 0; } -int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb, - struct bpf_prog *xdp_prog) +static int xdp_do_generic_redirect_map(struct net_device *dev, + struct sk_buff *skb, + struct bpf_prog *xdp_prog) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); unsigned long map_owner = ri->map_owner; @@ -4303,6 +4304,25 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data_end)); break; + case offsetof(struct xdp_md, ingress_ifindex): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), + si->dst_reg, si->src_reg, + offsetof(struct xdp_buff, rxq)); + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev), + si->dst_reg, si->dst_reg, + offsetof(struct xdp_rxq_info, dev)); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, + bpf_target_off(struct net_device, + ifindex, 4, target_size)); + break; + case offsetof(struct xdp_md, rx_queue_index): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), + si->dst_reg, si->src_reg, + offsetof(struct xdp_buff, rxq)); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, + bpf_target_off(struct xdp_rxq_info, + queue_index, 4, target_size)); + break; } return insn - insn_buf; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index c688dc564b11..16d644a4f974 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -904,6 +904,10 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, nla_total_size_64bit(sizeof(__u64)) + /* IFLA_VF_STATS_MULTICAST */ nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_RX_DROPPED */ + nla_total_size_64bit(sizeof(__u64)) + + /* IFLA_VF_STATS_TX_DROPPED */ + nla_total_size_64bit(sizeof(__u64)) + nla_total_size(sizeof(struct ifla_vf_trust))); return size; } else @@ -1258,7 +1262,11 @@ static noinline_for_stack 
int rtnl_fill_vfinfo(struct sk_buff *skb, nla_put_u64_64bit(skb, IFLA_VF_STATS_BROADCAST, vf_stats.broadcast, IFLA_VF_STATS_PAD) || nla_put_u64_64bit(skb, IFLA_VF_STATS_MULTICAST, - vf_stats.multicast, IFLA_VF_STATS_PAD)) { + vf_stats.multicast, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_RX_DROPPED, + vf_stats.rx_dropped, IFLA_VF_STATS_PAD) || + nla_put_u64_64bit(skb, IFLA_VF_STATS_TX_DROPPED, + vf_stats.tx_dropped, IFLA_VF_STATS_PAD)) { nla_nest_cancel(skb, vfstats); goto nla_put_vf_failure; } @@ -1751,18 +1759,18 @@ static bool link_dump_filtered(struct net_device *dev, return false; } -static struct net *get_target_net(struct sk_buff *skb, int netnsid) +static struct net *get_target_net(struct sock *sk, int netnsid) { struct net *net; - net = get_net_ns_by_id(sock_net(skb->sk), netnsid); + net = get_net_ns_by_id(sock_net(sk), netnsid); if (!net) return ERR_PTR(-EINVAL); /* For now, the caller is required to have CAP_NET_ADMIN in * the user namespace owning the target net ns. */ - if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { + if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) { put_net(net); return ERR_PTR(-EACCES); } @@ -1803,7 +1811,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) ifla_policy, NULL) >= 0) { if (tb[IFLA_IF_NETNSID]) { netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); - tgt_net = get_target_net(skb, netnsid); + tgt_net = get_target_net(skb->sk, netnsid); if (IS_ERR(tgt_net)) { tgt_net = net; netnsid = -1; @@ -2985,7 +2993,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh, if (tb[IFLA_IF_NETNSID]) { netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]); - tgt_net = get_target_net(skb, netnsid); + tgt_net = get_target_net(NETLINK_CB(skb).sk, netnsid); if (IS_ERR(tgt_net)) return PTR_ERR(tgt_net); } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a3cb0be4c6f3..01e8285aea73 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1177,12 +1177,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) int i, new_frags; u32 d_off; - if (!num_frags) - goto release; - if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) return -EINVAL; + if (!num_frags) + goto release; + new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < new_frags; i++) { page = alloc_page(gfp_mask); @@ -3656,6 +3656,10 @@ normal: skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & SKBTX_SHARED_FRAG; + if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || + skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) + goto err; + while (pos < offset + len) { if (i >= nfrags) { BUG_ON(skb_headlen(list_skb)); @@ -3667,6 +3671,11 @@ normal: BUG_ON(!nfrags); + if (skb_orphan_frags(frag_skb, GFP_ATOMIC) || + skb_zerocopy_clone(nskb, frag_skb, + GFP_ATOMIC)) + goto err; + list_skb = list_skb->next; } @@ -3678,11 +3687,6 @@ normal: goto err; } - if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) - goto err; - if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC)) - goto err; - *nskb_frag = *frag; __skb_frag_ref(nskb_frag); size = skb_frag_size(nskb_frag); diff --git a/net/core/xdp.c b/net/core/xdp.c new file mode 100644 index 000000000000..097a0f74e004 --- /dev/null +++ b/net/core/xdp.c @@ -0,0 +1,73 @@ +/* net/core/xdp.c + * + * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. + * Released under terms in GPL version 2. See COPYING. 
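+ * + * Driver-side usage sketch (the ring structure and error label are illustrative only; the calls are the ones defined in this file, mirroring netif_alloc_rx_queues() above): + * + *	err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->queue_index); + *	if (err) + *		goto err_free_ring; + * + * with a matching xdp_rxq_info_unreg(&ring->xdp_rxq) on teardown.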
+ */ +#include <linux/types.h> +#include <linux/mm.h> + +#include <net/xdp.h> + +#define REG_STATE_NEW 0x0 +#define REG_STATE_REGISTERED 0x1 +#define REG_STATE_UNREGISTERED 0x2 +#define REG_STATE_UNUSED 0x3 + +void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq) +{ + /* Simplify driver cleanup code paths, allow unreg "unused" */ + if (xdp_rxq->reg_state == REG_STATE_UNUSED) + return; + + WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG"); + + xdp_rxq->reg_state = REG_STATE_UNREGISTERED; + xdp_rxq->dev = NULL; +} +EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg); + +static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq) +{ + memset(xdp_rxq, 0, sizeof(*xdp_rxq)); +} + +/* Returns 0 on success, negative on failure */ +int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index) +{ + if (xdp_rxq->reg_state == REG_STATE_UNUSED) { + WARN(1, "Driver promised not to register this"); + return -EINVAL; + } + + if (xdp_rxq->reg_state == REG_STATE_REGISTERED) { + WARN(1, "Missing unregister, handled but fix driver"); + xdp_rxq_info_unreg(xdp_rxq); + } + + if (!dev) { + WARN(1, "Missing net_device from driver"); + return -ENODEV; + } + + /* State either UNREGISTERED or NEW */ + xdp_rxq_info_init(xdp_rxq); + xdp_rxq->dev = dev; + xdp_rxq->queue_index = queue_index; + + xdp_rxq->reg_state = REG_STATE_REGISTERED; + return 0; +} +EXPORT_SYMBOL_GPL(xdp_rxq_info_reg); + +void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq) +{ + xdp_rxq->reg_state = REG_STATE_UNUSED; +} +EXPORT_SYMBOL_GPL(xdp_rxq_info_unused); + +bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq) +{ + return (xdp_rxq->reg_state == REG_STATE_REGISTERED); +} +EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg); diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig index 8c0ef71bed2f..b270e84d9c13 100644 --- a/net/dccp/Kconfig +++ b/net/dccp/Kconfig @@ -39,23 +39,6 @@ config IP_DCCP_DEBUG Just say N. -config NET_DCCPPROBE - tristate "DCCP connection probing" - depends on PROC_FS && KPROBES - ---help--- - This module allows for capturing the changes to DCCP connection - state in response to incoming packets. It is used for debugging - DCCP congestion avoidance modules. If you don't understand - what was just said, you don't need it: say N. - - Documentation on how to use DCCP connection probing can be found - at: - - http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe - - To compile this code as a module, choose M here: the - module will be called dccp_probe. 
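(The removed kprobes-based probe module is superseded by the dccp_probe tracepoint added in net/dccp/trace.h below; assuming tracefs is mounted at the usual /sys/kernel/debug/tracing, it can be consumed with: echo 1 > /sys/kernel/debug/tracing/events/dccp/dccp_probe/enable; cat /sys/kernel/debug/tracing/trace_pipe)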
- endmenu diff --git a/net/dccp/Makefile b/net/dccp/Makefile index 2e7b56097bc4..5b4ff37bc806 100644 --- a/net/dccp/Makefile +++ b/net/dccp/Makefile @@ -21,9 +21,10 @@ obj-$(subst y,$(CONFIG_IP_DCCP),$(CONFIG_IPV6)) += dccp_ipv6.o dccp_ipv6-y := ipv6.o obj-$(CONFIG_INET_DCCP_DIAG) += dccp_diag.o -obj-$(CONFIG_NET_DCCPPROBE) += dccp_probe.o dccp-$(CONFIG_SYSCTL) += sysctl.o dccp_diag-y := diag.o -dccp_probe-y := probe.o + +# build with local directory for trace.h +CFLAGS_proto.o := -I$(src) diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index 3de0d0362d7f..2a24f7d171a5 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -228,7 +228,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets, } if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) { - DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n"); + DCCP_CRIT("Ack Vector buffer overflow: dropping old entries"); av->av_overflow = true; } diff --git a/net/dccp/probe.c b/net/dccp/probe.c deleted file mode 100644 index 3d3fda05b32d..000000000000 --- a/net/dccp/probe.c +++ /dev/null @@ -1,203 +0,0 @@ -/* - * dccp_probe - Observe the DCCP flow with kprobes. - * - * The idea for this came from Werner Almesberger's umlsim - * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> - * - * Modified for DCCP from Stephen Hemminger's code - * Copyright (C) 2006, Ian McDonald <ian.mcdonald@jandi.co.nz> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ - -#include <linux/kernel.h> -#include <linux/kprobes.h> -#include <linux/socket.h> -#include <linux/dccp.h> -#include <linux/proc_fs.h> -#include <linux/module.h> -#include <linux/kfifo.h> -#include <linux/vmalloc.h> -#include <linux/time64.h> -#include <linux/gfp.h> -#include <net/net_namespace.h> - -#include "dccp.h" -#include "ccid.h" -#include "ccids/ccid3.h" - -static int port; - -static int bufsize = 64 * 1024; - -static const char procname[] = "dccpprobe"; - -static struct { - struct kfifo fifo; - spinlock_t lock; - wait_queue_head_t wait; - struct timespec64 tstart; -} dccpw; - -static void printl(const char *fmt, ...) 
-{ - va_list args; - int len; - struct timespec64 now; - char tbuf[256]; - - va_start(args, fmt); - getnstimeofday64(&now); - - now = timespec64_sub(now, dccpw.tstart); - - len = sprintf(tbuf, "%lu.%06lu ", - (unsigned long) now.tv_sec, - (unsigned long) now.tv_nsec / NSEC_PER_USEC); - len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); - va_end(args); - - kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock); - wake_up(&dccpw.wait); -} - -static int jdccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) -{ - const struct inet_sock *inet = inet_sk(sk); - struct ccid3_hc_tx_sock *hc = NULL; - - if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) - hc = ccid3_hc_tx_sk(sk); - - if (port == 0 || ntohs(inet->inet_dport) == port || - ntohs(inet->inet_sport) == port) { - if (hc) - printl("%pI4:%u %pI4:%u %d %d %d %d %u %llu %llu %d\n", - &inet->inet_saddr, ntohs(inet->inet_sport), - &inet->inet_daddr, ntohs(inet->inet_dport), size, - hc->tx_s, hc->tx_rtt, hc->tx_p, - hc->tx_x_calc, hc->tx_x_recv >> 6, - hc->tx_x >> 6, hc->tx_t_ipi); - else - printl("%pI4:%u %pI4:%u %d\n", - &inet->inet_saddr, ntohs(inet->inet_sport), - &inet->inet_daddr, ntohs(inet->inet_dport), - size); - } - - jprobe_return(); - return 0; -} - -static struct jprobe dccp_send_probe = { - .kp = { - .symbol_name = "dccp_sendmsg", - }, - .entry = jdccp_sendmsg, -}; - -static int dccpprobe_open(struct inode *inode, struct file *file) -{ - kfifo_reset(&dccpw.fifo); - getnstimeofday64(&dccpw.tstart); - return 0; -} - -static ssize_t dccpprobe_read(struct file *file, char __user *buf, - size_t len, loff_t *ppos) -{ - int error = 0, cnt = 0; - unsigned char *tbuf; - - if (!buf) - return -EINVAL; - - if (len == 0) - return 0; - - tbuf = vmalloc(len); - if (!tbuf) - return -ENOMEM; - - error = wait_event_interruptible(dccpw.wait, - kfifo_len(&dccpw.fifo) != 0); - if (error) - goto out_free; - - cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock); - error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; - -out_free: - vfree(tbuf); - - return error ? 
error : cnt; -} - -static const struct file_operations dccpprobe_fops = { - .owner = THIS_MODULE, - .open = dccpprobe_open, - .read = dccpprobe_read, - .llseek = noop_llseek, -}; - -static __init int dccpprobe_init(void) -{ - int ret = -ENOMEM; - - init_waitqueue_head(&dccpw.wait); - spin_lock_init(&dccpw.lock); - if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL)) - return ret; - if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops)) - goto err0; - - ret = register_jprobe(&dccp_send_probe); - if (ret) { - ret = request_module("dccp"); - if (!ret) - ret = register_jprobe(&dccp_send_probe); - } - - if (ret) - goto err1; - - pr_info("DCCP watch registered (port=%d)\n", port); - return 0; -err1: - remove_proc_entry(procname, init_net.proc_net); -err0: - kfifo_free(&dccpw.fifo); - return ret; -} -module_init(dccpprobe_init); - -static __exit void dccpprobe_exit(void) -{ - kfifo_free(&dccpw.fifo); - remove_proc_entry(procname, init_net.proc_net); - unregister_jprobe(&dccp_send_probe); - -} -module_exit(dccpprobe_exit); - -MODULE_PARM_DESC(port, "Port to match (0=all)"); -module_param(port, int, 0); - -MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); -module_param(bufsize, int, 0); - -MODULE_AUTHOR("Ian McDonald <ian.mcdonald@jandi.co.nz>"); -MODULE_DESCRIPTION("DCCP snooper"); -MODULE_LICENSE("GPL"); diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 7a75a1d3568b..fa7e92e08920 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -38,6 +38,9 @@ #include "dccp.h" #include "feat.h" +#define CREATE_TRACE_POINTS +#include "trace.h" + DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly; EXPORT_SYMBOL_GPL(dccp_statistics); @@ -761,6 +764,8 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int rc, size; long timeo; + trace_dccp_probe(sk, len); + if (len > dp->dccps_mss_cache) return -EMSGSIZE; diff --git a/net/dccp/trace.h b/net/dccp/trace.h new file mode 100644 index 000000000000..5062421beee9 --- /dev/null +++ b/net/dccp/trace.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dccp + +#if !defined(_TRACE_DCCP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_DCCP_H + +#include <net/sock.h> +#include "dccp.h" +#include "ccids/ccid3.h" +#include <linux/tracepoint.h> +#include <trace/events/net_probe_common.h> + +TRACE_EVENT(dccp_probe, + + TP_PROTO(struct sock *sk, size_t size), + + TP_ARGS(sk, size), + + TP_STRUCT__entry( + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, size) + __field(__u16, tx_s) + __field(__u32, tx_rtt) + __field(__u32, tx_p) + __field(__u32, tx_x_calc) + __field(__u64, tx_x_recv) + __field(__u64, tx_x) + __field(__u32, tx_t_ipi) + ), + + TP_fast_assign( + const struct inet_sock *inet = inet_sk(sk); + struct ccid3_hc_tx_sock *hc = NULL; + + if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) + hc = ccid3_hc_tx_sk(sk); + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + + TP_STORE_ADDR_PORTS(__entry, inet, sk); + + /* For filtering use */ + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + + __entry->size = size; + if (hc) { + __entry->tx_s = hc->tx_s; + __entry->tx_rtt = hc->tx_rtt; + __entry->tx_p = hc->tx_p; + __entry->tx_x_calc = hc->tx_x_calc; + __entry->tx_x_recv = 
hc->tx_x_recv >> 6; + __entry->tx_x = hc->tx_x >> 6; + __entry->tx_t_ipi = hc->tx_t_ipi; + } else { + __entry->tx_s = 0; + memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi - + (void *)&__entry->tx_rtt + + sizeof(__entry->tx_t_ipi)); + } + ), + + TP_printk("src=%pISpc dest=%pISpc size=%d tx_s=%d tx_rtt=%d " + "tx_p=%d tx_x_calc=%u tx_x_recv=%llu tx_x=%llu tx_t_ipi=%d", + __entry->saddr, __entry->daddr, __entry->size, + __entry->tx_s, __entry->tx_rtt, __entry->tx_p, + __entry->tx_x_calc, __entry->tx_x_recv, __entry->tx_x, + __entry->tx_t_ipi) +); + +#endif /* _TRACE_DCCP_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index b03665e8fb4e..cefb0c3c6d51 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -103,7 +103,7 @@ void dsa_legacy_unregister(void); #else static inline int dsa_legacy_register(void) { - return -ENODEV; + return 0; } static inline void dsa_legacy_unregister(void) { } diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index e6e0b7b6025c..2b06bb91318b 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -70,6 +70,18 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb, if (skb_cow_head(skb, BRCM_TAG_LEN) < 0) return NULL; + /* The Ethernet switch we are interfaced with needs packets to be at + * least 64 bytes (including FCS), otherwise they will be discarded when + * they enter the switch port logic. When Broadcom tags are enabled, we + * need to make sure that packets are at least 68 bytes + * (including FCS and tag) because the length verification is done after + * the Broadcom tag is stripped off the ingress packet.
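+ * + * Concretely, with ETH_ZLEN = 60 and BRCM_TAG_LEN = 4, the pad target below is ETH_ZLEN + BRCM_TAG_LEN = 64 bytes before the tag is pushed, so the tagged frame is at least 68 bytes even before the FCS is appended, satisfying the 68-byte minimum described above.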
+ * + * Let dsa_slave_xmit() free the SKB + */ + if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false)) + return NULL; + skb_push(skb, BRCM_TAG_LEN); if (offset) diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index c6c8ad1d4b6d..47a0a6649a9d 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -43,7 +43,6 @@ obj-$(CONFIG_INET_DIAG) += inet_diag.o obj-$(CONFIG_INET_TCP_DIAG) += tcp_diag.o obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o obj-$(CONFIG_INET_RAW_DIAG) += raw_diag.o -obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o obj-$(CONFIG_TCP_CONG_BBR) += tcp_bbr.o obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o obj-$(CONFIG_TCP_CONG_CDG) += tcp_cdg.o diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index bab98a4fedad..54cccdd8b1e3 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -790,7 +790,8 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int addr_len = 0; int err; - sock_rps_record_flow(sk); + if (likely(!(flags & MSG_ERRQUEUE))) + sock_rps_record_flow(sk); err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, flags & ~MSG_DONTWAIT, &addr_len); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index d57aa64fa7c7..6f00e43120a8 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -121,14 +121,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; + struct xfrm_offload *xo = xfrm_offload(skb); void *tmp; - struct dst_entry *dst = skb_dst(skb); - struct xfrm_state *x = dst->xfrm; + struct xfrm_state *x; + + if (xo && (xo->flags & XFRM_DEV_RESUME)) + x = skb->sp->xvec[skb->sp->len - 1]; + else + x = skb_dst(skb)->xfrm; tmp = ESP_SKB_CB(skb)->tmp; esp_ssg_unref(x, tmp); kfree(tmp); - xfrm_output_resume(skb, err); + + if (xo && (xo->flags & XFRM_DEV_RESUME)) { + if (err) { + XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); + kfree_skb(skb); + return; + } + + skb_push(skb, skb->data - skb_mac_header(skb)); + secpath_reset(skb); + xfrm_dev_resume(skb); + } else { + xfrm_output_resume(skb, err); + } } /* Move ESP header back into place. 
*/ @@ -825,17 +843,13 @@ static int esp_init_aead(struct xfrm_state *x) char aead_name[CRYPTO_MAX_ALG_NAME]; struct crypto_aead *aead; int err; - u32 mask = 0; err = -ENAMETOOLONG; if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; - if (x->xso.offload_handle) - mask |= CRYPTO_ALG_ASYNC; - - aead = crypto_alloc_aead(aead_name, 0, mask); + aead = crypto_alloc_aead(aead_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; @@ -865,7 +879,6 @@ static int esp_init_authenc(struct xfrm_state *x) char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; - u32 mask = 0; err = -EINVAL; if (!x->ealg) @@ -891,10 +904,7 @@ static int esp_init_authenc(struct xfrm_state *x) goto error; } - if (x->xso.offload_handle) - mask |= CRYPTO_ALG_ASYNC; - - aead = crypto_alloc_aead(authenc_name, 0, mask); + aead = crypto_alloc_aead(authenc_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index f8b918c766b0..c359f3cfeec3 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -108,75 +108,36 @@ static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb) static struct sk_buff *esp4_gso_segment(struct sk_buff *skb, netdev_features_t features) { - __u32 seq; - int err = 0; - struct sk_buff *skb2; struct xfrm_state *x; struct ip_esp_hdr *esph; struct crypto_aead *aead; - struct sk_buff *segs = ERR_PTR(-EINVAL); netdev_features_t esp_features = features; struct xfrm_offload *xo = xfrm_offload(skb); if (!xo) - goto out; - - seq = xo->seq.low; + return ERR_PTR(-EINVAL); x = skb->sp->xvec[skb->sp->len - 1]; aead = x->data; esph = ip_esp_hdr(skb); if (esph->spi != x->id.spi) - goto out; + return ERR_PTR(-EINVAL); if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) - goto out; + return ERR_PTR(-EINVAL); __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)); skb->encap_hdr_csum = 1; - if (!(features & NETIF_F_HW_ESP)) + if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || + (x->xso.dev != skb->dev)) esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); - segs = x->outer_mode->gso_segment(x, skb, esp_features); - if (IS_ERR_OR_NULL(segs)) - goto out; - - __skb_pull(skb, skb->data - skb_mac_header(skb)); - - skb2 = segs; - do { - struct sk_buff *nskb = skb2->next; - - xo = xfrm_offload(skb2); - xo->flags |= XFRM_GSO_SEGMENT; - xo->seq.low = seq; - xo->seq.hi = xfrm_replay_seqhi(x, seq); + xo->flags |= XFRM_GSO_SEGMENT; - if(!(features & NETIF_F_HW_ESP)) - xo->flags |= CRYPTO_FALLBACK; - - x->outer_mode->xmit(x, skb2); - - err = x->type_offload->xmit(x, skb2, esp_features); - if (err) { - kfree_skb_list(segs); - return ERR_PTR(err); - } - - if (!skb_is_gso(skb2)) - seq++; - else - seq += skb_shinfo(skb2)->gso_segs; - - skb_push(skb2, skb2->mac_len); - skb2 = nskb; - } while (skb2); - -out: - return segs; + return x->outer_mode->gso_segment(x, skb, esp_features); } static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb) @@ -203,6 +164,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ struct crypto_aead *aead; struct esp_info esp; bool hw_offload = true; + __u32 seq; esp.inplace = true; @@ -241,23 +203,30 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ return esp.nfrags; } + seq = xo->seq.low; + esph = esp.esph; esph->spi = x->id.spi; skb_push(skb, -skb_network_offset(skb)); if (xo->flags & XFRM_GSO_SEGMENT) { - esph->seq_no 
= htonl(xo->seq.low); - } else { - ip_hdr(skb)->tot_len = htons(skb->len); - ip_send_check(ip_hdr(skb)); + esph->seq_no = htonl(seq); + + if (!skb_is_gso(skb)) + xo->seq.low++; + else + xo->seq.low += skb_shinfo(skb)->gso_segs; } + esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32)); + + ip_hdr(skb)->tot_len = htons(skb->len); + ip_send_check(ip_hdr(skb)); + if (hw_offload) return 0; - esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); - err = esp_output_tail(x, skb, &esp); if (err) return err; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index c9c35b61a027..a383f299ce24 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -564,12 +564,18 @@ static int inet_diag_bc_run(const struct nlattr *_bc, case INET_DIAG_BC_JMP: yes = 0; break; + case INET_DIAG_BC_S_EQ: + yes = entry->sport == op[1].no; + break; case INET_DIAG_BC_S_GE: yes = entry->sport >= op[1].no; break; case INET_DIAG_BC_S_LE: yes = entry->sport <= op[1].no; break; + case INET_DIAG_BC_D_EQ: + yes = entry->dport == op[1].no; + break; case INET_DIAG_BC_D_GE: yes = entry->dport >= op[1].no; break; @@ -802,8 +808,10 @@ static int inet_diag_bc_audit(const struct nlattr *attr, if (!valid_devcond(bc, len, &min_len)) return -EINVAL; break; + case INET_DIAG_BC_S_EQ: case INET_DIAG_BC_S_GE: case INET_DIAG_BC_S_LE: + case INET_DIAG_BC_D_EQ: case INET_DIAG_BC_D_GE: case INET_DIAG_BC_D_LE: if (!valid_port_comparison(bc, len, &min_len)) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 78365094f56c..b61f2285816d 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -313,11 +313,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, return PACKET_REJECT; md = ip_tunnel_info_opts(&tun_dst->u.tun_info); - if (!md) { - dst_release((struct dst_entry *)tun_dst); - return PACKET_REJECT; - } - memcpy(md, pkt_md, sizeof(*md)); md->version = ver; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index c0cc6aa8cfaa..e6774ccb7731 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -80,35 +80,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t } EXPORT_SYMBOL(ip_route_me_harder); -/* - * Extra routing may needed on local out, as the QUEUE target never - * returns control to the table. 
- */ - -struct ip_rt_info { - __be32 daddr; - __be32 saddr; - u_int8_t tos; - u_int32_t mark; -}; - -static void nf_ip_saveroute(const struct sk_buff *skb, - struct nf_queue_entry *entry) -{ - struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); - - if (entry->state.hook == NF_INET_LOCAL_OUT) { - const struct iphdr *iph = ip_hdr(skb); - - rt_info->tos = iph->tos; - rt_info->daddr = iph->daddr; - rt_info->saddr = iph->saddr; - rt_info->mark = skb->mark; - } -} - -static int nf_ip_reroute(struct net *net, struct sk_buff *skb, - const struct nf_queue_entry *entry) +int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry) { const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry); @@ -119,10 +91,12 @@ static int nf_ip_reroute(struct net *net, struct sk_buff *skb, skb->mark == rt_info->mark && iph->daddr == rt_info->daddr && iph->saddr == rt_info->saddr)) - return ip_route_me_harder(net, skb, RTN_UNSPEC); + return ip_route_me_harder(entry->state.net, skb, + RTN_UNSPEC); } return 0; } +EXPORT_SYMBOL_GPL(nf_ip_reroute); __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) @@ -155,9 +129,9 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, } EXPORT_SYMBOL(nf_ip_checksum); -static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, - unsigned int dataoff, unsigned int len, - u_int8_t protocol) +__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol) { const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; @@ -175,9 +149,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, } return csum; } +EXPORT_SYMBOL_GPL(nf_ip_checksum_partial); -static int nf_ip_route(struct net *net, struct dst_entry **dst, - struct flowi *fl, bool strict __always_unused) +int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict __always_unused) { struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); if (IS_ERR(rt)) @@ -185,19 +160,4 @@ static int nf_ip_route(struct net *net, struct dst_entry **dst, *dst = &rt->dst; return 0; } - -static const struct nf_afinfo nf_ip_afinfo = { - .family = AF_INET, - .checksum = nf_ip_checksum, - .checksum_partial = nf_ip_checksum_partial, - .route = nf_ip_route, - .saveroute = nf_ip_saveroute, - .reroute = nf_ip_reroute, - .route_key_size = sizeof(struct ip_rt_info), -}; - -static int __init ipv4_netfilter_init(void) -{ - return nf_register_afinfo(&nf_ip_afinfo); -} -subsys_initcall(ipv4_netfilter_init); +EXPORT_SYMBOL_GPL(nf_ip_route); diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index c11eb1744ab1..7d5d444964aa 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -72,11 +72,20 @@ endif # NF_TABLES_IPV4 config NF_TABLES_ARP tristate "ARP nf_tables support" + select NETFILTER_FAMILY_ARP help This option enables the ARP support for nf_tables. endif # NF_TABLES +config NF_FLOW_TABLE_IPV4 + select NF_FLOW_TABLE + tristate "Netfilter flow table IPv4 module" + help + This option adds the flow table IPv4 support. + + To compile it as a module, choose M here. 
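The flow table enabled by this option gives established connections a fast forwarding path: packets are matched against a hash table of offloaded flows and NAT-rewritten in place, with checksums patched incrementally (RFC 1624) rather than recomputed over the whole packet, as the nf_flow_table_ipv4.c addition further down shows with csum_replace4() and inet_proto_csum_replace4(). A minimal userspace sketch of that incremental update rule follows; csum_update32() is an illustrative stand-in for the kernel helper, not the kernel code itself:

#include <stdint.h>
#include <stdio.h>

/* RFC 1624 incremental update: given a 16-bit one's-complement
 * checksum and a 32-bit field changing from 'from' to 'to', patch
 * the checksum without touching the rest of the packet. This is a
 * standalone sketch mirroring what csum_replace4() does, not the
 * kernel implementation. */
static uint16_t csum_update32(uint16_t check, uint32_t from, uint32_t to)
{
	uint32_t sum = ~check & 0xffff;	/* HC' = ~(~HC + ~m + m') */

	/* subtract the old field value, one's-complement style */
	sum += (~from >> 16) & 0xffff;
	sum += ~from & 0xffff;
	/* add the new field value */
	sum += (to >> 16) & 0xffff;
	sum += to & 0xffff;

	/* fold carries back into 16 bits */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	return ~sum & 0xffff;
}

int main(void)
{
	/* toy input: rewrite 192.0.2.1 -> 198.51.100.7 in a header
	 * whose checksum happened to be 0x1c46 */
	printf("patched checksum: 0x%04x\n",
	       csum_update32(0x1c46, 0xc0000201, 0xc6336407));
	return 0;
}

Because only the changed field enters the update, the same rule serves both the IP header checksum and the TCP/UDP pseudo-header checksums in the SNAT/DNAT paths of the patch.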
+ config NF_DUP_IPV4 tristate "Netfilter IPv4 packet duplication to alternate destination" depends on !NF_CONNTRACK || NF_CONNTRACK @@ -392,6 +401,7 @@ endif # IP_NF_IPTABLES config IP_NF_ARPTABLES tristate "ARP tables support" select NETFILTER_XTABLES + select NETFILTER_FAMILY_ARP depends on NETFILTER_ADVANCED help arptables is a general, extensible packet identification framework. diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index adcdae358365..8bb1f0c7a375 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -43,6 +43,9 @@ obj-$(CONFIG_NFT_REDIR_IPV4) += nft_redir_ipv4.o obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o +# flow table support +obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o + # generic IP tables obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 0c3c944a7b72..bf8a5340f15e 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -810,9 +810,8 @@ static int get_info(struct net *net, void __user *user, if (compat) xt_compat_lock(NFPROTO_ARP); #endif - t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), - "arptable_%s", name); - if (t) { + t = xt_request_find_table_lock(net, NFPROTO_ARP, name); + if (!IS_ERR(t)) { struct arpt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT @@ -841,7 +840,7 @@ static int get_info(struct net *net, void __user *user, xt_table_unlock(t); module_put(t->me); } else - ret = -ENOENT; + ret = PTR_ERR(t); #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(NFPROTO_ARP); @@ -866,7 +865,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, NFPROTO_ARP, get.name); - if (t) { + if (!IS_ERR(t)) { const struct xt_table_info *private = t->private; if (get.size == private->size) @@ -878,7 +877,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr, module_put(t->me); xt_table_unlock(t); } else - ret = -ENOENT; + ret = PTR_ERR(t); return ret; } @@ -903,10 +902,9 @@ static int __do_replace(struct net *net, const char *name, goto out; } - t = try_then_request_module(xt_find_table_lock(net, NFPROTO_ARP, name), - "arptable_%s", name); - if (!t) { - ret = -ENOENT; + t = xt_request_find_table_lock(net, NFPROTO_ARP, name); + if (IS_ERR(t)) { + ret = PTR_ERR(t); goto free_newinfo_counters_untrans; } @@ -1020,8 +1018,8 @@ static int do_add_counters(struct net *net, const void __user *user, return PTR_ERR(paddc); t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name); - if (!t) { - ret = -ENOENT; + if (IS_ERR(t)) { + ret = PTR_ERR(t); goto free; } @@ -1408,7 +1406,7 @@ static int compat_get_entries(struct net *net, xt_compat_lock(NFPROTO_ARP); t = xt_find_table_lock(net, NFPROTO_ARP, get.name); - if (t) { + if (!IS_ERR(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; @@ -1423,7 +1421,7 @@ static int compat_get_entries(struct net *net, module_put(t->me); xt_table_unlock(t); } else - ret = -ENOENT; + ret = PTR_ERR(t); xt_compat_unlock(NFPROTO_ARP); return ret; diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 2e0d339028bb..0b975aa2d363 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -973,9 +973,8 @@ static int get_info(struct net *net, void __user *user, if (compat) xt_compat_lock(AF_INET); 
#endif - t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), - "iptable_%s", name); - if (t) { + t = xt_request_find_table_lock(net, AF_INET, name); + if (!IS_ERR(t)) { struct ipt_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT @@ -1005,7 +1004,7 @@ static int get_info(struct net *net, void __user *user, xt_table_unlock(t); module_put(t->me); } else - ret = -ENOENT; + ret = PTR_ERR(t); #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET); @@ -1030,7 +1029,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, AF_INET, get.name); - if (t) { + if (!IS_ERR(t)) { const struct xt_table_info *private = t->private; if (get.size == private->size) ret = copy_entries_to_user(private->size, @@ -1041,7 +1040,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr, module_put(t->me); xt_table_unlock(t); } else - ret = -ENOENT; + ret = PTR_ERR(t); return ret; } @@ -1064,10 +1063,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, goto out; } - t = try_then_request_module(xt_find_table_lock(net, AF_INET, name), - "iptable_%s", name); - if (!t) { - ret = -ENOENT; + t = xt_request_find_table_lock(net, AF_INET, name); + if (IS_ERR(t)) { + ret = PTR_ERR(t); goto free_newinfo_counters_untrans; } @@ -1181,8 +1179,8 @@ do_add_counters(struct net *net, const void __user *user, return PTR_ERR(paddc); t = xt_find_table_lock(net, AF_INET, tmp.name); - if (!t) { - ret = -ENOENT; + if (IS_ERR(t)) { + ret = PTR_ERR(t); goto free; } @@ -1625,7 +1623,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, xt_compat_lock(AF_INET); t = xt_find_table_lock(net, AF_INET, get.name); - if (t) { + if (!IS_ERR(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; ret = compat_table_info(private, &info); @@ -1639,7 +1637,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, module_put(t->me); xt_table_unlock(t); } else - ret = -ENOENT; + ret = PTR_ERR(t); xt_compat_unlock(AF_INET); return ret; diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c index 7667f223d7f8..9ac92ea7b93c 100644 --- a/net/ipv4/netfilter/iptable_filter.c +++ b/net/ipv4/netfilter/iptable_filter.c @@ -38,12 +38,6 @@ static unsigned int iptable_filter_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - if (state->hook == NF_INET_LOCAL_OUT && - (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr))) - /* root is playing with raw sockets. */ - return NF_ACCEPT; - return ipt_do_table(skb, state, state->net->ipv4.iptable_filter); } diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index aebdb337fd7e..dea138ca8925 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -49,11 +49,6 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) u_int32_t mark; int err; - /* root is playing with raw sockets. 
*/ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - /* Save things which could affect route */ mark = skb->mark; iph = ip_hdr(skb); diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index a1a07b338ccf..0f7255cc65ee 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c @@ -72,6 +72,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = { { .hook = iptable_nat_ipv4_in, .pf = NFPROTO_IPV4, + .nat_hook = true, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_NAT_DST, }, @@ -79,6 +80,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = { { .hook = iptable_nat_ipv4_out, .pf = NFPROTO_IPV4, + .nat_hook = true, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP_PRI_NAT_SRC, }, @@ -86,6 +88,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = { { .hook = iptable_nat_ipv4_local_fn, .pf = NFPROTO_IPV4, + .nat_hook = true, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_NAT_DST, }, @@ -93,6 +96,7 @@ static const struct nf_hook_ops nf_nat_ipv4_ops[] = { { .hook = iptable_nat_ipv4_fn, .pf = NFPROTO_IPV4, + .nat_hook = true, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP_PRI_NAT_SRC, }, diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 2642ecd2645c..a869d1fea7d9 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c @@ -26,12 +26,6 @@ static unsigned int iptable_raw_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - if (state->hook == NF_INET_LOCAL_OUT && - (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr))) - /* root is playing with raw sockets. */ - return NF_ACCEPT; - return ipt_do_table(skb, state, state->net->ipv4.iptable_raw); } diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c index ff226596e4b5..e5379fe57b64 100644 --- a/net/ipv4/netfilter/iptable_security.c +++ b/net/ipv4/netfilter/iptable_security.c @@ -43,12 +43,6 @@ static unsigned int iptable_security_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - if (state->hook == NF_INET_LOCAL_OUT && - (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr))) - /* Somebody is playing with raw sockets. */ - return NF_ACCEPT; - return ipt_do_table(skb, state, state->net->ipv4.iptable_security); } diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 89af9d88ca21..de213a397ea8 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -154,11 +154,6 @@ static unsigned int ipv4_conntrack_local(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - /* root is playing with raw sockets. 
*/ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */ return NF_ACCEPT; @@ -368,7 +363,7 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET)); MODULE_ALIAS("ip_conntrack"); MODULE_LICENSE("GPL"); -static struct nf_conntrack_l4proto *builtin_l4proto4[] = { +static const struct nf_conntrack_l4proto * const builtin_l4proto4[] = { &nf_conntrack_l4proto_tcp4, &nf_conntrack_l4proto_udp4, &nf_conntrack_l4proto_icmp, diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index 1849fedd9b81..5c15beafa711 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -22,7 +22,7 @@ #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_log.h> -static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ; +static const unsigned int nf_ct_icmp_timeout = 30*HZ; static inline struct nf_icmp_net *icmp_pernet(struct net *net) { @@ -351,7 +351,7 @@ static struct nf_proto_net *icmp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.icmp.pn; } -struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp = { .l3proto = PF_INET, .l4proto = IPPROTO_ICMP, diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c new file mode 100644 index 000000000000..b2d01eb25f2c --- /dev/null +++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c @@ -0,0 +1,284 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netfilter.h> +#include <linux/rhashtable.h> +#include <linux/ip.h> +#include <linux/netdevice.h> +#include <net/ip.h> +#include <net/neighbour.h> +#include <net/netfilter/nf_flow_table.h> +#include <net/netfilter/nf_tables.h> +/* For layer 4 checksum field offset. 
*/ +#include <linux/tcp.h> +#include <linux/udp.h> + +static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff, + __be32 addr, __be32 new_addr) +{ + struct tcphdr *tcph; + + if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) || + skb_try_make_writable(skb, thoff + sizeof(*tcph))) + return -1; + + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true); + + return 0; +} + +static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff, + __be32 addr, __be32 new_addr) +{ + struct udphdr *udph; + + if (!pskb_may_pull(skb, thoff + sizeof(*udph)) || + skb_try_make_writable(skb, thoff + sizeof(*udph))) + return -1; + + udph = (void *)(skb_network_header(skb) + thoff); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace4(&udph->check, skb, addr, + new_addr, true); + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } + + return 0; +} + +static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph, + unsigned int thoff, __be32 addr, + __be32 new_addr) +{ + switch (iph->protocol) { + case IPPROTO_TCP: + if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0) + return NF_DROP; + break; + case IPPROTO_UDP: + if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0) + return NF_DROP; + break; + } + + return 0; +} + +static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb, + struct iphdr *iph, unsigned int thoff, + enum flow_offload_tuple_dir dir) +{ + __be32 addr, new_addr; + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + addr = iph->saddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; + iph->saddr = new_addr; + break; + case FLOW_OFFLOAD_DIR_REPLY: + addr = iph->daddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; + iph->daddr = new_addr; + break; + default: + return -1; + } + csum_replace4(&iph->check, addr, new_addr); + + return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); +} + +static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb, + struct iphdr *iph, unsigned int thoff, + enum flow_offload_tuple_dir dir) +{ + __be32 addr, new_addr; + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + addr = iph->daddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr; + iph->daddr = new_addr; + break; + case FLOW_OFFLOAD_DIR_REPLY: + addr = iph->saddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr; + iph->saddr = new_addr; + break; + default: + return -1; + } + + return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); +} + +static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, + enum flow_offload_tuple_dir dir) +{ + struct iphdr *iph = ip_hdr(skb); + unsigned int thoff = iph->ihl * 4; + + if (flow->flags & FLOW_OFFLOAD_SNAT && + (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 || + nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0)) + return -1; + if (flow->flags & FLOW_OFFLOAD_DNAT && + (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 || + nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0)) + return -1; + + return 0; +} + +static bool ip_has_options(unsigned int thoff) +{ + return thoff != sizeof(struct iphdr); +} + +static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev, + struct flow_offload_tuple *tuple) +{ + struct flow_ports *ports; + unsigned int thoff; + struct iphdr *iph; + + if (!pskb_may_pull(skb, sizeof(*iph))) + return -1; 
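+	/*
+	 * Illustrative comment, not part of the original patch: the
+	 * checks in this function only admit unfragmented, option-less
+	 * IPv4 carrying TCP or UDP; every -1 returned here makes the
+	 * hook below return NF_ACCEPT, so unsupported packets simply
+	 * fall back to the regular netfilter forwarding path.
+	 */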
+ + iph = ip_hdr(skb); + thoff = iph->ihl * 4; + + if (ip_is_fragment(iph) || + unlikely(ip_has_options(thoff))) + return -1; + + if (iph->protocol != IPPROTO_TCP && + iph->protocol != IPPROTO_UDP) + return -1; + + thoff = iph->ihl * 4; + if (!pskb_may_pull(skb, thoff + sizeof(*ports))) + return -1; + + ports = (struct flow_ports *)(skb_network_header(skb) + thoff); + + tuple->src_v4.s_addr = iph->saddr; + tuple->dst_v4.s_addr = iph->daddr; + tuple->src_port = ports->source; + tuple->dst_port = ports->dest; + tuple->l3proto = AF_INET; + tuple->l4proto = iph->protocol; + tuple->iifidx = dev->ifindex; + + return 0; +} + +/* Based on ip_exceeds_mtu(). */ +static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) +{ + if (skb->len <= mtu) + return false; + + if ((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) + return false; + + if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) + return false; + + return true; +} + +static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rtable *rt) +{ + u32 mtu; + + mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); + if (__nf_flow_exceeds_mtu(skb, mtu)) + return true; + + return false; +} + +unsigned int +nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct nf_flowtable *flow_table = priv; + struct flow_offload_tuple tuple = {}; + enum flow_offload_tuple_dir dir; + struct flow_offload *flow; + struct net_device *outdev; + const struct rtable *rt; + struct iphdr *iph; + __be32 nexthop; + + if (skb->protocol != htons(ETH_P_IP)) + return NF_ACCEPT; + + if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0) + return NF_ACCEPT; + + tuplehash = flow_offload_lookup(flow_table, &tuple); + if (tuplehash == NULL) + return NF_ACCEPT; + + outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx); + if (!outdev) + return NF_ACCEPT; + + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); + + rt = (const struct rtable *)flow->tuplehash[dir].tuple.dst_cache; + if (unlikely(nf_flow_exceeds_mtu(skb, rt))) + return NF_ACCEPT; + + if (skb_try_make_writable(skb, sizeof(*iph))) + return NF_DROP; + + if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) && + nf_flow_nat_ip(flow, skb, dir) < 0) + return NF_DROP; + + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + iph = ip_hdr(skb); + ip_decrease_ttl(iph); + + skb->dev = outdev; + nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); + neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb); + + return NF_STOLEN; +} +EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook); + +static struct nf_flowtable_type flowtable_ipv4 = { + .family = NFPROTO_IPV4, + .params = &nf_flow_offload_rhash_params, + .gc = nf_flow_offload_work_gc, + .hook = nf_flow_offload_ip_hook, + .owner = THIS_MODULE, +}; + +static int __init nf_flow_ipv4_module_init(void) +{ + nft_register_flowtable_type(&flowtable_ipv4); + + return 0; +} + +static void __exit nf_flow_ipv4_module_exit(void) +{ + nft_unregister_flowtable_type(&flowtable_ipv4); +} + +module_init(nf_flow_ipv4_module_init); +module_exit(nf_flow_ipv4_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); +MODULE_ALIAS_NF_FLOWTABLE(AF_INET); diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 0443ca4120b0..f7ff6a364d7b 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c @@ -356,11 
+356,6 @@ nf_nat_ipv4_out(void *priv, struct sk_buff *skb, #endif unsigned int ret; - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - ret = nf_nat_ipv4_fn(priv, skb, state, do_chain); #ifdef CONFIG_XFRM if (ret != NF_DROP && ret != NF_STOLEN && @@ -396,11 +391,6 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, unsigned int ret; int err; - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - ret = nf_nat_ipv4_fn(priv, skb, state, do_chain); if (ret != NF_DROP && ret != NF_STOLEN && (ct = nf_ct_get(skb, &ctinfo)) != NULL) { diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c index 4bbc273b45e8..f84c17763f6f 100644 --- a/net/ipv4/netfilter/nf_tables_arp.c +++ b/net/ipv4/netfilter/nf_tables_arp.c @@ -21,7 +21,8 @@ nft_do_chain_arp(void *priv, { struct nft_pktinfo pkt; - nft_set_pktinfo_unspec(&pkt, skb, state); + nft_set_pktinfo(&pkt, skb, state); + nft_set_pktinfo_unspec(&pkt, skb); return nft_do_chain(&pkt, priv); } @@ -30,12 +31,6 @@ static struct nft_af_info nft_af_arp __read_mostly = { .family = NFPROTO_ARP, .nhooks = NF_ARP_NUMHOOKS, .owner = THIS_MODULE, - .nops = 1, - .hooks = { - [NF_ARP_IN] = nft_do_chain_arp, - [NF_ARP_OUT] = nft_do_chain_arp, - [NF_ARP_FORWARD] = nft_do_chain_arp, - }, }; static int nf_tables_arp_init_net(struct net *net) @@ -73,6 +68,10 @@ static const struct nf_chain_type filter_arp = { .owner = THIS_MODULE, .hook_mask = (1 << NF_ARP_IN) | (1 << NF_ARP_OUT), + .hooks = { + [NF_ARP_IN] = nft_do_chain_arp, + [NF_ARP_OUT] = nft_do_chain_arp, + }, }; static int __init nf_tables_arp_init(void) diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c index 2840a29b2e04..f4675253f1e6 100644 --- a/net/ipv4/netfilter/nf_tables_ipv4.c +++ b/net/ipv4/netfilter/nf_tables_ipv4.c @@ -24,40 +24,17 @@ static unsigned int nft_do_chain_ipv4(void *priv, { struct nft_pktinfo pkt; - nft_set_pktinfo_ipv4(&pkt, skb, state); + nft_set_pktinfo(&pkt, skb, state); + nft_set_pktinfo_ipv4(&pkt, skb); return nft_do_chain(&pkt, priv); } -static unsigned int nft_ipv4_output(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - if (unlikely(skb->len < sizeof(struct iphdr) || - ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) { - if (net_ratelimit()) - pr_info("nf_tables_ipv4: ignoring short SOCK_RAW " - "packet\n"); - return NF_ACCEPT; - } - - return nft_do_chain_ipv4(priv, skb, state); -} - -struct nft_af_info nft_af_ipv4 __read_mostly = { +static struct nft_af_info nft_af_ipv4 __read_mostly = { .family = NFPROTO_IPV4, .nhooks = NF_INET_NUMHOOKS, .owner = THIS_MODULE, - .nops = 1, - .hooks = { - [NF_INET_LOCAL_IN] = nft_do_chain_ipv4, - [NF_INET_LOCAL_OUT] = nft_ipv4_output, - [NF_INET_FORWARD] = nft_do_chain_ipv4, - [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4, - [NF_INET_POST_ROUTING] = nft_do_chain_ipv4, - }, }; -EXPORT_SYMBOL_GPL(nft_af_ipv4); static int nf_tables_ipv4_init_net(struct net *net) { @@ -97,6 +74,13 @@ static const struct nf_chain_type filter_ipv4 = { (1 << NF_INET_FORWARD) | (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING), + .hooks = { + [NF_INET_LOCAL_IN] = nft_do_chain_ipv4, + [NF_INET_LOCAL_OUT] = nft_do_chain_ipv4, + [NF_INET_FORWARD] = nft_do_chain_ipv4, + [NF_INET_PRE_ROUTING] = nft_do_chain_ipv4, + [NF_INET_POST_ROUTING] = nft_do_chain_ipv4, + }, }; static int __init 
nf_tables_ipv4_init(void) diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c index f5c66a7a4bf2..f2a490981594 100644 --- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c +++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c @@ -33,7 +33,8 @@ static unsigned int nft_nat_do_chain(void *priv, { struct nft_pktinfo pkt; - nft_set_pktinfo_ipv4(&pkt, skb, state); + nft_set_pktinfo(&pkt, skb, state); + nft_set_pktinfo_ipv4(&pkt, skb); return nft_do_chain(&pkt, priv); } diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c index 30493beb611a..d965c225b9f6 100644 --- a/net/ipv4/netfilter/nft_chain_route_ipv4.c +++ b/net/ipv4/netfilter/nft_chain_route_ipv4.c @@ -33,12 +33,8 @@ static unsigned int nf_route_table_hook(void *priv, const struct iphdr *iph; int err; - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) - return NF_ACCEPT; - - nft_set_pktinfo_ipv4(&pkt, skb, state); + nft_set_pktinfo(&pkt, skb, state); + nft_set_pktinfo_ipv4(&pkt, skb); mark = skb->mark; iph = ip_hdr(skb); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 67d39b79c801..d7cf861bf699 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -498,8 +498,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) const struct tcp_sock *tp = tcp_sk(sk); int state; - sock_rps_record_flow(sk); - sock_poll_wait(file, sk_sleep(sk), wait); state = inet_sk_state_load(sk); @@ -1104,12 +1102,15 @@ static int linear_payload_sz(bool first_skb) return 0; } -static int select_size(const struct sock *sk, bool sg, bool first_skb) +static int select_size(const struct sock *sk, bool sg, bool first_skb, bool zc) { const struct tcp_sock *tp = tcp_sk(sk); int tmp = tp->mss_cache; if (sg) { + if (zc) + return 0; + if (sk_can_gso(sk)) { tmp = linear_payload_sz(first_skb); } else { @@ -1186,7 +1187,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) int flags, err, copied = 0; int mss_now = 0, size_goal, copied_syn = 0; bool process_backlog = false; - bool sg; + bool sg, zc = false; long timeo; flags = msg->msg_flags; @@ -1204,7 +1205,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) goto out_err; } - if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG)) + zc = sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG; + if (!zc) uarg->zerocopy = 0; } @@ -1281,6 +1283,7 @@ restart: if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { bool first_skb; + int linear; new_segment: /* Allocate new segment. If the interface is SG, @@ -1294,9 +1297,8 @@ new_segment: goto restart; } first_skb = tcp_rtx_and_write_queues_empty(sk); - skb = sk_stream_alloc_skb(sk, - select_size(sk, sg, first_skb), - sk->sk_allocation, + linear = select_size(sk, sg, first_skb, zc); + skb = sk_stream_alloc_skb(sk, linear, sk->sk_allocation, first_skb); if (!skb) goto wait_for_memory; @@ -1325,13 +1327,13 @@ new_segment: copy = msg_data_left(msg); /* Where to copy to? */ - if (skb_availroom(skb) > 0) { + if (skb_availroom(skb) > 0 && !zc) { /* We have some space in skb head. Superb! 
*/ copy = min_t(int, copy, skb_availroom(skb)); err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); if (err) goto do_fault; - } else if (!uarg || !uarg->zerocopy) { + } else if (!zc) { bool merge = true; int i = skb_shinfo(skb)->nr_frags; struct page_frag *pfrag = sk_page_frag(sk); @@ -1371,8 +1373,10 @@ new_segment: pfrag->offset += copy; } else { err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); - if (err == -EMSGSIZE || err == -EEXIST) + if (err == -EMSGSIZE || err == -EEXIST) { + tcp_mark_push(tp, skb); goto new_segment; + } if (err < 0) goto do_error; copy = err; @@ -1729,8 +1733,8 @@ static void tcp_update_recv_tstamps(struct sk_buff *skb, } /* Similar to __sock_recv_timestamp, but does not require an skb */ -void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, - struct scm_timestamping *tss) +static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, + struct scm_timestamping *tss) { struct timeval tv; bool has_timestamping = false; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 4d55c4b338ee..ff71b18d9682 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5299,6 +5299,9 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, unsigned int len = skb->len; struct tcp_sock *tp = tcp_sk(sk); + /* TCP congestion window tracking */ + trace_tcp_probe(sk, skb); + tcp_mstamp_refresh(tp); if (unlikely(!sk->sk_rx_dst)) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index dd945b114215..5d203248123e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1911,7 +1911,7 @@ void tcp_v4_destroy_sock(struct sock *sk) /* Clean up the MD5 key list, if any */ if (tp->md5sig_info) { tcp_clear_md5_list(sk); - kfree_rcu(tp->md5sig_info, rcu); + kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu); tp->md5sig_info = NULL; } #endif diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 04be9f833927..95461f02ac9a 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1944,7 +1944,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, in_flight = tcp_packets_in_flight(tp); - BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); + BUG_ON(tcp_skb_pcount(skb) <= 1); + BUG_ON(tp->snd_cwnd <= in_flight); send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c deleted file mode 100644 index 697f4c67b2e3..000000000000 --- a/net/ipv4/tcp_probe.c +++ /dev/null @@ -1,301 +0,0 @@ -/* - * tcpprobe - Observe the TCP flow with kprobes. - * - * The idea for this came from Werner Almesberger's umlsim - * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/kprobes.h> -#include <linux/socket.h> -#include <linux/tcp.h> -#include <linux/slab.h> -#include <linux/proc_fs.h> -#include <linux/module.h> -#include <linux/ktime.h> -#include <linux/time.h> -#include <net/net_namespace.h> - -#include <net/tcp.h> - -MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); -MODULE_DESCRIPTION("TCP cwnd snooper"); -MODULE_LICENSE("GPL"); -MODULE_VERSION("1.1"); - -static int port __read_mostly; -MODULE_PARM_DESC(port, "Port to match (0=all)"); -module_param(port, int, 0); - -static unsigned int bufsize __read_mostly = 4096; -MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); -module_param(bufsize, uint, 0); - -static unsigned int fwmark __read_mostly; -MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)"); -module_param(fwmark, uint, 0); - -static int full __read_mostly; -MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); -module_param(full, int, 0); - -static const char procname[] = "tcpprobe"; - -struct tcp_log { - ktime_t tstamp; - union { - struct sockaddr raw; - struct sockaddr_in v4; - struct sockaddr_in6 v6; - } src, dst; - u16 length; - u32 snd_nxt; - u32 snd_una; - u32 snd_wnd; - u32 rcv_wnd; - u32 snd_cwnd; - u32 ssthresh; - u32 srtt; -}; - -static struct { - spinlock_t lock; - wait_queue_head_t wait; - ktime_t start; - u32 lastcwnd; - - unsigned long head, tail; - struct tcp_log *log; -} tcp_probe; - -static inline int tcp_probe_used(void) -{ - return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); -} - -static inline int tcp_probe_avail(void) -{ - return bufsize - tcp_probe_used() - 1; -} - -#define tcp_probe_copy_fl_to_si4(inet, si4, mem) \ - do { \ - si4.sin_family = AF_INET; \ - si4.sin_port = inet->inet_##mem##port; \ - si4.sin_addr.s_addr = inet->inet_##mem##addr; \ - } while (0) \ - -/* - * Hook inserted to be called before each receive packet. - * Note: arguments must match tcp_rcv_established()! 
- */ -static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th) -{ - unsigned int len = skb->len; - const struct tcp_sock *tp = tcp_sk(sk); - const struct inet_sock *inet = inet_sk(sk); - - /* Only update if port or skb mark matches */ - if (((port == 0 && fwmark == 0) || - ntohs(inet->inet_dport) == port || - ntohs(inet->inet_sport) == port || - (fwmark > 0 && skb->mark == fwmark)) && - (full || tp->snd_cwnd != tcp_probe.lastcwnd)) { - - spin_lock(&tcp_probe.lock); - /* If log fills, just silently drop */ - if (tcp_probe_avail() > 1) { - struct tcp_log *p = tcp_probe.log + tcp_probe.head; - - p->tstamp = ktime_get(); - switch (sk->sk_family) { - case AF_INET: - tcp_probe_copy_fl_to_si4(inet, p->src.v4, s); - tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d); - break; - case AF_INET6: - memset(&p->src.v6, 0, sizeof(p->src.v6)); - memset(&p->dst.v6, 0, sizeof(p->dst.v6)); -#if IS_ENABLED(CONFIG_IPV6) - p->src.v6.sin6_family = AF_INET6; - p->src.v6.sin6_port = inet->inet_sport; - p->src.v6.sin6_addr = inet6_sk(sk)->saddr; - - p->dst.v6.sin6_family = AF_INET6; - p->dst.v6.sin6_port = inet->inet_dport; - p->dst.v6.sin6_addr = sk->sk_v6_daddr; -#endif - break; - default: - BUG(); - } - - p->length = len; - p->snd_nxt = tp->snd_nxt; - p->snd_una = tp->snd_una; - p->snd_cwnd = tp->snd_cwnd; - p->snd_wnd = tp->snd_wnd; - p->rcv_wnd = tp->rcv_wnd; - p->ssthresh = tcp_current_ssthresh(sk); - p->srtt = tp->srtt_us >> 3; - - tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); - } - tcp_probe.lastcwnd = tp->snd_cwnd; - spin_unlock(&tcp_probe.lock); - - wake_up(&tcp_probe.wait); - } - - jprobe_return(); -} - -static struct jprobe tcp_jprobe = { - .kp = { - .symbol_name = "tcp_rcv_established", - }, - .entry = jtcp_rcv_established, -}; - -static int tcpprobe_open(struct inode *inode, struct file *file) -{ - /* Reset (empty) log */ - spin_lock_bh(&tcp_probe.lock); - tcp_probe.head = tcp_probe.tail = 0; - tcp_probe.start = ktime_get(); - spin_unlock_bh(&tcp_probe.lock); - - return 0; -} - -static int tcpprobe_sprint(char *tbuf, int n) -{ - const struct tcp_log *p - = tcp_probe.log + tcp_probe.tail; - struct timespec64 ts - = ktime_to_timespec64(ktime_sub(p->tstamp, tcp_probe.start)); - - return scnprintf(tbuf, n, - "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n", - (unsigned long)ts.tv_sec, - (unsigned long)ts.tv_nsec, - &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una, - p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd); -} - -static ssize_t tcpprobe_read(struct file *file, char __user *buf, - size_t len, loff_t *ppos) -{ - int error = 0; - size_t cnt = 0; - - if (!buf) - return -EINVAL; - - while (cnt < len) { - char tbuf[256]; - int width; - - /* Wait for data in buffer */ - error = wait_event_interruptible(tcp_probe.wait, - tcp_probe_used() > 0); - if (error) - break; - - spin_lock_bh(&tcp_probe.lock); - if (tcp_probe.head == tcp_probe.tail) { - /* multiple readers race? */ - spin_unlock_bh(&tcp_probe.lock); - continue; - } - - width = tcpprobe_sprint(tbuf, sizeof(tbuf)); - - if (cnt + width < len) - tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1); - - spin_unlock_bh(&tcp_probe.lock); - - /* if record greater than space available - return partial buffer (so far) */ - if (cnt + width >= len) - break; - - if (copy_to_user(buf + cnt, tbuf, width)) - return -EFAULT; - cnt += width; - } - - return cnt == 0 ? 
error : cnt; -} - -static const struct file_operations tcpprobe_fops = { - .owner = THIS_MODULE, - .open = tcpprobe_open, - .read = tcpprobe_read, - .llseek = noop_llseek, -}; - -static __init int tcpprobe_init(void) -{ - int ret = -ENOMEM; - - /* Warning: if the function signature of tcp_rcv_established, - * has been changed, you also have to change the signature of - * jtcp_rcv_established, otherwise you end up right here! - */ - BUILD_BUG_ON(__same_type(tcp_rcv_established, - jtcp_rcv_established) == 0); - - init_waitqueue_head(&tcp_probe.wait); - spin_lock_init(&tcp_probe.lock); - - if (bufsize == 0) - return -EINVAL; - - bufsize = roundup_pow_of_two(bufsize); - tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); - if (!tcp_probe.log) - goto err0; - - if (!proc_create(procname, S_IRUSR, init_net.proc_net, &tcpprobe_fops)) - goto err0; - - ret = register_jprobe(&tcp_jprobe); - if (ret) - goto err1; - - pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n", - port, fwmark, bufsize); - return 0; - err1: - remove_proc_entry(procname, init_net.proc_net); - err0: - kfree(tcp_probe.log); - return ret; -} -module_init(tcpprobe_init); - -static __exit void tcpprobe_exit(void) -{ - remove_proc_entry(procname, init_net.proc_net); - unregister_jprobe(&tcp_jprobe); - kfree(tcp_probe.log); -} -module_exit(tcpprobe_exit); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index e9c0d1e1772e..db72619e07e4 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2490,8 +2490,6 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) if (!skb_queue_empty(&udp_sk(sk)->reader_queue)) mask |= POLLIN | POLLRDNORM; - sock_rps_record_flow(sk); - /* Check for false positives due to checksum errors */ if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1) diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index e50b7fea57ee..bcfc00e88756 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -23,6 +23,12 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) return xfrm4_extract_header(skb); } +static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + return dst_input(skb); +} + static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { @@ -33,7 +39,11 @@ static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk, iph->tos, skb->dev)) goto drop; } - return dst_input(skb); + + if (xfrm_trans_queue(skb, xfrm4_rcv_encap_finish2)) + goto drop; + + return 0; drop: kfree_skb(skb); return NET_RX_DROP; diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 7d885a44dc9d..8affc6d83d58 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -105,18 +105,15 @@ static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x, { __skb_push(skb, skb->mac_len); return skb_mac_gso_segment(skb, features); - } static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb) { struct xfrm_offload *xo = xfrm_offload(skb); - if (xo->flags & XFRM_GSO_SEGMENT) { - skb->network_header = skb->network_header - x->props.header_len; + if (xo->flags & XFRM_GSO_SEGMENT) skb->transport_header = skb->network_header + sizeof(struct iphdr); - } skb_reset_mac_len(skb); pskb_pull(skb, skb->mac_len + x->props.header_len); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ed06b1190f05..2435f7ab070b 100644 --- a/net/ipv6/addrconf.c 
+++ b/net/ipv6/addrconf.c @@ -3438,6 +3438,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, } else if (event == NETDEV_CHANGE) { if (!addrconf_link_ready(dev)) { /* device is still not ready. */ + rt6_sync_down_dev(dev, event); break; } @@ -3449,6 +3450,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, * multicast snooping switches */ ipv6_mc_up(idev); + rt6_sync_up(dev, RTNH_F_LINKDOWN); break; } idev->if_flags |= IF_READY; @@ -3484,6 +3486,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, if (run_pending) addrconf_dad_run(idev); + /* Device has an address by now */ + rt6_sync_up(dev, RTNH_F_DEAD); + /* * If the MTU changed during the interface down, * when the interface up, the changed MTU must be @@ -3577,6 +3582,7 @@ static bool addr_is_local(const struct in6_addr *addr) static int addrconf_ifdown(struct net_device *dev, int how) { + unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN; struct net *net = dev_net(dev); struct inet6_dev *idev; struct inet6_ifaddr *ifa, *tmp; @@ -3586,8 +3592,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) ASSERT_RTNL(); - rt6_ifdown(net, dev); - neigh_ifdown(&nd_tbl, dev); + rt6_disable_ip(dev, event); idev = __in6_dev_get(dev); if (!idev) diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index a1f918713006..fbf08ce3f5ab 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -221,8 +221,7 @@ ipv4_connected: if (__ipv6_addr_needs_scope_id(addr_type)) { if (addr_len >= sizeof(struct sockaddr_in6) && usin->sin6_scope_id) { - if (sk->sk_bound_dev_if && - sk->sk_bound_dev_if != usin->sin6_scope_id) { + if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) { err = -EINVAL; goto out; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index a902ff8f59be..7c888c6e53a9 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -141,14 +141,32 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) static void esp_output_done(struct crypto_async_request *base, int err) { struct sk_buff *skb = base->data; + struct xfrm_offload *xo = xfrm_offload(skb); void *tmp; - struct dst_entry *dst = skb_dst(skb); - struct xfrm_state *x = dst->xfrm; + struct xfrm_state *x; + + if (xo && (xo->flags & XFRM_DEV_RESUME)) + x = skb->sp->xvec[skb->sp->len - 1]; + else + x = skb_dst(skb)->xfrm; tmp = ESP_SKB_CB(skb)->tmp; esp_ssg_unref(x, tmp); kfree(tmp); - xfrm_output_resume(skb, err); + + if (xo && (xo->flags & XFRM_DEV_RESUME)) { + if (err) { + XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); + kfree_skb(skb); + return; + } + + skb_push(skb, skb->data - skb_mac_header(skb)); + secpath_reset(skb); + xfrm_dev_resume(skb); + } else { + xfrm_output_resume(skb, err); + } } /* Move ESP header back into place. 
*/ @@ -734,17 +752,13 @@ static int esp_init_aead(struct xfrm_state *x) char aead_name[CRYPTO_MAX_ALG_NAME]; struct crypto_aead *aead; int err; - u32 mask = 0; err = -ENAMETOOLONG; if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME) goto error; - if (x->xso.offload_handle) - mask |= CRYPTO_ALG_ASYNC; - - aead = crypto_alloc_aead(aead_name, 0, mask); + aead = crypto_alloc_aead(aead_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; @@ -774,7 +788,6 @@ static int esp_init_authenc(struct xfrm_state *x) char authenc_name[CRYPTO_MAX_ALG_NAME]; unsigned int keylen; int err; - u32 mask = 0; err = -EINVAL; if (!x->ealg) @@ -800,10 +813,7 @@ static int esp_init_authenc(struct xfrm_state *x) goto error; } - if (x->xso.offload_handle) - mask |= CRYPTO_ALG_ASYNC; - - aead = crypto_alloc_aead(authenc_name, 0, mask); + aead = crypto_alloc_aead(authenc_name, 0, 0); err = PTR_ERR(aead); if (IS_ERR(aead)) goto error; diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 333a478aa161..0bb7d54cf2cb 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -135,75 +135,36 @@ static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb) static struct sk_buff *esp6_gso_segment(struct sk_buff *skb, netdev_features_t features) { - __u32 seq; - int err = 0; - struct sk_buff *skb2; struct xfrm_state *x; struct ip_esp_hdr *esph; struct crypto_aead *aead; - struct sk_buff *segs = ERR_PTR(-EINVAL); netdev_features_t esp_features = features; struct xfrm_offload *xo = xfrm_offload(skb); if (!xo) - goto out; - - seq = xo->seq.low; + return ERR_PTR(-EINVAL); x = skb->sp->xvec[skb->sp->len - 1]; aead = x->data; esph = ip_esp_hdr(skb); if (esph->spi != x->id.spi) - goto out; + return ERR_PTR(-EINVAL); if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) - goto out; + return ERR_PTR(-EINVAL); __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)); skb->encap_hdr_csum = 1; - if (!(features & NETIF_F_HW_ESP)) + if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle || + (x->xso.dev != skb->dev)) esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); - segs = x->outer_mode->gso_segment(x, skb, esp_features); - if (IS_ERR_OR_NULL(segs)) - goto out; - - __skb_pull(skb, skb->data - skb_mac_header(skb)); - - skb2 = segs; - do { - struct sk_buff *nskb = skb2->next; - - xo = xfrm_offload(skb2); - xo->flags |= XFRM_GSO_SEGMENT; - xo->seq.low = seq; - xo->seq.hi = xfrm_replay_seqhi(x, seq); - - if(!(features & NETIF_F_HW_ESP)) - xo->flags |= CRYPTO_FALLBACK; - - x->outer_mode->xmit(x, skb2); - - err = x->type_offload->xmit(x, skb2, esp_features); - if (err) { - kfree_skb_list(segs); - return ERR_PTR(err); - } - - if (!skb_is_gso(skb2)) - seq++; - else - seq += skb_shinfo(skb2)->gso_segs; - - skb_push(skb2, skb2->mac_len); - skb2 = nskb; - } while (skb2); + xo->flags |= XFRM_GSO_SEGMENT; -out: - return segs; + return x->outer_mode->gso_segment(x, skb, esp_features); } static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb) @@ -222,6 +183,7 @@ static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb) static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features) { + int len; int err; int alen; int blksize; @@ -230,6 +192,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features struct crypto_aead *aead; struct esp_info esp; bool hw_offload = true; + __u32 seq; esp.inplace = true; @@ -265,28 +228,33 @@ static int esp6_xmit(struct 
xfrm_state *x, struct sk_buff *skb, netdev_features return esp.nfrags; } + seq = xo->seq.low; + esph = ip_esp_hdr(skb); esph->spi = x->id.spi; skb_push(skb, -skb_network_offset(skb)); if (xo->flags & XFRM_GSO_SEGMENT) { - esph->seq_no = htonl(xo->seq.low); - } else { - int len; - - len = skb->len - sizeof(struct ipv6hdr); - if (len > IPV6_MAXPLEN) - len = 0; + esph->seq_no = htonl(seq); - ipv6_hdr(skb)->payload_len = htons(len); + if (!skb_is_gso(skb)) + xo->seq.low++; + else + xo->seq.low += skb_shinfo(skb)->gso_segs; } + esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); + + len = skb->len - sizeof(struct ipv6hdr); + if (len > IPV6_MAXPLEN) + len = 0; + + ipv6_hdr(skb)->payload_len = htons(len); + if (hw_offload) return 0; - esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32)); - err = esp6_output_tail(x, skb, &esp); if (err) return err; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index a64d559fa513..e31118f417b4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -107,16 +107,13 @@ enum { void fib6_update_sernum(struct rt6_info *rt) { - struct fib6_table *table = rt->rt6i_table; struct net *net = dev_net(rt->dst.dev); struct fib6_node *fn; - spin_lock_bh(&table->tb6_lock); fn = rcu_dereference_protected(rt->rt6i_node, - lockdep_is_held(&table->tb6_lock)); + lockdep_is_held(&rt->rt6i_table->tb6_lock)); if (fn) fn->fn_sernum = fib6_new_sernum(net); - spin_unlock_bh(&table->tb6_lock); } /* @@ -799,12 +796,6 @@ insert_above: return ln; } -static bool rt6_qualify_for_ecmp(struct rt6_info *rt) -{ - return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) == - RTF_GATEWAY; -} - static void fib6_copy_metrics(u32 *mp, const struct mx6_config *mxc) { int i; @@ -994,6 +985,7 @@ next_iter: rt6i_nsiblings++; } BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings); + rt6_multipath_rebalance(temp_sibling); } /* @@ -1102,8 +1094,8 @@ void fib6_force_start_gc(struct net *net) jiffies + net->ipv6.sysctl.ip6_rt_gc_interval); } -static void fib6_update_sernum_upto_root(struct rt6_info *rt, - int sernum) +static void __fib6_update_sernum_upto_root(struct rt6_info *rt, + int sernum) { struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node, lockdep_is_held(&rt->rt6i_table->tb6_lock)); @@ -1117,6 +1109,11 @@ static void fib6_update_sernum_upto_root(struct rt6_info *rt, } } +void fib6_update_sernum_upto_root(struct net *net, struct rt6_info *rt) +{ + __fib6_update_sernum_upto_root(rt, fib6_new_sernum(net)); +} + /* * Add routing information to the routing tree. * <destination addr>/<source addr> @@ -1230,7 +1227,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, err = fib6_add_rt2node(fn, rt, info, mxc, extack); if (!err) { - fib6_update_sernum_upto_root(rt, sernum); + __fib6_update_sernum_upto_root(rt, sernum); fib6_start_gc(info->nl_net, rt); } @@ -1241,23 +1238,28 @@ out: * If fib6_add_1 has cleared the old leaf pointer in the * super-tree leaf node we have to find a new one for it. 
*/ - struct rt6_info *pn_leaf = rcu_dereference_protected(pn->leaf, - lockdep_is_held(&table->tb6_lock)); - if (pn != fn && pn_leaf == rt) { - pn_leaf = NULL; - RCU_INIT_POINTER(pn->leaf, NULL); - atomic_dec(&rt->rt6i_ref); - } - if (pn != fn && !pn_leaf && !(pn->fn_flags & RTN_RTINFO)) { - pn_leaf = fib6_find_prefix(info->nl_net, table, pn); -#if RT6_DEBUG >= 2 - if (!pn_leaf) { - WARN_ON(!pn_leaf); - pn_leaf = info->nl_net->ipv6.ip6_null_entry; + if (pn != fn) { + struct rt6_info *pn_leaf = + rcu_dereference_protected(pn->leaf, + lockdep_is_held(&table->tb6_lock)); + if (pn_leaf == rt) { + pn_leaf = NULL; + RCU_INIT_POINTER(pn->leaf, NULL); + atomic_dec(&rt->rt6i_ref); } + if (!pn_leaf && !(pn->fn_flags & RTN_RTINFO)) { + pn_leaf = fib6_find_prefix(info->nl_net, table, + pn); +#if RT6_DEBUG >= 2 + if (!pn_leaf) { + WARN_ON(!pn_leaf); + pn_leaf = + info->nl_net->ipv6.ip6_null_entry; + } #endif - atomic_inc(&pn_leaf->rt6i_ref); - rcu_assign_pointer(pn->leaf, pn_leaf); + atomic_inc(&pn_leaf->rt6i_ref); + rcu_assign_pointer(pn->leaf, pn_leaf); + } } #endif goto failure; @@ -1665,6 +1667,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, sibling->rt6i_nsiblings--; rt->rt6i_nsiblings = 0; list_del_init(&rt->rt6i_siblings); + rt6_multipath_rebalance(next_sibling); } /* Adjust walkers */ @@ -1887,7 +1890,7 @@ static int fib6_clean_node(struct fib6_walker *w) for_each_fib6_walker_rt(w) { res = c->func(rt, c->arg); - if (res < 0) { + if (res == -1) { w->leaf = rt; res = fib6_del(rt, &info); if (res) { @@ -1900,6 +1903,12 @@ static int fib6_clean_node(struct fib6_walker *w) continue; } return 0; + } else if (res == -2) { + if (WARN_ON(!rt->rt6i_nsiblings)) + continue; + rt = list_last_entry(&rt->rt6i_siblings, + struct rt6_info, rt6i_siblings); + continue; } WARN_ON(res != 0); } @@ -1911,7 +1920,8 @@ static int fib6_clean_node(struct fib6_walker *w) * Convenient frontend to tree walker. * * func is called on each route. - * It may return -1 -> delete this route. + * It may return -2 -> skip multipath route. + * -1 -> delete this route. * 0 -> continue walking */ @@ -2103,7 +2113,6 @@ static void fib6_net_exit(struct net *net) { unsigned int i; - rt6_ifdown(net, NULL); del_timer_sync(&net->ipv6.ip6_fib_timer); for (i = 0; i < FIB6_TABLE_HASHSZ; i++) { diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 97f148f15429..db99446e0276 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -550,10 +550,6 @@ static int ip6erspan_rcv(struct sk_buff *skb, int gre_hdr_len, info = &tun_dst->u.tun_info; md = ip_tunnel_info_opts(info); - if (!md) { - dst_release((struct dst_entry *)tun_dst); - return PACKET_REJECT; - } memcpy(md, pkt_md, sizeof(*md)); md->version = ver; @@ -1337,6 +1333,36 @@ static void ip6gre_tunnel_setup(struct net_device *dev) eth_random_addr(dev->perm_addr); } +#define GRE6_FEATURES (NETIF_F_SG | \ + NETIF_F_FRAGLIST | \ + NETIF_F_HIGHDMA | \ + NETIF_F_HW_CSUM) + +static void ip6gre_tnl_init_features(struct net_device *dev) +{ + struct ip6_tnl *nt = netdev_priv(dev); + + dev->features |= GRE6_FEATURES; + dev->hw_features |= GRE6_FEATURES; + + if (!(nt->parms.o_flags & TUNNEL_SEQ)) { + /* TCP offload with GRE SEQ is not supported, nor + * can we support 2 levels of outer headers requiring + * an update. 
+ */ + if (!(nt->parms.o_flags & TUNNEL_CSUM) || + nt->encap.type == TUNNEL_ENCAP_NONE) { + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + } + + /* Can use a lockless transmit, unless we generate + * output sequences + */ + dev->features |= NETIF_F_LLTX; + } +} + static int ip6gre_tunnel_init_common(struct net_device *dev) { struct ip6_tnl *tunnel; @@ -1375,6 +1401,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) dev->features |= NETIF_F_NETNS_LOCAL; netif_keep_dst(dev); } + ip6gre_tnl_init_features(dev); return 0; } @@ -1709,11 +1736,6 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = { .ndo_get_iflink = ip6_tnl_get_iflink, }; -#define GRE6_FEATURES (NETIF_F_SG | \ - NETIF_F_FRAGLIST | \ - NETIF_F_HIGHDMA | \ - NETIF_F_HW_CSUM) - static int ip6erspan_tap_init(struct net_device *dev) { struct ip6_tnl *tunnel; @@ -1852,26 +1874,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev, nt->net = dev_net(dev); ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]); - dev->features |= GRE6_FEATURES; - dev->hw_features |= GRE6_FEATURES; - - if (!(nt->parms.o_flags & TUNNEL_SEQ)) { - /* TCP offload with GRE SEQ is not supported, nor - * can we support 2 levels of outer headers requiring - * an update. - */ - if (!(nt->parms.o_flags & TUNNEL_CSUM) || - (nt->encap.type == TUNNEL_ENCAP_NONE)) { - dev->features |= NETIF_F_GSO_SOFTWARE; - dev->hw_features |= NETIF_F_GSO_SOFTWARE; - } - - /* Can use a lockless transmit, unless we generate - * output sequences - */ - dev->features |= NETIF_F_LLTX; - } - err = register_netdevice(dev); if (err) goto out; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ece2781a31b2..19adad6d90bc 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -138,6 +138,14 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s return ret; } +#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) + /* Policy lookup after SNAT yielded a new policy */ + if (skb_dst(skb)->xfrm) { + IPCB(skb)->flags |= IPSKB_REROUTED; + return dst_output(net, sk, skb); + } +#endif + if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) || dst_allfrag(skb_dst(skb)) || (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size)) @@ -370,7 +378,7 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk, return dst_output(net, sk, skb); } -static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) +unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) { unsigned int mtu; struct inet6_dev *idev; @@ -390,6 +398,7 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst) return mtu; } +EXPORT_SYMBOL_GPL(ip6_dst_mtu_forward); static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu) { diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 8a4610e84e58..8071f42cd8a0 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1077,10 +1077,11 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); neigh_release(neigh); } - } else if (!(t->parms.flags & - (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { - /* enable the cache only only if the routing decision does - * not depend on the current inner header value + } else if (t->parms.proto != 0 && !(t->parms.flags & + (IP6_TNL_F_USE_ORIG_TCLASS | + IP6_TNL_F_USE_ORIG_FWMARK))) { + /* enable the cache only if neither the outer protocol nor the + * routing decision 
depends on the current inner header value */ use_cache = true; } @@ -1679,11 +1680,11 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) { struct ip6_tnl *tnl = netdev_priv(dev); - if (tnl->parms.proto == IPPROTO_IPIP) { - if (new_mtu < ETH_MIN_MTU) + if (tnl->parms.proto == IPPROTO_IPV6) { + if (new_mtu < IPV6_MIN_MTU) return -EINVAL; } else { - if (new_mtu < IPV6_MIN_MTU) + if (new_mtu < ETH_MIN_MTU) return -EINVAL; } if (new_mtu > 0xFFF8 - dev->hard_header_len) diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 39970e212ad5..d95ceca7ff8f 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -68,32 +68,7 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb) } EXPORT_SYMBOL(ip6_route_me_harder); -/* - * Extra routing may needed on local out, as the QUEUE target never - * returns control to the table. - */ - -struct ip6_rt_info { - struct in6_addr daddr; - struct in6_addr saddr; - u_int32_t mark; -}; - -static void nf_ip6_saveroute(const struct sk_buff *skb, - struct nf_queue_entry *entry) -{ - struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); - - if (entry->state.hook == NF_INET_LOCAL_OUT) { - const struct ipv6hdr *iph = ipv6_hdr(skb); - - rt_info->daddr = iph->daddr; - rt_info->saddr = iph->saddr; - rt_info->mark = skb->mark; - } -} - -static int nf_ip6_reroute(struct net *net, struct sk_buff *skb, +static int nf_ip6_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry) { struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); @@ -103,7 +78,7 @@ static int nf_ip6_reroute(struct net *net, struct sk_buff *skb, if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || skb->mark != rt_info->mark) - return ip6_route_me_harder(net, skb); + return ip6_route_me_harder(entry->state.net, skb); } return 0; } @@ -190,25 +165,19 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, }; static const struct nf_ipv6_ops ipv6ops = { - .chk_addr = ipv6_chk_addr, - .route_input = ip6_route_input, - .fragment = ip6_fragment -}; - -static const struct nf_afinfo nf_ip6_afinfo = { - .family = AF_INET6, + .chk_addr = ipv6_chk_addr, + .route_input = ip6_route_input, + .fragment = ip6_fragment, .checksum = nf_ip6_checksum, .checksum_partial = nf_ip6_checksum_partial, .route = nf_ip6_route, - .saveroute = nf_ip6_saveroute, .reroute = nf_ip6_reroute, - .route_key_size = sizeof(struct ip6_rt_info), }; int __init ipv6_netfilter_init(void) { RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops); - return nf_register_afinfo(&nf_ip6_afinfo); + return 0; } /* This can be called from inet6_init() on errors, so it cannot @@ -217,5 +186,4 @@ int __init ipv6_netfilter_init(void) void ipv6_netfilter_fini(void) { RCU_INIT_POINTER(nf_ipv6_ops, NULL); - nf_unregister_afinfo(&nf_ip6_afinfo); } diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 6acb2eecd986..806e95375ec8 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -71,6 +71,14 @@ config NFT_FIB_IPV6 endif # NF_TABLES_IPV6 endif # NF_TABLES +config NF_FLOW_TABLE_IPV6 + select NF_FLOW_TABLE + tristate "Netfilter flow table IPv6 module" + help + This option adds the flow table IPv6 support. + + To compile it as a module, choose M here. 
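The ip6_tunnel.c hunk above fixes an inverted MTU bounds check: a tunnel whose payload protocol is IPPROTO_IPV6 must keep at least the IPv6 minimum MTU (1280 bytes), while any other payload only needs the Ethernet minimum (68 bytes), and the old code had the two branches swapped. A minimal userspace sketch of the corrected logic, assuming the kernel constant values; ip6_tnl_mtu_ok() and its parameters are illustrative stand-ins, not kernel API:

/* Corrected bounds check from the ip6_tnl_change_mtu() hunk above.
 * ETH_MIN_MTU and IPV6_MIN_MTU carry the kernel values; the function
 * itself is an illustrative stand-in, not a kernel symbol. */
#include <errno.h>
#include <netinet/in.h>		/* IPPROTO_IPV6, IPPROTO_IPIP */
#include <stdio.h>

#define ETH_MIN_MTU	68	/* minimum for non-IPv6 payloads */
#define IPV6_MIN_MTU	1280	/* RFC 8200 minimum link MTU */

static int ip6_tnl_mtu_ok(int proto, int new_mtu, int max_mtu)
{
	int min_mtu = (proto == IPPROTO_IPV6) ? IPV6_MIN_MTU : ETH_MIN_MTU;

	if (new_mtu < min_mtu || new_mtu > max_mtu)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* ip6-in-ip6: 1000 is below the IPv6 minimum -> -EINVAL (-22) */
	printf("%d\n", ip6_tnl_mtu_ok(IPPROTO_IPV6, 1000, 0xfff8 - 14));
	/* ipv4-in-ipv6: 1000 is acceptable -> 0 */
	printf("%d\n", ip6_tnl_mtu_ok(IPPROTO_IPIP, 1000, 0xfff8 - 14));
	return 0;
}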
+ config NF_DUP_IPV6 tristate "Netfilter IPv6 packet duplication to alternate destination" depends on !NF_CONNTRACK || NF_CONNTRACK diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index c6ee0cdd0ba9..95611c4b39b0 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -45,6 +45,9 @@ obj-$(CONFIG_NFT_REDIR_IPV6) += nft_redir_ipv6.o obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o +# flow table support +obj-$(CONFIG_NF_FLOW_TABLE_IPV6) += nf_flow_table_ipv6.o + # matches obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 1d7ae9366335..6ebbef2dfb60 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -991,9 +991,8 @@ static int get_info(struct net *net, void __user *user, if (compat) xt_compat_lock(AF_INET6); #endif - t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), - "ip6table_%s", name); - if (t) { + t = xt_request_find_table_lock(net, AF_INET6, name); + if (!IS_ERR(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT @@ -1023,7 +1022,7 @@ static int get_info(struct net *net, void __user *user, xt_table_unlock(t); module_put(t->me); } else - ret = -ENOENT; + ret = PTR_ERR(t); #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET6); @@ -1049,7 +1048,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, get.name[sizeof(get.name) - 1] = '\0'; t = xt_find_table_lock(net, AF_INET6, get.name); - if (t) { + if (!IS_ERR(t)) { struct xt_table_info *private = t->private; if (get.size == private->size) ret = copy_entries_to_user(private->size, @@ -1060,7 +1059,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr, module_put(t->me); xt_table_unlock(t); } else - ret = -ENOENT; + ret = PTR_ERR(t); return ret; } @@ -1083,10 +1082,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, goto out; } - t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), - "ip6table_%s", name); - if (!t) { - ret = -ENOENT; + t = xt_request_find_table_lock(net, AF_INET6, name); + if (IS_ERR(t)) { + ret = PTR_ERR(t); goto free_newinfo_counters_untrans; } @@ -1199,8 +1197,8 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len, if (IS_ERR(paddc)) return PTR_ERR(paddc); t = xt_find_table_lock(net, AF_INET6, tmp.name); - if (!t) { - ret = -ENOENT; + if (IS_ERR(t)) { + ret = PTR_ERR(t); goto free; } @@ -1636,7 +1634,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, xt_compat_lock(AF_INET6); t = xt_find_table_lock(net, AF_INET6, get.name); - if (t) { + if (!IS_ERR(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; ret = compat_table_info(private, &info); @@ -1650,7 +1648,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, module_put(t->me); xt_table_unlock(t); } else - ret = -ENOENT; + ret = PTR_ERR(t); xt_compat_unlock(AF_INET6); return ret; diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 2b1a9dcdbcb3..b0524b18c4fb 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -42,14 +42,6 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) u_int8_t hop_limit; u_int32_t flowlabel, mark; int err; -#if 0 - /* root is 
playing with raw sockets. */ - if (skb->len < sizeof(struct iphdr) || - ip_hdrlen(skb) < sizeof(struct iphdr)) { - net_warn_ratelimited("ip6t_hook: happy cracking\n"); - return NF_ACCEPT; - } -#endif /* save source/dest address, mark, hoplimit, flowlabel, priority, */ memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index 991512576c8c..47306e45a80a 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c @@ -74,6 +74,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = { { .hook = ip6table_nat_in, .pf = NFPROTO_IPV6, + .nat_hook = true, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP6_PRI_NAT_DST, }, @@ -81,6 +82,7 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = { { .hook = ip6table_nat_out, .pf = NFPROTO_IPV6, + .nat_hook = true, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP6_PRI_NAT_SRC, }, @@ -88,12 +90,14 @@ static const struct nf_hook_ops nf_nat_ipv6_ops[] = { { .hook = ip6table_nat_local_fn, .pf = NFPROTO_IPV6, + .nat_hook = true, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP6_PRI_NAT_DST, }, /* After packet filtering, change source */ { .hook = ip6table_nat_fn, + .nat_hook = true, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP6_PRI_NAT_SRC, diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 3b80a38f62b8..11a313fd9273 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -176,11 +176,6 @@ static unsigned int ipv6_conntrack_local(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct ipv6hdr)) { - net_notice_ratelimited("ipv6_conntrack_local: packet too short\n"); - return NF_ACCEPT; - } return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); } @@ -368,7 +363,7 @@ static struct nf_sockopt_ops so_getorigdst6 = { .owner = THIS_MODULE, }; -static struct nf_conntrack_l4proto *builtin_l4proto6[] = { +static const struct nf_conntrack_l4proto * const builtin_l4proto6[] = { &nf_conntrack_l4proto_tcp6, &nf_conntrack_l4proto_udp6, &nf_conntrack_l4proto_icmpv6, diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 3ac0d826afc4..2548e2c8aedd 100644 --- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -27,7 +27,7 @@ #include <net/netfilter/ipv6/nf_conntrack_icmpv6.h> #include <net/netfilter/nf_log.h> -static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ; +static const unsigned int nf_ct_icmpv6_timeout = 30*HZ; static inline struct nf_icmp_net *icmpv6_pernet(struct net *net) { @@ -352,7 +352,7 @@ static struct nf_proto_net *icmpv6_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.icmpv6.pn; } -struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_ICMPV6, diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c new file mode 100644 index 000000000000..0c3b9d32f64f --- /dev/null +++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c @@ -0,0 +1,278 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netfilter.h> +#include <linux/rhashtable.h> +#include <linux/ipv6.h> +#include <linux/netdevice.h> 
+#include <linux/ipv6.h> +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/neighbour.h> +#include <net/netfilter/nf_flow_table.h> +#include <net/netfilter/nf_tables.h> +/* For layer 4 checksum field offset. */ +#include <linux/tcp.h> +#include <linux/udp.h> + +static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff, + struct in6_addr *addr, + struct in6_addr *new_addr) +{ + struct tcphdr *tcph; + + if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) || + skb_try_make_writable(skb, thoff + sizeof(*tcph))) + return -1; + + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32, + new_addr->s6_addr32, true); + + return 0; +} + +static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff, + struct in6_addr *addr, + struct in6_addr *new_addr) +{ + struct udphdr *udph; + + if (!pskb_may_pull(skb, thoff + sizeof(*udph)) || + skb_try_make_writable(skb, thoff + sizeof(*udph))) + return -1; + + udph = (void *)(skb_network_header(skb) + thoff); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32, + new_addr->s6_addr32, true); + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } + + return 0; +} + +static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h, + unsigned int thoff, struct in6_addr *addr, + struct in6_addr *new_addr) +{ + switch (ip6h->nexthdr) { + case IPPROTO_TCP: + if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0) + return NF_DROP; + break; + case IPPROTO_UDP: + if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0) + return NF_DROP; + break; + } + + return 0; +} + +static int nf_flow_snat_ipv6(const struct flow_offload *flow, + struct sk_buff *skb, struct ipv6hdr *ip6h, + unsigned int thoff, + enum flow_offload_tuple_dir dir) +{ + struct in6_addr addr, new_addr; + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + addr = ip6h->saddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6; + ip6h->saddr = new_addr; + break; + case FLOW_OFFLOAD_DIR_REPLY: + addr = ip6h->daddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6; + ip6h->daddr = new_addr; + break; + default: + return -1; + } + + return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); +} + +static int nf_flow_dnat_ipv6(const struct flow_offload *flow, + struct sk_buff *skb, struct ipv6hdr *ip6h, + unsigned int thoff, + enum flow_offload_tuple_dir dir) +{ + struct in6_addr addr, new_addr; + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + addr = ip6h->daddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6; + ip6h->daddr = new_addr; + break; + case FLOW_OFFLOAD_DIR_REPLY: + addr = ip6h->saddr; + new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6; + ip6h->saddr = new_addr; + break; + default: + return -1; + } + + return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr); +} + +static int nf_flow_nat_ipv6(const struct flow_offload *flow, + struct sk_buff *skb, + enum flow_offload_tuple_dir dir) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + unsigned int thoff = sizeof(*ip6h); + + if (flow->flags & FLOW_OFFLOAD_SNAT && + (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || + nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) + return -1; + if (flow->flags & FLOW_OFFLOAD_DNAT && + (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 || + nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0)) + return -1; + + return 0; +} + 
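The nf_flow_snat_ipv6()/nf_flow_dnat_ipv6() helpers above rewrite one 128-bit address in place and let inet_proto_csum_replace16() patch the TCP/UDP checksum incrementally instead of recomputing it over the whole payload. A standalone sketch of that incremental update (RFC 1624: HC' = ~(~HC + ~m + m')), treating the address as 16-bit words and ignoring byte order for clarity; csum_replace16() here is a simplified stand-in for the kernel helper:

/* One's-complement incremental checksum update: fold out the old
 * 16-byte address, fold in the new one, leaving the rest of the
 * packet untouched. */
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t sum, uint16_t v)
{
	sum += v;
	return (sum & 0xffff) + (sum >> 16);	/* end-around carry */
}

static uint16_t csum_replace16(uint16_t check,
			       const uint16_t old[8], const uint16_t new16[8])
{
	uint32_t sum = (uint16_t)~check;	/* back to the raw sum */

	for (int i = 0; i < 8; i++) {
		sum = csum_add(sum, (uint16_t)~old[i]);	/* subtract old */
		sum = csum_add(sum, new16[i]);		/* add new */
	}
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t old[8]   = { 0x2001, 0x0db8, 0, 0, 0, 0, 0, 1 };
	uint16_t new16[8] = { 0x2001, 0x0db8, 0, 0, 0, 0, 0, 2 };

	printf("updated check: 0x%04x\n", csum_replace16(0x1234, old, new16));
	return 0;
}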
+static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev, + struct flow_offload_tuple *tuple) +{ + struct flow_ports *ports; + struct ipv6hdr *ip6h; + unsigned int thoff; + + if (!pskb_may_pull(skb, sizeof(*ip6h))) + return -1; + + ip6h = ipv6_hdr(skb); + + if (ip6h->nexthdr != IPPROTO_TCP && + ip6h->nexthdr != IPPROTO_UDP) + return -1; + + thoff = sizeof(*ip6h); + if (!pskb_may_pull(skb, thoff + sizeof(*ports))) + return -1; + + ports = (struct flow_ports *)(skb_network_header(skb) + thoff); + + tuple->src_v6 = ip6h->saddr; + tuple->dst_v6 = ip6h->daddr; + tuple->src_port = ports->source; + tuple->dst_port = ports->dest; + tuple->l3proto = AF_INET6; + tuple->l4proto = ip6h->nexthdr; + tuple->iifidx = dev->ifindex; + + return 0; +} + +/* Based on ip_exceeds_mtu(). */ +static bool __nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) +{ + if (skb->len <= mtu) + return false; + + if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu)) + return false; + + return true; +} + +static bool nf_flow_exceeds_mtu(struct sk_buff *skb, const struct rt6_info *rt) +{ + u32 mtu; + + mtu = ip6_dst_mtu_forward(&rt->dst); + if (__nf_flow_exceeds_mtu(skb, mtu)) + return true; + + return false; +} + +unsigned int +nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct nf_flowtable *flow_table = priv; + struct flow_offload_tuple tuple = {}; + enum flow_offload_tuple_dir dir; + struct flow_offload *flow; + struct net_device *outdev; + struct in6_addr *nexthop; + struct ipv6hdr *ip6h; + struct rt6_info *rt; + + if (skb->protocol != htons(ETH_P_IPV6)) + return NF_ACCEPT; + + if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0) + return NF_ACCEPT; + + tuplehash = flow_offload_lookup(flow_table, &tuple); + if (tuplehash == NULL) + return NF_ACCEPT; + + outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx); + if (!outdev) + return NF_ACCEPT; + + dir = tuplehash->tuple.dir; + flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]); + + rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache; + if (unlikely(nf_flow_exceeds_mtu(skb, rt))) + return NF_ACCEPT; + + if (skb_try_make_writable(skb, sizeof(*ip6h))) + return NF_DROP; + + if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) && + nf_flow_nat_ipv6(flow, skb, dir) < 0) + return NF_DROP; + + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + ip6h = ipv6_hdr(skb); + ip6h->hop_limit--; + + skb->dev = outdev; + nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6); + neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb); + + return NF_STOLEN; +} +EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook); + +static struct nf_flowtable_type flowtable_ipv6 = { + .family = NFPROTO_IPV6, + .params = &nf_flow_offload_rhash_params, + .gc = nf_flow_offload_work_gc, + .hook = nf_flow_offload_ipv6_hook, + .owner = THIS_MODULE, +}; + +static int __init nf_flow_ipv6_module_init(void) +{ + nft_register_flowtable_type(&flowtable_ipv6); + + return 0; +} + +static void __exit nf_flow_ipv6_module_exit(void) +{ + nft_unregister_flowtable_type(&flowtable_ipv6); +} + +module_init(nf_flow_ipv6_module_init); +module_exit(nf_flow_ipv6_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); +MODULE_ALIAS_NF_FLOWTABLE(AF_INET6); diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index 1d2fb9267d6f..bed57ee65f7b 100644 --- 
a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c @@ -369,10 +369,6 @@ nf_nat_ipv6_out(void *priv, struct sk_buff *skb, #endif unsigned int ret; - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct ipv6hdr)) - return NF_ACCEPT; - ret = nf_nat_ipv6_fn(priv, skb, state, do_chain); #ifdef CONFIG_XFRM if (ret != NF_DROP && ret != NF_STOLEN && @@ -408,10 +404,6 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb, unsigned int ret; int err; - /* root is playing with raw sockets. */ - if (skb->len < sizeof(struct ipv6hdr)) - return NF_ACCEPT; - ret = nf_nat_ipv6_fn(priv, skb, state, do_chain); if (ret != NF_DROP && ret != NF_STOLEN && (ct = nf_ct_get(skb, &ctinfo)) != NULL) { diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c index d6e4ba5de916..9cd45b964123 100644 --- a/net/ipv6/netfilter/nf_tables_ipv6.c +++ b/net/ipv6/netfilter/nf_tables_ipv6.c @@ -22,39 +22,17 @@ static unsigned int nft_do_chain_ipv6(void *priv, { struct nft_pktinfo pkt; - nft_set_pktinfo_ipv6(&pkt, skb, state); + nft_set_pktinfo(&pkt, skb, state); + nft_set_pktinfo_ipv6(&pkt, skb); return nft_do_chain(&pkt, priv); } -static unsigned int nft_ipv6_output(void *priv, - struct sk_buff *skb, - const struct nf_hook_state *state) -{ - if (unlikely(skb->len < sizeof(struct ipv6hdr))) { - if (net_ratelimit()) - pr_info("nf_tables_ipv6: ignoring short SOCK_RAW " - "packet\n"); - return NF_ACCEPT; - } - - return nft_do_chain_ipv6(priv, skb, state); -} - -struct nft_af_info nft_af_ipv6 __read_mostly = { +static struct nft_af_info nft_af_ipv6 __read_mostly = { .family = NFPROTO_IPV6, .nhooks = NF_INET_NUMHOOKS, .owner = THIS_MODULE, - .nops = 1, - .hooks = { - [NF_INET_LOCAL_IN] = nft_do_chain_ipv6, - [NF_INET_LOCAL_OUT] = nft_ipv6_output, - [NF_INET_FORWARD] = nft_do_chain_ipv6, - [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6, - [NF_INET_POST_ROUTING] = nft_do_chain_ipv6, - }, }; -EXPORT_SYMBOL_GPL(nft_af_ipv6); static int nf_tables_ipv6_init_net(struct net *net) { @@ -94,6 +72,13 @@ static const struct nf_chain_type filter_ipv6 = { (1 << NF_INET_FORWARD) | (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING), + .hooks = { + [NF_INET_LOCAL_IN] = nft_do_chain_ipv6, + [NF_INET_LOCAL_OUT] = nft_do_chain_ipv6, + [NF_INET_FORWARD] = nft_do_chain_ipv6, + [NF_INET_PRE_ROUTING] = nft_do_chain_ipv6, + [NF_INET_POST_ROUTING] = nft_do_chain_ipv6, + }, }; static int __init nf_tables_ipv6_init(void) diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c index 443cd306c0b0..73fe2bd13fcf 100644 --- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c +++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c @@ -31,7 +31,8 @@ static unsigned int nft_nat_do_chain(void *priv, { struct nft_pktinfo pkt; - nft_set_pktinfo_ipv6(&pkt, skb, state); + nft_set_pktinfo(&pkt, skb, state); + nft_set_pktinfo_ipv6(&pkt, skb); return nft_do_chain(&pkt, priv); } diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c index f2727475895e..11d3c3b9aa18 100644 --- a/net/ipv6/netfilter/nft_chain_route_ipv6.c +++ b/net/ipv6/netfilter/nft_chain_route_ipv6.c @@ -33,7 +33,8 @@ static unsigned int nf_route_table_hook(void *priv, u32 mark, flowlabel; int err; - nft_set_pktinfo_ipv6(&pkt, skb, state); + nft_set_pktinfo(&pkt, skb, state); + nft_set_pktinfo_ipv6(&pkt, skb); /* save source/dest address, mark, hoplimit, flowlabel, priority */ memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr)); diff --git 
a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 54b5899543ef..cc5174c7254c 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -60,7 +60,6 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv, { const struct net_device *dev = NULL; const struct nf_ipv6_ops *v6ops; - const struct nf_afinfo *afinfo; int route_err, addrtype; struct rt6_info *rt; struct flowi6 fl6 = { @@ -69,8 +68,8 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv, }; u32 ret = 0; - afinfo = nf_get_afinfo(NFPROTO_IPV6); - if (!afinfo) + v6ops = nf_get_ipv6_ops(); + if (!v6ops) return RTN_UNREACHABLE; if (priv->flags & NFTA_FIB_F_IIF) @@ -80,12 +79,11 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv, nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph); - v6ops = nf_get_ipv6_ops(); - if (dev && v6ops && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true)) + if (dev && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true)) ret = RTN_LOCAL; - route_err = afinfo->route(nft_net(pkt), (struct dst_entry **)&rt, - flowi6_to_flowi(&fl6), false); + route_err = v6ops->route(nft_net(pkt), (struct dst_entry **)&rt, + flowi6_to_flowi(&fl6), false); if (route_err) goto err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 2490280b3394..1076ae0ea9d5 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -455,7 +455,6 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match, int strict) { struct rt6_info *sibling, *next_sibling; - int route_choosen; /* We might have already computed the hash for ICMPv6 errors. In such * case it will always be non-zero. Otherwise now is the time to do it. @@ -463,26 +462,19 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match, if (!fl6->mp_hash) fl6->mp_hash = rt6_multipath_hash(fl6, NULL); - route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1); - /* Don't change the route, if route_choosen == 0 - * (siblings does not include ourself) - */ - if (route_choosen) - list_for_each_entry_safe(sibling, next_sibling, - &match->rt6i_siblings, rt6i_siblings) { - route_choosen--; - if (route_choosen == 0) { - struct inet6_dev *idev = sibling->rt6i_idev; - - if (!netif_carrier_ok(sibling->dst.dev) && - idev->cnf.ignore_routes_with_linkdown) - break; - if (rt6_score_route(sibling, oif, strict) < 0) - break; - match = sibling; - break; - } - } + if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound)) + return match; + + list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings, + rt6i_siblings) { + if (fl6->mp_hash > atomic_read(&sibling->rt6i_nh_upper_bound)) + continue; + if (rt6_score_route(sibling, oif, strict) < 0) + break; + match = sibling; + break; + } + return match; } @@ -499,12 +491,15 @@ static inline struct rt6_info *rt6_device_match(struct net *net, struct rt6_info *local = NULL; struct rt6_info *sprt; - if (!oif && ipv6_addr_any(saddr)) - goto out; + if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD)) + return rt; for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) { struct net_device *dev = sprt->dst.dev; + if (sprt->rt6i_nh_flags & RTNH_F_DEAD) + continue; + if (oif) { if (dev->ifindex == oif) return sprt; @@ -533,8 +528,8 @@ static inline struct rt6_info *rt6_device_match(struct net *net, if (flags & RT6_LOOKUP_F_IFACE) return net->ipv6.ip6_null_entry; } -out: - return rt; + + return rt->rt6i_nh_flags & RTNH_F_DEAD ? 
net->ipv6.ip6_null_entry : rt; } #ifdef CONFIG_IPV6_ROUTER_PREF @@ -679,10 +674,12 @@ static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, int m; bool match_do_rr = false; struct inet6_dev *idev = rt->rt6i_idev; - struct net_device *dev = rt->dst.dev; - if (dev && !netif_carrier_ok(dev) && - idev->cnf.ignore_routes_with_linkdown && + if (rt->rt6i_nh_flags & RTNH_F_DEAD) + goto out; + + if (idev->cnf.ignore_routes_with_linkdown && + rt->rt6i_nh_flags & RTNH_F_LINKDOWN && !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) goto out; @@ -1346,7 +1343,9 @@ out: /* Update fn->fn_sernum to invalidate all cached dst */ if (!err) { + spin_lock_bh(&ort->rt6i_table->tb6_lock); fib6_update_sernum(ort); + spin_unlock_bh(&ort->rt6i_table->tb6_lock); fib6_force_start_gc(net); } @@ -1824,10 +1823,10 @@ u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb) if (skb) { ip6_multipath_l3_keys(skb, &hash_keys); - return flow_hash_from_keys(&hash_keys); + return flow_hash_from_keys(&hash_keys) >> 1; } - return get_hash_from_flowi6(fl6); + return get_hash_from_flowi6(fl6) >> 1; } void ip6_route_input(struct sk_buff *skb) @@ -2154,6 +2153,8 @@ static struct rt6_info *__ip6_route_redirect(struct net *net, fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); restart: for_each_fib6_node_rt_rcu(fn) { + if (rt->rt6i_nh_flags & RTNH_F_DEAD) + continue; if (rt6_check_expired(rt)) continue; if (rt->dst.error) @@ -2344,7 +2345,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, rt->rt6i_idev = idev; dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0); - /* Add this dst into uncached_list so that rt6_ifdown() can + /* Add this dst into uncached_list so that rt6_disable_ip() can * do proper release of the net_device */ rt6_uncached_list_add(rt); @@ -2593,6 +2594,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, #endif rt->rt6i_metric = cfg->fc_metric; + rt->rt6i_nh_weight = 1; /* We cannot add true routes via loopback here, they would result in kernel looping; promote them to reject routes @@ -2746,6 +2748,9 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, rt->rt6i_flags = cfg->fc_flags; install_route: + if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) && + !netif_carrier_ok(dev)) + rt->rt6i_nh_flags |= RTNH_F_LINKDOWN; rt->dst.dev = dev; rt->rt6i_idev = idev; rt->rt6i_table = table; @@ -3459,37 +3464,245 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway) fib6_clean_all(net, fib6_clean_tohost, gateway); } -struct arg_dev_net { - struct net_device *dev; - struct net *net; +struct arg_netdev_event { + const struct net_device *dev; + union { + unsigned int nh_flags; + unsigned long event; + }; }; +static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt) +{ + struct rt6_info *iter; + struct fib6_node *fn; + + fn = rcu_dereference_protected(rt->rt6i_node, + lockdep_is_held(&rt->rt6i_table->tb6_lock)); + iter = rcu_dereference_protected(fn->leaf, + lockdep_is_held(&rt->rt6i_table->tb6_lock)); + while (iter) { + if (iter->rt6i_metric == rt->rt6i_metric && + rt6_qualify_for_ecmp(iter)) + return iter; + iter = rcu_dereference_protected(iter->rt6_next, + lockdep_is_held(&rt->rt6i_table->tb6_lock)); + } + + return NULL; +} + +static bool rt6_is_dead(const struct rt6_info *rt) +{ + if (rt->rt6i_nh_flags & RTNH_F_DEAD || + (rt->rt6i_nh_flags & RTNH_F_LINKDOWN && + rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) + return true; + + return false; +} + +static int rt6_multipath_total_weight(const struct 
rt6_info *rt) +{ + struct rt6_info *iter; + int total = 0; + + if (!rt6_is_dead(rt)) + total += rt->rt6i_nh_weight; + + list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) { + if (!rt6_is_dead(iter)) + total += iter->rt6i_nh_weight; + } + + return total; +} + +static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total) +{ + int upper_bound = -1; + + if (!rt6_is_dead(rt)) { + *weight += rt->rt6i_nh_weight; + upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31, + total) - 1; + } + atomic_set(&rt->rt6i_nh_upper_bound, upper_bound); +} + +static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total) +{ + struct rt6_info *iter; + int weight = 0; + + rt6_upper_bound_set(rt, &weight, total); + + list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + rt6_upper_bound_set(iter, &weight, total); +} + +void rt6_multipath_rebalance(struct rt6_info *rt) +{ + struct rt6_info *first; + int total; + + /* In case the entire multipath route was marked for flushing, + * then there is no need to rebalance upon the removal of every + * sibling route. + */ + if (!rt->rt6i_nsiblings || rt->should_flush) + return; + + /* During lookup routes are evaluated in order, so we need to + * make sure upper bounds are assigned from the first sibling + * onwards. + */ + first = rt6_multipath_first_sibling(rt); + if (WARN_ON_ONCE(!first)) + return; + + total = rt6_multipath_total_weight(first); + rt6_multipath_upper_bound_set(first, total); +} + +static int fib6_ifup(struct rt6_info *rt, void *p_arg) +{ + const struct arg_netdev_event *arg = p_arg; + const struct net *net = dev_net(arg->dev); + + if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) { + rt->rt6i_nh_flags &= ~arg->nh_flags; + fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt); + rt6_multipath_rebalance(rt); + } + + return 0; +} + +void rt6_sync_up(struct net_device *dev, unsigned int nh_flags) +{ + struct arg_netdev_event arg = { + .dev = dev, + .nh_flags = nh_flags, + }; + + if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev)) + arg.nh_flags |= RTNH_F_LINKDOWN; + + fib6_clean_all(dev_net(dev), fib6_ifup, &arg); +} + +static bool rt6_multipath_uses_dev(const struct rt6_info *rt, + const struct net_device *dev) +{ + struct rt6_info *iter; + + if (rt->dst.dev == dev) + return true; + list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + if (iter->dst.dev == dev) + return true; + + return false; +} + +static void rt6_multipath_flush(struct rt6_info *rt) +{ + struct rt6_info *iter; + + rt->should_flush = 1; + list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + iter->should_flush = 1; +} + +static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt, + const struct net_device *down_dev) +{ + struct rt6_info *iter; + unsigned int dead = 0; + + if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD) + dead++; + list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + if (iter->dst.dev == down_dev || + iter->rt6i_nh_flags & RTNH_F_DEAD) + dead++; + + return dead; +} + +static void rt6_multipath_nh_flags_set(struct rt6_info *rt, + const struct net_device *dev, + unsigned int nh_flags) +{ + struct rt6_info *iter; + + if (rt->dst.dev == dev) + rt->rt6i_nh_flags |= nh_flags; + list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) + if (iter->dst.dev == dev) + iter->rt6i_nh_flags |= nh_flags; +} + /* called with write lock held for table with rt */ -static int fib6_ifdown(struct rt6_info *rt, void *arg) +static int fib6_ifdown(struct rt6_info *rt, void 
*p_arg) { - const struct arg_dev_net *adn = arg; - const struct net_device *dev = adn->dev; + const struct arg_netdev_event *arg = p_arg; + const struct net_device *dev = arg->dev; + const struct net *net = dev_net(dev); - if ((rt->dst.dev == dev || !dev) && - rt != adn->net->ipv6.ip6_null_entry && - (rt->rt6i_nsiblings == 0 || - (dev && netdev_unregistering(dev)) || - !rt->rt6i_idev->cnf.ignore_routes_with_linkdown)) - return -1; + if (rt == net->ipv6.ip6_null_entry) + return 0; + + switch (arg->event) { + case NETDEV_UNREGISTER: + return rt->dst.dev == dev ? -1 : 0; + case NETDEV_DOWN: + if (rt->should_flush) + return -1; + if (!rt->rt6i_nsiblings) + return rt->dst.dev == dev ? -1 : 0; + if (rt6_multipath_uses_dev(rt, dev)) { + unsigned int count; + + count = rt6_multipath_dead_count(rt, dev); + if (rt->rt6i_nsiblings + 1 == count) { + rt6_multipath_flush(rt); + return -1; + } + rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD | + RTNH_F_LINKDOWN); + fib6_update_sernum(rt); + rt6_multipath_rebalance(rt); + } + return -2; + case NETDEV_CHANGE: + if (rt->dst.dev != dev || + rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) + break; + rt->rt6i_nh_flags |= RTNH_F_LINKDOWN; + rt6_multipath_rebalance(rt); + break; + } return 0; } -void rt6_ifdown(struct net *net, struct net_device *dev) +void rt6_sync_down_dev(struct net_device *dev, unsigned long event) { - struct arg_dev_net adn = { + struct arg_netdev_event arg = { .dev = dev, - .net = net, + .event = event, }; - fib6_clean_all(net, fib6_ifdown, &adn); - if (dev) - rt6_uncached_list_flush_dev(net, dev); + fib6_clean_all(dev_net(dev), fib6_ifdown, &arg); +} + +void rt6_disable_ip(struct net_device *dev, unsigned long event) +{ + rt6_sync_down_dev(dev, event); + rt6_uncached_list_flush_dev(dev_net(dev), dev); + neigh_ifdown(&nd_tbl, dev); } struct rt6_mtu_change_arg { @@ -3812,6 +4025,8 @@ static int ip6_route_multipath_add(struct fib6_config *cfg, goto cleanup; } + rt->rt6i_nh_weight = rtnh->rtnh_hops + 1; + err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); if (err) { dst_release_immediate(&rt->dst); @@ -3992,7 +4207,10 @@ static size_t rt6_nlmsg_size(struct rt6_info *rt) static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, unsigned int *flags, bool skip_oif) { - if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { + if (rt->rt6i_nh_flags & RTNH_F_DEAD) + *flags |= RTNH_F_DEAD; + + if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) { *flags |= RTNH_F_LINKDOWN; if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) *flags |= RTNH_F_DEAD; @@ -4031,7 +4249,7 @@ static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) if (!rtnh) goto nla_put_failure; - rtnh->rtnh_hops = 0; + rtnh->rtnh_hops = rt->rt6i_nh_weight - 1; rtnh->rtnh_ifindex = rt->dst.dev ? 
rt->dst.dev->ifindex : 0; if (rt6_nexthop_info(skb, rt, &flags, true) < 0) diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index 825b8e01f947..ba3767ef5e93 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -501,7 +501,7 @@ static struct seg6_action_desc *__get_action_desc(int action) struct seg6_action_desc *desc; int i, count; - count = sizeof(seg6_action_table) / sizeof(struct seg6_action_desc); + count = ARRAY_SIZE(seg6_action_table); for (i = 0; i < count; i++) { desc = &seg6_action_table[i]; if (desc->action == action) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index aa12a26a96c6..c0f7e69f2e6c 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -176,8 +176,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, /* If interface is set while binding, indices * must coincide. */ - if (sk->sk_bound_dev_if && - sk->sk_bound_dev_if != usin->sin6_scope_id) + if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) return -EINVAL; sk->sk_bound_dev_if = usin->sin6_scope_id; diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index fe04e23af986..841f4a07438e 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -32,6 +32,14 @@ int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, } EXPORT_SYMBOL(xfrm6_rcv_spi); +static int xfrm6_transport_finish2(struct net *net, struct sock *sk, + struct sk_buff *skb) +{ + if (xfrm_trans_queue(skb, ip6_rcv_finish)) + __kfree_skb(skb); + return -1; +} + int xfrm6_transport_finish(struct sk_buff *skb, int async) { struct xfrm_offload *xo = xfrm_offload(skb); @@ -56,7 +64,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, dev_net(skb->dev), NULL, skb, skb->dev, NULL, - ip6_rcv_finish); + xfrm6_transport_finish2); return -1; } diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index e66b94f46532..4e12859bc2ee 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c @@ -105,17 +105,14 @@ static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x, { __skb_push(skb, skb->mac_len); return skb_mac_gso_segment(skb, features); - } static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb) { struct xfrm_offload *xo = xfrm_offload(skb); - if (xo->flags & XFRM_GSO_SEGMENT) { - skb->network_header = skb->network_header - x->props.header_len; + if (xo->flags & XFRM_GSO_SEGMENT) skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); - } skb_reset_mac_len(skb); pskb_pull(skb, skb->mac_len + x->props.header_len); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 115918ad8eca..62285fc6eb59 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -662,10 +662,9 @@ discard: * |x|S|x|x|x|x|x|x| Sequence Number | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * - * Cookie value, sublayer format and offset (pad) are negotiated with - * the peer when the session is set up. Unlike L2TPv2, we do not need - * to parse the packet header to determine if optional fields are - * present. + * Cookie value and sublayer format are negotiated with the peer when + * the session is set up. Unlike L2TPv2, we do not need to parse the + * packet header to determine if optional fields are present. * * Caller must already have parsed the frame and determined that it is * a data (not control) frame before coming here. 
Fields up to the @@ -780,10 +779,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, } } - /* Session data offset is handled differently for L2TPv2 and - * L2TPv3. For L2TPv2, there is an optional 16-bit value in - * the header. For L2TPv3, the offset is negotiated using AVPs - * in the session setup control protocol. + /* Session data offset is defined only for L2TPv2 and is + * indicated by an optional 16-bit value in the header. */ if (tunnel->version == L2TP_HDR_VER_2) { /* If offset bit set, skip it. */ @@ -791,8 +788,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, offset = ntohs(*(__be16 *)ptr); ptr += 2 + offset; } - } else - ptr += session->offset; + } offset = ptr - optr; if (!pskb_may_pull(skb, offset)) @@ -1068,8 +1064,6 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) } bufp += session->l2specific_len; } - if (session->offset) - bufp += session->offset; return bufp - optr; } @@ -1734,7 +1728,7 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version) if (session->send_seq) session->hdr_len += 4; } else { - session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset; + session->hdr_len = 4 + session->cookie_len + session->l2specific_len; if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) session->hdr_len += 4; } @@ -1784,7 +1778,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn session->recv_seq = cfg->recv_seq; session->lns_mode = cfg->lns_mode; session->reorder_timeout = cfg->reorder_timeout; - session->offset = cfg->offset; session->l2specific_type = cfg->l2specific_type; session->l2specific_len = cfg->l2specific_len; session->cookie_len = cfg->cookie_len; diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9534e16965cc..c2e9bbd79b35 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -59,7 +59,6 @@ struct l2tp_session_cfg { int debug; /* bitmask of debug message * categories */ u16 vlan_id; /* VLAN pseudowire only */ - u16 offset; /* offset to payload */ u16 l2specific_len; /* Layer 2 specific length */ u16 l2specific_type; /* Layer 2 specific type */ u8 cookie[8]; /* optional cookie */ @@ -86,8 +85,6 @@ struct l2tp_session { int cookie_len; u8 peer_cookie[8]; int peer_cookie_len; - u16 offset; /* offset from end of L2TP header - to beginning of data */ u16 l2specific_len; u16 l2specific_type; u16 hdr_len; diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index eb69411bcb47..2c30587d1a14 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -180,8 +180,8 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) session->lns_mode ? 
"LNS" : "LAC", session->debug, jiffies_to_msecs(session->reorder_timeout)); - seq_printf(m, " offset %hu l2specific %hu/%hu\n", - session->offset, session->l2specific_type, session->l2specific_len); + seq_printf(m, " offset 0 l2specific %hu/%hu\n", + session->l2specific_type, session->l2specific_len); if (session->cookie_len) { seq_printf(m, " cookie %02x%02x%02x%02x", session->cookie[0], session->cookie[1], diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index a1f24fb2be98..e1ca29f79821 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -547,9 +547,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf } if (tunnel->version > 2) { - if (info->attrs[L2TP_ATTR_OFFSET]) - cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]); - if (info->attrs[L2TP_ATTR_DATA_SEQ]) cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]); diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index d444752dbf40..a8b1616cec41 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -153,27 +153,16 @@ EXPORT_SYMBOL(ieee80211_stop_rx_ba_session); */ static void sta_rx_agg_session_timer_expired(struct timer_list *t) { - struct tid_ampdu_rx *tid_rx_timer = - from_timer(tid_rx_timer, t, session_timer); - struct sta_info *sta = tid_rx_timer->sta; - u8 tid = tid_rx_timer->tid; - struct tid_ampdu_rx *tid_rx; + struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, session_timer); + struct sta_info *sta = tid_rx->sta; + u8 tid = tid_rx->tid; unsigned long timeout; - rcu_read_lock(); - tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); - if (!tid_rx) { - rcu_read_unlock(); - return; - } - timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); if (time_is_after_jiffies(timeout)) { mod_timer(&tid_rx->session_timer, timeout); - rcu_read_unlock(); return; } - rcu_read_unlock(); ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n", sta->sta.addr, tid); @@ -415,10 +404,11 @@ end: timeout); } -void __ieee80211_start_rx_ba_session(struct sta_info *sta, - u8 dialog_token, u16 timeout, - u16 start_seq_num, u16 ba_policy, u16 tid, - u16 buf_size, bool tx, bool auto_seq) +static void __ieee80211_start_rx_ba_session(struct sta_info *sta, + u8 dialog_token, u16 timeout, + u16 start_seq_num, u16 ba_policy, + u16 tid, u16 buf_size, bool tx, + bool auto_seq) { mutex_lock(&sta->ampdu_mlme.mtx); ___ieee80211_start_rx_ba_session(sta, dialog_token, timeout, diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 5f8ab5be369f..595c662a61e8 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -392,7 +392,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, * telling the driver. New packets will not go through since * the aggregation session is no longer OPERATIONAL. */ - synchronize_net(); + if (!local->in_reconfig) + synchronize_net(); tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ? 
WLAN_BACK_RECIPIENT : @@ -429,18 +430,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, */ static void sta_addba_resp_timer_expired(struct timer_list *t) { - struct tid_ampdu_tx *tid_tx_timer = - from_timer(tid_tx_timer, t, addba_resp_timer); - struct sta_info *sta = tid_tx_timer->sta; - u8 tid = tid_tx_timer->tid; - struct tid_ampdu_tx *tid_tx; + struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, addba_resp_timer); + struct sta_info *sta = tid_tx->sta; + u8 tid = tid_tx->tid; /* check if the TID waits for addBA response */ - rcu_read_lock(); - tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); - if (!tid_tx || - test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { - rcu_read_unlock(); + if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { ht_dbg(sta->sdata, "timer expired on %pM tid %d not expecting addBA response\n", sta->sta.addr, tid); @@ -451,7 +446,6 @@ static void sta_addba_resp_timer_expired(struct timer_list *t) sta->sta.addr, tid); ieee80211_stop_tx_ba_session(&sta->sta, tid); - rcu_read_unlock(); } void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) @@ -529,29 +523,21 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) */ static void sta_tx_agg_session_timer_expired(struct timer_list *t) { - struct tid_ampdu_tx *tid_tx_timer = - from_timer(tid_tx_timer, t, session_timer); - struct sta_info *sta = tid_tx_timer->sta; - u8 tid = tid_tx_timer->tid; - struct tid_ampdu_tx *tid_tx; + struct tid_ampdu_tx *tid_tx = from_timer(tid_tx, t, session_timer); + struct sta_info *sta = tid_tx->sta; + u8 tid = tid_tx->tid; unsigned long timeout; - rcu_read_lock(); - tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); - if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { - rcu_read_unlock(); + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { return; } timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout); if (time_is_after_jiffies(timeout)) { mod_timer(&tid_tx->session_timer, timeout); - rcu_read_unlock(); return; } - rcu_read_unlock(); - ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n", sta->sta.addr, tid); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fb15d3b97cb2..46028e12e216 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -573,10 +573,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_BIP_CMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_cmac)); + /* fall through */ case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_gmac)); + /* fall through */ case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != @@ -2205,6 +2207,7 @@ static int ieee80211_scan(struct wiphy *wiphy, * for now fall through to allow scanning only when * beaconing hasn't been configured yet */ + /* fall through */ case NL80211_IFTYPE_AP: /* * If the scan has been forced (and the driver supports @@ -2373,10 +2376,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy, struct ieee80211_sub_if_data *sdata; enum nl80211_tx_power_setting txp_type = type; bool update_txp_type = false; + bool has_monitor = false; if (wdev) { sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + sdata = rtnl_dereference(local->monitor_sdata); + if (!sdata) + return -EOPNOTSUPP; + } + switch (type) { case 
NL80211_TX_POWER_AUTOMATIC: sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL; @@ -2415,15 +2425,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy, mutex_lock(&local->iflist_mtx); list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { + has_monitor = true; + continue; + } sdata->user_power_level = local->user_power_level; if (txp_type != sdata->vif.bss_conf.txpower_type) update_txp_type = true; sdata->vif.bss_conf.txpower_type = txp_type; } - list_for_each_entry(sdata, &local->interfaces, list) + list_for_each_entry(sdata, &local->interfaces, list) { + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) + continue; ieee80211_recalc_txpower(sdata, update_txp_type); + } mutex_unlock(&local->iflist_mtx); + if (has_monitor) { + sdata = rtnl_dereference(local->monitor_sdata); + if (sdata) { + sdata->user_power_level = local->user_power_level; + if (txp_type != sdata->vif.bss_conf.txpower_type) + update_txp_type = true; + sdata->vif.bss_conf.txpower_type = txp_type; + + ieee80211_recalc_txpower(sdata, update_txp_type); + } + } + return 0; } diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 5fae001f286c..1f466d12a6bc 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -211,6 +211,7 @@ static const char *hw_flag_names[] = { FLAG(TX_FRAG_LIST), FLAG(REPORTS_LOW_ACK), FLAG(SUPPORTS_TX_FRAG), + FLAG(SUPPORTS_TDLS_BUFFER_STA), #undef FLAG }; diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index c7f93fd9ca7a..4d82fe7d627c 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -165,7 +165,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || sdata->vif.type == NL80211_IFTYPE_NAN || (sdata->vif.type == NL80211_IFTYPE_MONITOR && - !sdata->vif.mu_mimo_owner))) + !sdata->vif.mu_mimo_owner && + !(changed & BSS_CHANGED_TXPOWER)))) return; if (!check_sdata_in_driver(sdata)) diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 1621b6ab17ba..d7523530d3f8 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -492,6 +492,7 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); + /* fall through */ case IEEE80211_SMPS_OFF: action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DISABLED; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 885d00b41911..26900025de2f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1757,10 +1757,6 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool stop); void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, u16 initiator, u16 reason, bool stop); -void __ieee80211_start_rx_ba_session(struct sta_info *sta, - u8 dialog_token, u16 timeout, - u16 start_seq_num, u16 ba_policy, u16 tid, - u16 buf_size, bool tx, bool auto_seq); void ___ieee80211_start_rx_ba_session(struct sta_info *sta, u8 dialog_token, u16 timeout, u16 start_seq_num, u16 ba_policy, u16 tid, diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 13b16f90e1cf..5fe01f82df12 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1474,7 +1474,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: - BUG(); + WARN_ON(1); break; } @@ -1633,7 +1633,7 @@ static void 
ieee80211_assign_perm_addr(struct ieee80211_local *local, goto out_unlock; } } - /* otherwise fall through */ + /* fall through */ default: /* assign a new address if possible -- try n_addresses first */ for (i = 0; i < local->hw.wiphy->n_addresses; i++) { diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 938049395f90..aee05ec3f7ea 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -178,13 +178,17 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) decrease_tailroom_need_count(sdata, 1); WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)); + WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) && + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)); + return 0; } @@ -237,7 +241,8 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sta = key->sta; sdata = key->sdata; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(sdata); @@ -1104,7 +1109,8 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + if (!((key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE)) || (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) increment_tailroom_need_count(key->sdata); } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index e054a2fd8d38..0785d04a80bc 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -263,6 +263,9 @@ static void ieee80211_restart_work(struct work_struct *work) flush_delayed_work(&local->roc_work); flush_work(&local->hw_roc_done); + /* wait for all packet processing to be done */ + synchronize_net(); + ieee80211_reconfig(local); rtnl_unlock(); } diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 5e27364e10ac..73ac607beb5d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -989,8 +989,10 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, switch (sdata->vif.bss_conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: sta_flags |= IEEE80211_STA_DISABLE_HT; + /* fall through */ case NL80211_CHAN_WIDTH_20: sta_flags |= IEEE80211_STA_DISABLE_40MHZ; + /* fall through */ case NL80211_CHAN_WIDTH_40: sta_flags |= IEEE80211_STA_DISABLE_VHT; break; diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 4394463a0c2e..35ad3983ae4b 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1250,6 +1250,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) break; case IEEE80211_PROACTIVE_PREQ_WITH_PREP: flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG; + /* fall through */ case IEEE80211_PROACTIVE_PREQ_NO_PREP: interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout; target_flags |= IEEE80211_PREQ_TO_FLAG | diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index e2d00cce3c17..0f6c9ca59062 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -672,7 
+672,7 @@ void mesh_plink_timer(struct timer_list *t) break; } reason = WLAN_REASON_MESH_MAX_RETRIES; - /* fall through on else */ + /* fall through */ case NL80211_PLINK_CNF_RCVD: /* confirm timer */ if (!reason) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index c244691deab9..39b660b9a908 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -473,6 +473,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); + /* fall through */ case IEEE80211_SMPS_OFF: cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; @@ -2861,10 +2862,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, aid = le16_to_cpu(mgmt->u.assoc_resp.aid); capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info); - if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) - sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n", - aid); - aid &= ~(BIT(15) | BIT(14)); + /* + * The 5 MSB of the AID field are reserved + * (802.11-2016 9.4.1.8 AID field) + */ + aid &= 0x7ff; ifmgd->broken_ap = false; diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index faf4f6055000..f1d40b6645ff 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -801,14 +801,14 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, case NL80211_IFTYPE_ADHOC: if (!sdata->vif.bss_conf.ibss_joined) need_offchan = true; - /* fall through */ #ifdef CONFIG_MAC80211_MESH + /* fall through */ case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif) && !sdata->u.mesh.mesh_id_len) need_offchan = true; - /* fall through */ #endif + /* fall through */ case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 70e9d2ca8bbe..fd580614085b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1607,23 +1607,16 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) /* * Change STA power saving mode only at the end of a frame - * exchange sequence. + * exchange sequence, and only for a data or management + * frame as specified in IEEE 802.11-2016 11.2.3.2 */ if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && !ieee80211_has_morefrags(hdr->frame_control) && - !ieee80211_is_back_req(hdr->frame_control) && + (ieee80211_is_mgmt(hdr->frame_control) || + ieee80211_is_data(hdr->frame_control)) && !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && (rx->sdata->vif.type == NL80211_IFTYPE_AP || - rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && - /* - * PM bit is only checked in frames where it isn't reserved, - * in AP mode it's reserved in non-bufferable management frames - * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field) - * BAR frames should be ignored as specified in - * IEEE 802.11-2012 10.2.1.2. 
- */ - (!ieee80211_is_mgmt(hdr->frame_control) || - ieee80211_is_bufferable_mmpdu(hdr->frame_control))) { + rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { if (test_sta_flag(sta, WLAN_STA_PS_STA)) { if (!ieee80211_has_pm(hdr->frame_control)) sta_ps_end(sta); @@ -3632,6 +3625,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) } return true; case NL80211_IFTYPE_MESH_POINT: + if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) + return false; if (multicast) return true; return ether_addr_equal(sdata->vif.addr, hdr->addr1); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 91093d4a2f84..5cd5e6e5834e 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -47,6 +47,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, NL80211_FEATURE_TDLS_CHANNEL_SWITCH; bool wider_band = ieee80211_hw_check(&local->hw, TDLS_WIDER_BW) && !ifmgd->tdls_wider_bw_prohibited; + bool buffer_sta = ieee80211_hw_check(&local->hw, + SUPPORTS_TDLS_BUFFER_STA); struct ieee80211_supported_band *sband = ieee80211_get_sband(sdata); bool vht = sband && sband->vht_cap.vht_supported; u8 *pos = skb_put(skb, 10); @@ -56,7 +58,8 @@ static void ieee80211_tdls_add_ext_capab(struct ieee80211_sub_if_data *sdata, *pos++ = 0x0; *pos++ = 0x0; *pos++ = 0x0; - *pos++ = chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0; + *pos++ = (chan_switch ? WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH : 0) | + (buffer_sta ? WLAN_EXT_CAPA4_TDLS_BUFFER_STA : 0); *pos++ = WLAN_EXT_CAPA5_TDLS_ENABLED; *pos++ = 0; *pos++ = 0; @@ -236,6 +239,7 @@ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac) switch (ac) { default: WARN_ON_ONCE(1); + /* fall through */ case 0: return IEEE80211_AC_BE; case 1: diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 3160954fc406..25904af38839 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2922,7 +2922,9 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) gen_iv = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV; iv_spc = build.key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE; - mmic = build.key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC; + mmic = build.key->conf.flags & + (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE); /* don't handle software crypto */ if (!(build.key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d57e5f6bd8b6..1f82191ce601 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2110,15 +2110,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy, 0); wake_up: - if (local->in_reconfig) { - local->in_reconfig = false; - barrier(); - - /* Restart deferred ROCs */ - mutex_lock(&local->mtx); - ieee80211_start_next_roc(local); - mutex_unlock(&local->mtx); - } if (local->monitors == local->open_count && local->monitors > 0) ieee80211_add_virtual_monitor(local); @@ -2146,6 +2137,16 @@ int ieee80211_reconfig(struct ieee80211_local *local) mutex_unlock(&local->sta_mtx); } + if (local->in_reconfig) { + local->in_reconfig = false; + barrier(); + + /* Restart deferred ROCs */ + mutex_lock(&local->mtx); + ieee80211_start_next_roc(local); + mutex_unlock(&local->mtx); + } + ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 3e3d3014e9ab..5f7c96368b11 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -165,6 +165,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data 
*sdata, qos = sta->sta.wme; break; } + /* fall through */ case NL80211_IFTYPE_AP: ra = skb->data; break; diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index b58722d9de37..785056cb76f6 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -1,7 +1,7 @@ /* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2008, Jouni Malinen <j@w1.fi> - * Copyright (C) 2016 Intel Deutschland GmbH + * Copyright (C) 2016-2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -59,8 +59,9 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) if (info->control.hw_key && (info->flags & IEEE80211_TX_CTL_DONTFRAG || ieee80211_hw_check(&tx->local->hw, SUPPORTS_TX_FRAG)) && - !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { - /* hwaccel - with no need for SW-generated MMIC */ + !(tx->key->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE))) { + /* hwaccel - with no need for SW-generated MMIC or MIC space */ return TX_CONTINUE; } @@ -75,8 +76,15 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) skb_tailroom(skb), tail)) return TX_DROP; - key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; mic = skb_put(skb, MICHAEL_MIC_LEN); + + if (tx->key->conf.flags & IEEE80211_KEY_FLAG_PUT_MIC_SPACE) { + /* Zeroed MIC can help with debug */ + memset(mic, 0, MICHAEL_MIC_LEN); + return TX_CONTINUE; + } + + key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) mic[0]++; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index e4a13cc8a2e7..0ee0fcf3abbf 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -12,6 +12,12 @@ config NETFILTER_INGRESS config NETFILTER_NETLINK tristate +config NETFILTER_FAMILY_BRIDGE + bool + +config NETFILTER_FAMILY_ARP + bool + config NETFILTER_NETLINK_ACCT tristate "Netfilter NFACCT over NFNETLINK interface" depends on NETFILTER_ADVANCED @@ -62,6 +68,8 @@ config NF_LOG_NETDEV select NF_LOG_COMMON if NF_CONNTRACK +config NETFILTER_CONNCOUNT + tristate config NF_CONNTRACK_MARK bool 'Connection mark tracking support' @@ -497,6 +505,13 @@ config NFT_CT This option adds the "ct" expression that you can use to match connection tracking information such as the flow state. +config NFT_FLOW_OFFLOAD + depends on NF_CONNTRACK + tristate "Netfilter nf_tables hardware flow offload module" + help + This option adds the "flow_offload" expression that you can use to + choose what flows are placed into the hardware. + config NFT_SET_RBTREE tristate "Netfilter nf_tables rbtree set module" help @@ -649,6 +664,21 @@ endif # NF_TABLES_NETDEV endif # NF_TABLES +config NF_FLOW_TABLE_INET + select NF_FLOW_TABLE + tristate "Netfilter flow table mixed IPv4/IPv6 module" + help + This option adds the flow table mixed IPv4/IPv6 support. + + To compile it as a module, choose M here. + +config NF_FLOW_TABLE + tristate "Netfilter flow table module" + help + This option adds the flow table core infrastructure. + + To compile it as a module, choose M here. 
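The wpa.c hunk earlier in this stretch adds a third option next to software MIC generation and full offload: with IEEE80211_KEY_FLAG_PUT_MIC_SPACE the stack merely reserves tailroom for the Michael MIC, zeroes it, and lets the hardware fill it in. A rough userspace sketch of that decision, with the flag names shortened and the MIC routine stubbed out (not the real Michael algorithm):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define MICHAEL_MIC_LEN 8

enum {
	KEY_GENERATE_MMIC = 1 << 0, /* software computes the MIC */
	KEY_PUT_MIC_SPACE = 1 << 1, /* hardware fills it; only reserve room */
};

/* Placeholder mixing function, NOT the real Michael MIC. */
static void compute_michael_mic(const uint8_t *key, const uint8_t *data,
				size_t len, uint8_t mic[MICHAEL_MIC_LEN])
{
	size_t i;

	for (i = 0; i < MICHAEL_MIC_LEN; i++)
		mic[i] = key[i] ^ (uint8_t)(len + data[0]);
}

/* Append MICHAEL_MIC_LEN bytes to the frame: zeroed space for the
 * hardware to overwrite when PUT_MIC_SPACE is set, a software MIC
 * otherwise. Returns the new frame length. */
static size_t append_mic(uint8_t *frame, size_t len, unsigned int key_flags,
			 const uint8_t *mic_key)
{
	uint8_t *mic = frame + len;

	if (key_flags & KEY_PUT_MIC_SPACE)
		memset(mic, 0, MICHAEL_MIC_LEN); /* zeroed MIC helps debugging */
	else
		compute_michael_mic(mic_key, frame, len, mic);

	return len + MICHAEL_MIC_LEN;
}

int main(void)
{
	uint8_t frame[64] = { 0xde, 0xad };
	const uint8_t key[MICHAEL_MIC_LEN] = { 0 };
	size_t len = append_mic(frame, 2, KEY_PUT_MIC_SPACE, key);

	return len == 2 + MICHAEL_MIC_LEN ? 0 : 1;
}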
+ config NETFILTER_XTABLES tristate "Netfilter Xtables support (required for ip_tables)" default m if NETFILTER_ADVANCED=n @@ -1120,6 +1150,7 @@ config NETFILTER_XT_MATCH_CONNLIMIT tristate '"connlimit" match support' depends on NF_CONNTRACK depends on NETFILTER_ADVANCED + select NETFILTER_CONNCOUNT ---help--- This match allows you to match against the number of parallel connections to a server per client IP address (or address block). diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index f78ed2470831..5d9b8b959e58 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o +netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o utils.o nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o @@ -67,6 +67,8 @@ obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o # SYNPROXY obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o +obj-$(CONFIG_NETFILTER_CONNCOUNT) += nf_conncount.o + # generic packet duplication from netdev family obj-$(CONFIG_NF_DUP_NETDEV) += nf_dup_netdev.o @@ -84,6 +86,7 @@ obj-$(CONFIG_NFT_META) += nft_meta.o obj-$(CONFIG_NFT_RT) += nft_rt.o obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o obj-$(CONFIG_NFT_CT) += nft_ct.o +obj-$(CONFIG_NFT_FLOW_OFFLOAD) += nft_flow_offload.o obj-$(CONFIG_NFT_LIMIT) += nft_limit.o obj-$(CONFIG_NFT_NAT) += nft_nat.o obj-$(CONFIG_NFT_OBJREF) += nft_objref.o @@ -107,6 +110,10 @@ obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o +# flow table infrastructure +obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o +obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o + # generic X tables obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 52cd2901a097..997dd387d259 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c @@ -4,8 +4,7 @@ * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any * way. * - * Rusty Russell (C)2000 -- This code is GPL. - * Patrick McHardy (c) 2006-2012 + * This code is GPL. 
*/ #include <linux/kernel.h> #include <linux/netfilter.h> @@ -28,34 +27,12 @@ #include "nf_internals.h" -static DEFINE_MUTEX(afinfo_mutex); - -const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; -EXPORT_SYMBOL(nf_afinfo); const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly; EXPORT_SYMBOL_GPL(nf_ipv6_ops); DEFINE_PER_CPU(bool, nf_skb_duplicated); EXPORT_SYMBOL_GPL(nf_skb_duplicated); -int nf_register_afinfo(const struct nf_afinfo *afinfo) -{ - mutex_lock(&afinfo_mutex); - RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo); - mutex_unlock(&afinfo_mutex); - return 0; -} -EXPORT_SYMBOL_GPL(nf_register_afinfo); - -void nf_unregister_afinfo(const struct nf_afinfo *afinfo) -{ - mutex_lock(&afinfo_mutex); - RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL); - mutex_unlock(&afinfo_mutex); - synchronize_rcu(); -} -EXPORT_SYMBOL_GPL(nf_unregister_afinfo); - #ifdef HAVE_JUMP_LABEL struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; EXPORT_SYMBOL(nf_hooks_needed); @@ -74,7 +51,8 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num) struct nf_hook_entries *e; size_t alloc = sizeof(*e) + sizeof(struct nf_hook_entry) * num + - sizeof(struct nf_hook_ops *) * num; + sizeof(struct nf_hook_ops *) * num + + sizeof(struct nf_hook_entries_rcu_head); if (num == 0) return NULL; @@ -85,6 +63,30 @@ static struct nf_hook_entries *allocate_hook_entries_size(u16 num) return e; } +static void __nf_hook_entries_free(struct rcu_head *h) +{ + struct nf_hook_entries_rcu_head *head; + + head = container_of(h, struct nf_hook_entries_rcu_head, head); + kvfree(head->allocation); +} + +static void nf_hook_entries_free(struct nf_hook_entries *e) +{ + struct nf_hook_entries_rcu_head *head; + struct nf_hook_ops **ops; + unsigned int num; + + if (!e) + return; + + num = e->num_hook_entries; + ops = nf_hook_entries_get_hook_ops(e); + head = (void *)&ops[num]; + head->allocation = e; + call_rcu(&head->head, __nf_hook_entries_free); +} + static unsigned int accept_all(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) @@ -135,6 +137,12 @@ nf_hook_entries_grow(const struct nf_hook_entries *old, ++i; continue; } + + if (reg->nat_hook && orig_ops[i]->nat_hook) { + kvfree(new); + return ERR_PTR(-EEXIST); + } + if (inserted || reg->priority > orig_ops[i]->priority) { new_ops[nhooks] = (void *)orig_ops[i]; new->hooks[nhooks] = old->hooks[i]; @@ -237,27 +245,61 @@ out_assign: return old; } -static struct nf_hook_entries __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg) +static struct nf_hook_entries __rcu ** +nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum, + struct net_device *dev) { - if (reg->pf != NFPROTO_NETDEV) - return net->nf.hooks[reg->pf]+reg->hooknum; + switch (pf) { + case NFPROTO_NETDEV: + break; +#ifdef CONFIG_NETFILTER_FAMILY_ARP + case NFPROTO_ARP: + if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum)) + return NULL; + return net->nf.hooks_arp + hooknum; +#endif +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + case NFPROTO_BRIDGE: + if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum)) + return NULL; + return net->nf.hooks_bridge + hooknum; +#endif + case NFPROTO_IPV4: + if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum)) + return NULL; + return net->nf.hooks_ipv4 + hooknum; + case NFPROTO_IPV6: + if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum)) + return NULL; + return net->nf.hooks_ipv6 + hooknum; +#if IS_ENABLED(CONFIG_DECNET) + case NFPROTO_DECNET: + if 
(WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_decnet) <= hooknum)) + return NULL; + return net->nf.hooks_decnet + hooknum; +#endif + default: + WARN_ON_ONCE(1); + return NULL; + } #ifdef CONFIG_NETFILTER_INGRESS - if (reg->hooknum == NF_NETDEV_INGRESS) { - if (reg->dev && dev_net(reg->dev) == net) - return &reg->dev->nf_hooks_ingress; + if (hooknum == NF_NETDEV_INGRESS) { + if (dev && dev_net(dev) == net) + return &dev->nf_hooks_ingress; } #endif WARN_ON_ONCE(1); return NULL; } -int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) +static int __nf_register_net_hook(struct net *net, int pf, + const struct nf_hook_ops *reg) { struct nf_hook_entries *p, *new_hooks; struct nf_hook_entries __rcu **pp; - if (reg->pf == NFPROTO_NETDEV) { + if (pf == NFPROTO_NETDEV) { #ifndef CONFIG_NETFILTER_INGRESS if (reg->hooknum == NF_NETDEV_INGRESS) return -EOPNOTSUPP; @@ -267,7 +309,7 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) return -EINVAL; } - pp = nf_hook_entry_head(net, reg); + pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev); if (!pp) return -EINVAL; @@ -285,21 +327,19 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) hooks_validate(new_hooks); #ifdef CONFIG_NETFILTER_INGRESS - if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) + if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) net_inc_ingress_queue(); #endif #ifdef HAVE_JUMP_LABEL - static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); + static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]); #endif - synchronize_net(); BUG_ON(p == new_hooks); - kvfree(p); + nf_hook_entries_free(p); return 0; } -EXPORT_SYMBOL(nf_register_net_hook); /* - * __nf_unregister_net_hook - remove a hook from blob + * nf_remove_net_hook - remove a hook from blob * * @oldp: current address of hook blob * @unreg: hook to unregister @@ -307,8 +347,8 @@ EXPORT_SYMBOL(nf_register_net_hook); * This cannot fail, hook unregistration must always succeed. * Therefore replace the to-be-removed hook with a dummy hook. */
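The core.c hunks above trade synchronize_net() for call_rcu(): allocate_hook_entries_size() now also reserves a struct nf_hook_entries_rcu_head behind the ops array, so the blob can be queued for deferred freeing without any allocation on the unregister path, and the callback recovers the original pointer via container_of(). A userspace analogue of the trailing-header trick, with invented fake_rcu_head/blob names and the callback run synchronously where the kernel would defer it:

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct rcu_head: the deferred-free machinery only ever
 * sees a pointer to this member. */
struct fake_rcu_head {
	void (*func)(struct fake_rcu_head *);
};

/* Trailing header, as in struct nf_hook_entries_rcu_head. */
struct blob_rcu_head {
	struct fake_rcu_head head;
	void *allocation;
};

struct blob {
	unsigned int num;
	void *data[]; /* num slots, then a struct blob_rcu_head */
};

static void blob_free_cb(struct fake_rcu_head *h)
{
	struct blob_rcu_head *head = container_of(h, struct blob_rcu_head, head);

	free(head->allocation); /* frees the whole blob, header included */
}

static void blob_free(struct blob *b)
{
	struct blob_rcu_head *head = (struct blob_rcu_head *)&b->data[b->num];

	head->allocation = b;
	head->head.func = blob_free_cb;
	head->head.func(&head->head); /* the kernel would call_rcu() here */
}

int main(void)
{
	struct blob *b = malloc(sizeof(*b) + 4 * sizeof(void *) +
				sizeof(struct blob_rcu_head));

	if (b) {
		b->num = 4;
		blob_free(b);
	}
	return 0;
}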
-static void __nf_unregister_net_hook(struct nf_hook_entries *old, - const struct nf_hook_ops *unreg) +static void nf_remove_net_hook(struct nf_hook_entries *old, + const struct nf_hook_ops *unreg, int pf) { struct nf_hook_ops **orig_ops; bool found = false; @@ -326,24 +366,24 @@ static void __nf_unregister_net_hook(struct nf_hook_entries *old, if (found) { #ifdef CONFIG_NETFILTER_INGRESS - if (unreg->pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS) + if (pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS) net_dec_ingress_queue(); #endif #ifdef HAVE_JUMP_LABEL - static_key_slow_dec(&nf_hooks_needed[unreg->pf][unreg->hooknum]); + static_key_slow_dec(&nf_hooks_needed[pf][unreg->hooknum]); #endif } else { - WARN_ONCE(1, "hook not found, pf %d num %d", unreg->pf, unreg->hooknum); + WARN_ONCE(1, "hook not found, pf %d num %d", pf, unreg->hooknum); } } -void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) +void __nf_unregister_net_hook(struct net *net, int pf, + const struct nf_hook_ops *reg) { struct nf_hook_entries __rcu **pp; struct nf_hook_entries *p; - unsigned int nfq; - pp = nf_hook_entry_head(net, reg); + pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev); if (!pp) return; @@ -355,23 +395,52 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) return; } - __nf_unregister_net_hook(p, reg); + nf_remove_net_hook(p, reg, pf); p = __nf_hook_entries_try_shrink(pp); mutex_unlock(&nf_hook_mutex); if (!p) return; - synchronize_net(); + nf_queue_nf_hook_drop(net); + nf_hook_entries_free(p); +} - /* other cpu might still process nfqueue verdict that used reg */ - nfq = nf_queue_nf_hook_drop(net); - if (nfq) - synchronize_net(); - kvfree(p); +void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) +{ + if (reg->pf == NFPROTO_INET) { + __nf_unregister_net_hook(net, NFPROTO_IPV4, reg); + __nf_unregister_net_hook(net, NFPROTO_IPV6, reg); + } else { + __nf_unregister_net_hook(net, reg->pf, reg); + } } EXPORT_SYMBOL(nf_unregister_net_hook); +int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) +{ + int err; + + if (reg->pf == NFPROTO_INET) { + err = __nf_register_net_hook(net, NFPROTO_IPV4, reg); + if (err < 0) + return err; + + err = __nf_register_net_hook(net, NFPROTO_IPV6, reg); + if (err < 0) { + __nf_unregister_net_hook(net, NFPROTO_IPV4, reg); + return err; + } + } else { + err = __nf_register_net_hook(net, reg->pf, reg); + if (err < 0) + return err; + } + + return 0; +} +EXPORT_SYMBOL(nf_register_net_hook); + int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int n) { @@ -395,63 +464,10 @@ EXPORT_SYMBOL(nf_register_net_hooks); void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg, unsigned int hookcount) { - struct nf_hook_entries *to_free[16], *p; - struct nf_hook_entries __rcu **pp; - unsigned int i, j, n; - - mutex_lock(&nf_hook_mutex); - for (i = 0; i < hookcount; i++) { - pp = nf_hook_entry_head(net, &reg[i]); - if (!pp) - continue; - - p = nf_entry_dereference(*pp); - if (WARN_ON_ONCE(!p)) - continue; - __nf_unregister_net_hook(p, &reg[i]); - } - mutex_unlock(&nf_hook_mutex); - - do { - n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free)); - - mutex_lock(&nf_hook_mutex); - - for (i = 0, j = 0; i < hookcount && j < n; i++) { - pp = nf_hook_entry_head(net, &reg[i]); - if (!pp) - continue; - - p = nf_entry_dereference(*pp); - if (!p) - continue; - - to_free[j] = __nf_hook_entries_try_shrink(pp); - if (to_free[j]) - ++j; - } - -
mutex_unlock(&nf_hook_mutex); - - if (j) { - unsigned int nfq; - - synchronize_net(); - - /* need 2nd synchronize_net() if nfqueue is used, skb - * can get reinjected right before nf_queue_hook_drop() - */ - nfq = nf_queue_nf_hook_drop(net); - if (nfq) - synchronize_net(); - - for (i = 0; i < j; i++) - kvfree(to_free[i]); - } + unsigned int i; - reg += n; - hookcount -= n; - } while (hookcount > 0); + for (i = 0; i < hookcount; i++) + nf_unregister_net_hook(net, &reg[i]); } EXPORT_SYMBOL(nf_unregister_net_hooks); @@ -569,14 +585,27 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); EXPORT_SYMBOL(nf_nat_decode_session_hook); #endif -static int __net_init netfilter_net_init(struct net *net) +static void __net_init __netfilter_net_init(struct nf_hook_entries **e, int max) { - int i, h; + int h; - for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) { - for (h = 0; h < NF_MAX_HOOKS; h++) - RCU_INIT_POINTER(net->nf.hooks[i][h], NULL); - } + for (h = 0; h < max; h++) + RCU_INIT_POINTER(e[h], NULL); +} + +static int __net_init netfilter_net_init(struct net *net) +{ + __netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4)); + __netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6)); +#ifdef CONFIG_NETFILTER_FAMILY_ARP + __netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp)); +#endif +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + __netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge)); +#endif +#if IS_ENABLED(CONFIG_DECNET) + __netfilter_net_init(net->nf.hooks_decnet, ARRAY_SIZE(net->nf.hooks_decnet)); +#endif #ifdef CONFIG_PROC_FS net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter", diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h index 5ca18f07683b..257ca393e6f2 100644 --- a/net/netfilter/ipset/ip_set_bitmap_gen.h +++ b/net/netfilter/ipset/ip_set_bitmap_gen.h @@ -127,14 +127,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, if (ret <= 0) return ret; - if (SET_WITH_TIMEOUT(set) && - ip_set_timeout_expired(ext_timeout(x, set))) - return 0; - if (SET_WITH_COUNTER(set)) - ip_set_update_counter(ext_counter(x, set), ext, mext, flags); - if (SET_WITH_SKBINFO(set)) - ip_set_get_skbinfo(ext_skbinfo(x, set), ext, mext, flags); - return 1; + return ip_set_match_extensions(set, ext, mext, flags, x); } static int @@ -227,6 +220,7 @@ mtype_list(const struct ip_set *set, rcu_read_lock(); for (; cb->args[IPSET_CB_ARG0] < map->elements; cb->args[IPSET_CB_ARG0]++) { + cond_resched_rcu(); id = cb->args[IPSET_CB_ARG0]; x = get_ext(set, map, id); if (!test_bit(id, map->members) || diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index d8975a0b4282..488d6d05c65c 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -263,12 +263,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[], ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip); if (ret) return ret; - if (first_ip > last_ip) { - u32 tmp = first_ip; - - first_ip = last_ip; - last_ip = tmp; - } + if (first_ip > last_ip) + swap(first_ip, last_ip); } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 4c279fbd2d5d..c00b6a2e8e3c 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -337,12 +337,8 @@
bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[], ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip); if (ret) return ret; - if (first_ip > last_ip) { - u32 tmp = first_ip; - - first_ip = last_ip; - last_ip = tmp; - } + if (first_ip > last_ip) + swap(first_ip, last_ip); } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c index 7f9bbd7c98b5..b561ca8b3659 100644 --- a/net/netfilter/ipset/ip_set_bitmap_port.c +++ b/net/netfilter/ipset/ip_set_bitmap_port.c @@ -238,12 +238,8 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[], first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]); last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); - if (first_port > last_port) { - u16 tmp = first_port; - - first_port = last_port; - last_port = tmp; - } + if (first_port > last_port) + swap(first_port, last_port); elements = last_port - first_port + 1; set->dsize = ip_set_elem_len(set, tb, 0, 0); diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index cf84f7b37cd9..728bf31bb386 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -57,7 +57,7 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET); /* When the nfnl mutex is held: */ #define ip_set_dereference(p) \ - rcu_dereference_protected(p, 1) + rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET)) #define ip_set(inst, id) \ ip_set_dereference((inst)->ip_set_list)[id] @@ -472,6 +472,31 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, } EXPORT_SYMBOL_GPL(ip_set_put_extensions); +bool +ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags, void *data) +{ + if (SET_WITH_TIMEOUT(set) && + ip_set_timeout_expired(ext_timeout(data, set))) + return false; + if (SET_WITH_COUNTER(set)) { + struct ip_set_counter *counter = ext_counter(data, set); + + if (flags & IPSET_FLAG_MATCH_COUNTERS && + !(ip_set_match_counter(ip_set_get_packets(counter), + mext->packets, mext->packets_op) && + ip_set_match_counter(ip_set_get_bytes(counter), + mext->bytes, mext->bytes_op))) + return false; + ip_set_update_counter(counter, ext, flags); + } + if (SET_WITH_SKBINFO(set)) + ip_set_get_skbinfo(ext_skbinfo(data, set), + ext, mext, flags); + return true; +} +EXPORT_SYMBOL_GPL(ip_set_match_extensions); + /* Creating/destroying/renaming/swapping affect the existence and * the properties of a set. All of these can be executed from userspace * only and serialized by the nfnl mutex indirectly from nfnetlink. 
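The ipset hunks above fold several near-identical copies of the timeout/counter/skbinfo checks (bitmap, hash, and list set types) into a single ip_set_match_extensions() helper, which also gains counter matching so IPSET_FLAG_MATCH_COUNTERS can veto a hit. A much-reduced sketch of the consolidated check; struct elem and match_opts are invented stand-ins for the per-element extension blobs, and the real code supports full comparison operators on the counters:

#include <stdbool.h>
#include <stdint.h>

struct elem {
	uint64_t expires; /* 0 = no timeout on this element */
	uint64_t packets;
	uint64_t bytes;
};

struct match_opts {
	bool match_counters;
	uint64_t min_packets;
	uint64_t min_bytes;
};

/* One helper applies the same policy for every set type: an expired
 * element never matches, and counter thresholds can veto the match. */
static bool match_extensions(const struct elem *e,
			     const struct match_opts *o, uint64_t now)
{
	if (e->expires && now >= e->expires)
		return false;
	if (o->match_counters &&
	    (e->packets < o->min_packets || e->bytes < o->min_bytes))
		return false;
	return true;
}

int main(void)
{
	const struct elem e = { .expires = 0, .packets = 10, .bytes = 1000 };
	const struct match_opts o = { true, 5, 500 };

	return match_extensions(&e, &o, 123) ? 0 : 1;
}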
@@ -1386,11 +1411,9 @@ dump_last: goto next_set; if (set->variant->uref) set->variant->uref(set, cb, true); - /* Fall through and add elements */ + /* fall through */ default: - rcu_read_lock_bh(); ret = set->variant->list(set, skb, cb); - rcu_read_unlock_bh(); if (!cb->args[IPSET_CB_ARG0]) /* Set is done, proceed with next one */ goto next_set; @@ -2055,6 +2078,7 @@ ip_set_net_exit(struct net *net) inst->is_deleted = true; /* flag for ip_set_nfnl_put */ + nfnl_lock(NFNL_SUBSYS_IPSET); for (i = 0; i < inst->ip_set_max; i++) { set = ip_set(inst, i); if (set) { @@ -2062,6 +2086,7 @@ ip_set_net_exit(struct net *net) ip_set_destroy_set(set); } } + nfnl_unlock(NFNL_SUBSYS_IPSET); kfree(rcu_dereference_protected(inst->ip_set_list, 1)); } diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index efffc8eabafe..bbad940c0137 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -917,12 +917,9 @@ static inline int mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext, struct ip_set_ext *mext, struct ip_set *set, u32 flags) { - if (SET_WITH_COUNTER(set)) - ip_set_update_counter(ext_counter(data, set), - ext, mext, flags); - if (SET_WITH_SKBINFO(set)) - ip_set_get_skbinfo(ext_skbinfo(data, set), - ext, mext, flags); + if (!ip_set_match_extensions(set, ext, mext, flags, data)) + return 0; + /* nomatch entries return -ENOTEMPTY */ return mtype_do_data_match(data); } @@ -941,9 +938,9 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d, struct mtype_elem *data; #if IPSET_NET_COUNT == 2 struct mtype_elem orig = *d; - int i, j = 0, k; + int ret, i, j = 0, k; #else - int i, j = 0; + int ret, i, j = 0; #endif u32 key, multi = 0; @@ -969,18 +966,13 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d, data = ahash_data(n, i, set->dsize); if (!mtype_data_equal(data, d, &multi)) continue; - if (SET_WITH_TIMEOUT(set)) { - if (!ip_set_timeout_expired( - ext_timeout(data, set))) - return mtype_data_match(data, ext, - mext, set, - flags); + ret = mtype_data_match(data, ext, mext, set, flags); + if (ret != 0) + return ret; #ifdef IP_SET_HASH_WITH_MULTI - multi = 0; + /* No match, reset multiple match flag */ + multi = 0; #endif - } else - return mtype_data_match(data, ext, - mext, set, flags); } #if IPSET_NET_COUNT == 2 } @@ -1027,12 +1019,11 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext, if (!test_bit(i, n->used)) continue; data = ahash_data(n, i, set->dsize); - if (mtype_data_equal(data, d, &multi) && - !(SET_WITH_TIMEOUT(set) && - ip_set_timeout_expired(ext_timeout(data, set)))) { - ret = mtype_data_match(data, ext, mext, set, flags); + if (!mtype_data_equal(data, d, &multi)) + continue; + ret = mtype_data_match(data, ext, mext, set, flags); + if (ret != 0) goto out; - } } out: return ret; @@ -1143,6 +1134,7 @@ mtype_list(const struct ip_set *set, rcu_read_lock(); for (; cb->args[IPSET_CB_ARG0] < jhash_size(t->htable_bits); cb->args[IPSET_CB_ARG0]++) { + cond_resched_rcu(); incomplete = skb_tail_pointer(skb); n = rcu_dereference(hbucket(t, cb->args[IPSET_CB_ARG0])); pr_debug("cb->arg bucket: %lu, t %p n %p\n", diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index e864681b8dc5..072a658fde04 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -55,8 +55,9 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, struct ip_set_adt_opt *opt, const struct ip_set_ext *ext) { struct 
list_set *map = set->data; + struct ip_set_ext *mext = &opt->ext; struct set_elem *e; - u32 cmdflags = opt->cmdflags; + u32 flags = opt->cmdflags; int ret; /* Don't lookup sub-counters at all */ @@ -64,21 +65,11 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb, if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; list_for_each_entry_rcu(e, &map->members, list) { - if (SET_WITH_TIMEOUT(set) && - ip_set_timeout_expired(ext_timeout(e, set))) - continue; ret = ip_set_test(e->id, skb, par, opt); - if (ret > 0) { - if (SET_WITH_COUNTER(set)) - ip_set_update_counter(ext_counter(e, set), - ext, &opt->ext, - cmdflags); - if (SET_WITH_SKBINFO(set)) - ip_set_get_skbinfo(ext_skbinfo(e, set), - ext, &opt->ext, - cmdflags); - return ret; - } + if (ret <= 0) + continue; + if (ip_set_match_extensions(set, ext, mext, flags, e)) + return 1; } return 0; } diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 3e053cb30070..f489b8db2406 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -322,7 +322,7 @@ ip_vs_conn_fill_param_proto(struct netns_ipvs *ipvs, { __be16 _ports[2], *pptr; - pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); + pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports); if (pptr == NULL) return 1; diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 5cb7cac9177d..5f6f73cf2174 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -433,7 +433,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, /* * IPv6 frags, only the first hit here. */ - pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); + pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports); if (pptr == NULL) return NULL; @@ -566,7 +566,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, struct netns_ipvs *ipvs = svc->ipvs; struct net *net = ipvs->net; - pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); + pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports); if (!pptr) return NF_DROP; dport = likely(!ip_vs_iph_inverse(iph)) ? 
pptr[1] : pptr[0]; @@ -982,7 +982,7 @@ static int ip_vs_out_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb, unsigned int offset; *related = 1; - ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh); + ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph); if (ic == NULL) return NF_DROP; @@ -1214,7 +1214,7 @@ static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum, return NULL; pptr = frag_safe_skb_hp(skb, iph->len, - sizeof(_ports), _ports, iph); + sizeof(_ports), _ports); if (!pptr) return NULL; @@ -1407,7 +1407,7 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in __be16 _ports[2], *pptr; pptr = frag_safe_skb_hp(skb, iph.len, - sizeof(_ports), _ports, &iph); + sizeof(_ports), _ports); if (pptr == NULL) return NF_ACCEPT; /* Not for me */ if (ip_vs_has_real_service(ipvs, af, iph.protocol, &iph.saddr, @@ -1741,7 +1741,7 @@ static int ip_vs_in_icmp_v6(struct netns_ipvs *ipvs, struct sk_buff *skb, *related = 1; - ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph, iph); + ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph); if (ic == NULL) return NF_DROP; diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 121a321b91be..bcd9b7bde4ee 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -315,6 +315,7 @@ tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) switch (skb->ip_summed) { case CHECKSUM_NONE: skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0); + /* fall through */ case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index 30e11cd6aa8a..c15ef7c2a1fa 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -319,6 +319,7 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp) case CHECKSUM_NONE: skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0); + /* fall through */ case CHECKSUM_COMPLETE: #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) { diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c new file mode 100644 index 000000000000..a95518261168 --- /dev/null +++ b/net/netfilter/nf_conncount.c @@ -0,0 +1,373 @@ +/* + * count the number of connections matching an arbitrary key. 
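A large share of this release's churn consists of annotating intentional switch fall-throughs, as in the ip_vs_proto_tcp.c/udp.c hunks above where CHECKSUM_NONE first computes skb->csum and then deliberately drops into the CHECKSUM_COMPLETE verification path. A toy example of the pattern the /* fall through */ comments document for GCC's -Wimplicit-fallthrough (the names and byte-sum checksum are invented):

#include <stdio.h>

enum csum_state { CSUM_NONE, CSUM_COMPLETE, CSUM_UNNECESSARY };

/* Trivial stand-in for skb_checksum(). */
static unsigned int checksum(const unsigned char *data, int len)
{
	unsigned int sum = 0;

	while (len-- > 0)
		sum += *data++;
	return sum;
}

static int csum_ok(enum csum_state state, unsigned int csum,
		   const unsigned char *data, int len)
{
	switch (state) {
	case CSUM_NONE:
		csum = checksum(data, len); /* nothing computed yet */
		/* fall through */
	case CSUM_COMPLETE:
		return csum == checksum(data, len);
	default:
		return 1; /* hardware already verified it */
	}
}

int main(void)
{
	const unsigned char pkt[] = { 1, 2, 3 };

	printf("%d\n", csum_ok(CSUM_NONE, 0, pkt, 3)); /* prints 1 */
	return 0;
}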
+ * + * (C) 2017 Red Hat GmbH + * Author: Florian Westphal <fw@strlen.de> + * + * split from xt_connlimit.c: + * (c) 2000 Gerd Knorr <kraxel@bytesex.org> + * Nov 2002: Martin Bene <martin.bene@icomedias.com>: + * only ignore TIME_WAIT or gone connections + * (C) CC Computer Consultants GmbH, 2007 + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/jhash.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/module.h> +#include <linux/random.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/netfilter/nf_conntrack_tcp.h> +#include <linux/netfilter/x_tables.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_count.h> +#include <net/netfilter/nf_conntrack_core.h> +#include <net/netfilter/nf_conntrack_tuple.h> +#include <net/netfilter/nf_conntrack_zones.h> + +#define CONNCOUNT_SLOTS 256U + +#ifdef CONFIG_LOCKDEP +#define CONNCOUNT_LOCK_SLOTS 8U +#else +#define CONNCOUNT_LOCK_SLOTS 256U +#endif + +#define CONNCOUNT_GC_MAX_NODES 8 +#define MAX_KEYLEN 5 + +/* we will save the tuples of all connections we care about */ +struct nf_conncount_tuple { + struct hlist_node node; + struct nf_conntrack_tuple tuple; +}; + +struct nf_conncount_rb { + struct rb_node node; + struct hlist_head hhead; /* connections/hosts in same subnet */ + u32 key[MAX_KEYLEN]; +}; + +static spinlock_t nf_conncount_locks[CONNCOUNT_LOCK_SLOTS] __cacheline_aligned_in_smp; + +struct nf_conncount_data { + unsigned int keylen; + struct rb_root root[CONNCOUNT_SLOTS]; +}; + +static u_int32_t conncount_rnd __read_mostly; +static struct kmem_cache *conncount_rb_cachep __read_mostly; +static struct kmem_cache *conncount_conn_cachep __read_mostly; + +static inline bool already_closed(const struct nf_conn *conn) +{ + if (nf_ct_protonum(conn) == IPPROTO_TCP) + return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT || + conn->proto.tcp.state == TCP_CONNTRACK_CLOSE; + else + return 0; +} + +static int key_diff(const u32 *a, const u32 *b, unsigned int klen) +{ + return memcmp(a, b, klen * sizeof(u32)); +} + +static bool add_hlist(struct hlist_head *head, + const struct nf_conntrack_tuple *tuple) +{ + struct nf_conncount_tuple *conn; + + conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); + if (conn == NULL) + return false; + conn->tuple = *tuple; + hlist_add_head(&conn->node, head); + return true; +} + +static unsigned int check_hlist(struct net *net, + struct hlist_head *head, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone, + bool *addit) +{ + const struct nf_conntrack_tuple_hash *found; + struct nf_conncount_tuple *conn; + struct hlist_node *n; + struct nf_conn *found_ct; + unsigned int length = 0; + + *addit = true; + + /* check the saved connections */ + hlist_for_each_entry_safe(conn, n, head, node) { + found = nf_conntrack_find_get(net, zone, &conn->tuple); + if (found == NULL) { + hlist_del(&conn->node); + kmem_cache_free(conncount_conn_cachep, conn); + continue; + } + + found_ct = nf_ct_tuplehash_to_ctrack(found); + + if (nf_ct_tuple_equal(&conn->tuple, tuple)) { + /* + * Just to be sure we have it only once in the list. + * We should not see tuples twice unless someone hooks + * this into a table without "-p tcp --syn". 
+ */ + *addit = false; + } else if (already_closed(found_ct)) { + /* + * we do not care about connections which are + * closed already -> ditch it + */ + nf_ct_put(found_ct); + hlist_del(&conn->node); + kmem_cache_free(conncount_conn_cachep, conn); + continue; + } + + nf_ct_put(found_ct); + length++; + } + + return length; +} + +static void tree_nodes_free(struct rb_root *root, + struct nf_conncount_rb *gc_nodes[], + unsigned int gc_count) +{ + struct nf_conncount_rb *rbconn; + + while (gc_count) { + rbconn = gc_nodes[--gc_count]; + rb_erase(&rbconn->node, root); + kmem_cache_free(conncount_rb_cachep, rbconn); + } +} + +static unsigned int +count_tree(struct net *net, struct rb_root *root, + const u32 *key, u8 keylen, + u8 family, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone) +{ + struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES]; + struct rb_node **rbnode, *parent; + struct nf_conncount_rb *rbconn; + struct nf_conncount_tuple *conn; + unsigned int gc_count; + bool no_gc = false; + + restart: + gc_count = 0; + parent = NULL; + rbnode = &(root->rb_node); + while (*rbnode) { + int diff; + bool addit; + + rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); + + parent = *rbnode; + diff = key_diff(key, rbconn->key, keylen); + if (diff < 0) { + rbnode = &((*rbnode)->rb_left); + } else if (diff > 0) { + rbnode = &((*rbnode)->rb_right); + } else { + /* same source network -> be counted! */ + unsigned int count; + count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit); + + tree_nodes_free(root, gc_nodes, gc_count); + if (!addit) + return count; + + if (!add_hlist(&rbconn->hhead, tuple)) + return 0; /* hotdrop */ + + return count + 1; + } + + if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes)) + continue; + + /* only used for GC on hhead, retval and 'addit' ignored */ + check_hlist(net, &rbconn->hhead, tuple, zone, &addit); + if (hlist_empty(&rbconn->hhead)) + gc_nodes[gc_count++] = rbconn; + } + + if (gc_count) { + no_gc = true; + tree_nodes_free(root, gc_nodes, gc_count); + /* tree_node_free before new allocation permits + * allocator to re-use newly free'd object. + * + * This is a rare event; in most cases we will find + * existing node to re-use. (or gc_count is 0). 
+ */ + goto restart; + } + + /* no match, need to insert new node */ + rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC); + if (rbconn == NULL) + return 0; + + conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC); + if (conn == NULL) { + kmem_cache_free(conncount_rb_cachep, rbconn); + return 0; + } + + conn->tuple = *tuple; + memcpy(rbconn->key, key, sizeof(u32) * keylen); + + INIT_HLIST_HEAD(&rbconn->hhead); + hlist_add_head(&conn->node, &rbconn->hhead); + + rb_link_node(&rbconn->node, parent, rbnode); + rb_insert_color(&rbconn->node, root); + return 1; +} + +unsigned int nf_conncount_count(struct net *net, + struct nf_conncount_data *data, + const u32 *key, + unsigned int family, + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_zone *zone) +{ + struct rb_root *root; + int count; + u32 hash; + + hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS; + root = &data->root[hash]; + + spin_lock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); + + count = count_tree(net, root, key, data->keylen, family, tuple, zone); + + spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]); + + return count; +} +EXPORT_SYMBOL_GPL(nf_conncount_count); + +struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family, + unsigned int keylen) +{ + struct nf_conncount_data *data; + int ret, i; + + if (keylen % sizeof(u32) || + keylen / sizeof(u32) > MAX_KEYLEN || + keylen == 0) + return ERR_PTR(-EINVAL); + + net_get_random_once(&conncount_rnd, sizeof(conncount_rnd)); + + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + ret = nf_ct_netns_get(net, family); + if (ret < 0) { + kfree(data); + return ERR_PTR(ret); + } + + for (i = 0; i < ARRAY_SIZE(data->root); ++i) + data->root[i] = RB_ROOT; + + data->keylen = keylen / sizeof(u32); + + return data; +} +EXPORT_SYMBOL_GPL(nf_conncount_init); + +static void destroy_tree(struct rb_root *r) +{ + struct nf_conncount_tuple *conn; + struct nf_conncount_rb *rbconn; + struct hlist_node *n; + struct rb_node *node; + + while ((node = rb_first(r)) != NULL) { + rbconn = rb_entry(node, struct nf_conncount_rb, node); + + rb_erase(node, r); + + hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node) + kmem_cache_free(conncount_conn_cachep, conn); + + kmem_cache_free(conncount_rb_cachep, rbconn); + } +} + +void nf_conncount_destroy(struct net *net, unsigned int family, + struct nf_conncount_data *data) +{ + unsigned int i; + + nf_ct_netns_put(net, family); + + for (i = 0; i < ARRAY_SIZE(data->root); ++i) + destroy_tree(&data->root[i]); + + kfree(data); +} +EXPORT_SYMBOL_GPL(nf_conncount_destroy); + +static int __init nf_conncount_modinit(void) +{ + int i; + + BUILD_BUG_ON(CONNCOUNT_LOCK_SLOTS > CONNCOUNT_SLOTS); + BUILD_BUG_ON((CONNCOUNT_SLOTS % CONNCOUNT_LOCK_SLOTS) != 0); + + for (i = 0; i < CONNCOUNT_LOCK_SLOTS; ++i) + spin_lock_init(&nf_conncount_locks[i]); + + conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple", + sizeof(struct nf_conncount_tuple), + 0, 0, NULL); + if (!conncount_conn_cachep) + return -ENOMEM; + + conncount_rb_cachep = kmem_cache_create("nf_conncount_rb", + sizeof(struct nf_conncount_rb), + 0, 0, NULL); + if (!conncount_rb_cachep) { + kmem_cache_destroy(conncount_conn_cachep); + return -ENOMEM; + } + + return 0; +} + +static void __exit nf_conncount_modexit(void) +{ + kmem_cache_destroy(conncount_conn_cachep); + kmem_cache_destroy(conncount_rb_cachep); +} + +module_init(nf_conncount_modinit); +module_exit(nf_conncount_modexit); 
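nf_conncount.c above spreads its state across 256 rbtree slots while striping the spinlocks over a possibly smaller array (8 slots under lockdep, keeping the number of tracked lock classes manageable); the BUILD_BUG_ONs in the module init enforce that the slot count is a multiple of the lock count so the mapping stays even. A pthread sketch of the slot/lock split, with hash_key a toy FNV-style mix standing in for jhash2() and a plain counter standing in for the rbtree walk:

#include <pthread.h>
#include <stdint.h>

#define SLOTS      256U
#define LOCK_SLOTS 8U /* several slots share one lock, as under lockdep */

static pthread_mutex_t locks[LOCK_SLOTS];
static unsigned int trees[SLOTS]; /* stand-in for the rbtree roots */

/* Toy mix, standing in for jhash2(). */
static uint32_t hash_key(const uint32_t *key, unsigned int words)
{
	uint32_t h = 2166136261u;

	while (words--)
		h = (h ^ *key++) * 16777619u;
	return h;
}

/* Mirrors nf_conncount_count()'s locking: the hash picks the slot, and
 * the same hash modulo the smaller lock array picks the lock. */
static unsigned int count_in_slot(const uint32_t *key, unsigned int words)
{
	uint32_t slot = hash_key(key, words) % SLOTS;
	unsigned int count;

	pthread_mutex_lock(&locks[slot % LOCK_SLOTS]);
	count = ++trees[slot]; /* the real code walks an rbtree here */
	pthread_mutex_unlock(&locks[slot % LOCK_SLOTS]);
	return count;
}

int main(void)
{
	const uint32_t key[2] = { 0xc0a80001u, 0x00000050u }; /* addr + port */
	unsigned int i;

	for (i = 0; i < LOCK_SLOTS; i++)
		pthread_mutex_init(&locks[i], NULL);
	return count_in_slot(key, 2) == 1 ? 0 : 1;
}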
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); +MODULE_AUTHOR("Florian Westphal <fw@strlen.de>"); +MODULE_DESCRIPTION("netfilter: count number of connections matching a key"); +MODULE_LICENSE("GPL"); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 85f643c1e227..6a64d528d076 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -901,6 +901,9 @@ static unsigned int early_drop_list(struct net *net, hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) { tmp = nf_ct_tuplehash_to_ctrack(h); + if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) + continue; + if (nf_ct_is_expired(tmp)) { nf_ct_gc_expired(tmp); continue; @@ -975,6 +978,18 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct) return false; } +#define DAY (86400 * HZ) + +/* Set an arbitrary timeout large enough not to ever expire, this save + * us a check for the IPS_OFFLOAD_BIT from the packet path via + * nf_ct_is_expired(). + */ +static void nf_ct_offload_timeout(struct nf_conn *ct) +{ + if (nf_ct_expires(ct) < DAY / 2) + ct->timeout = nfct_time_stamp + DAY; +} + static void gc_worker(struct work_struct *work) { unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); @@ -1011,6 +1026,11 @@ static void gc_worker(struct work_struct *work) tmp = nf_ct_tuplehash_to_ctrack(h); scanned++; + if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) { + nf_ct_offload_timeout(tmp); + continue; + } + if (nf_ct_is_expired(tmp)) { nf_ct_gc_expired(tmp); expired_count++; diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index dc6347342e34..1601275efe2d 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -1,4 +1,4 @@ -/**************************************************************************** +/* * ip_conntrack_helper_h323_asn1.c - BER and PER decoding library for H.323 * conntrack/NAT module. * @@ -8,7 +8,7 @@ * * See ip_conntrack_helper_h323_asn1.h for details. 
* - ****************************************************************************/ + */ #ifdef __KERNEL__ #include <linux/kernel.h> @@ -140,14 +140,15 @@ static const decoder_t Decoders[] = { decode_choice, }; -/**************************************************************************** +/* * H.323 Types - ****************************************************************************/ + */ #include "nf_conntrack_h323_types.c" -/**************************************************************************** +/* * Functions - ****************************************************************************/ + */ + /* Assume bs is aligned && v < 16384 */ static unsigned int get_len(struct bitstr *bs) { @@ -177,7 +178,6 @@ static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits) return 0; } -/****************************************************************************/ static unsigned int get_bit(struct bitstr *bs) { unsigned int b = (*bs->cur) & (0x80 >> bs->bit); @@ -187,7 +187,6 @@ static unsigned int get_bit(struct bitstr *bs) return b; } -/****************************************************************************/ /* Assume b <= 8 */ static unsigned int get_bits(struct bitstr *bs, unsigned int b) { @@ -213,7 +212,6 @@ static unsigned int get_bits(struct bitstr *bs, unsigned int b) return v; } -/****************************************************************************/ /* Assume b <= 32 */ static unsigned int get_bitmap(struct bitstr *bs, unsigned int b) { @@ -251,9 +249,9 @@ static unsigned int get_bitmap(struct bitstr *bs, unsigned int b) return v; } -/**************************************************************************** +/* * Assume bs is aligned and sizeof(unsigned int) == 4 - ****************************************************************************/ + */ static unsigned int get_uint(struct bitstr *bs, int b) { unsigned int v = 0; @@ -262,12 +260,15 @@ static unsigned int get_uint(struct bitstr *bs, int b) case 4: v |= *bs->cur++; v <<= 8; + /* fall through */ case 3: v |= *bs->cur++; v <<= 8; + /* fall through */ case 2: v |= *bs->cur++; v <<= 8; + /* fall through */ case 1: v |= *bs->cur++; break; @@ -275,7 +276,6 @@ static unsigned int get_uint(struct bitstr *bs, int b) return v; } -/****************************************************************************/ static int decode_nul(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -284,7 +284,6 @@ static int decode_nul(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_bool(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -296,7 +295,6 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_oid(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -316,7 +314,6 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_int(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -364,7 +361,6 @@ static int decode_int(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_enum(struct bitstr *bs, const struct field_t *f, char *base, int 
level) { @@ -381,7 +377,6 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_bitstr(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -418,7 +413,6 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_numstr(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -439,7 +433,6 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_octstr(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -493,7 +486,6 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -523,7 +515,6 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_seq(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -653,7 +644,6 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ static int decode_seqof(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -750,8 +740,6 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } - -/****************************************************************************/ static int decode_choice(struct bitstr *bs, const struct field_t *f, char *base, int level) { @@ -833,7 +821,6 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f, return H323_ERROR_NONE; } -/****************************************************************************/ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras) { static const struct field_t ras_message = { @@ -849,7 +836,6 @@ int DecodeRasMessage(unsigned char *buf, size_t sz, RasMessage *ras) return decode_choice(&bs, &ras_message, (char *) ras, 0); } -/****************************************************************************/ static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg, size_t sz, H323_UserInformation *uuie) { @@ -867,7 +853,6 @@ static int DecodeH323_UserInformation(unsigned char *buf, unsigned char *beg, return decode_seq(&bs, &h323_userinformation, (char *) uuie, 0); } -/****************************************************************************/ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz, MultimediaSystemControlMessage * mscm) @@ -886,7 +871,6 @@ int DecodeMultimediaSystemControlMessage(unsigned char *buf, size_t sz, (char *) mscm, 0); } -/****************************************************************************/ int DecodeQ931(unsigned char *buf, size_t sz, Q931 *q931) { unsigned char *p = buf; diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index f71f0d2558fd..005589c6d0f6 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -24,6 +24,7 
@@ #include <linux/skbuff.h> #include <net/route.h> #include <net/ip6_route.h> +#include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> @@ -115,7 +116,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245; static struct nf_conntrack_helper nf_conntrack_helper_q931[]; static struct nf_conntrack_helper nf_conntrack_helper_ras[]; -/****************************************************************************/ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned char **data, int *datalen, int *dataoff) @@ -219,7 +219,6 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff, return 0; } -/****************************************************************************/ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data, H245_TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port) @@ -254,7 +253,6 @@ static int get_h245_addr(struct nf_conn *ct, const unsigned char *data, return 1; } -/****************************************************************************/ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -328,7 +326,6 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct, return ret; } -/****************************************************************************/ static int expect_t120(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -380,7 +377,6 @@ static int expect_t120(struct sk_buff *skb, return ret; } -/****************************************************************************/ static int process_h245_channel(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -410,7 +406,6 @@ static int process_h245_channel(struct sk_buff *skb, return 0; } -/****************************************************************************/ static int process_olc(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -472,7 +467,6 @@ static int process_olc(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_olca(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned char **data, int dataoff, @@ -542,7 +536,6 @@ static int process_olca(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_h245(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned char **data, int dataoff, @@ -578,7 +571,6 @@ static int process_h245(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int h245_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { @@ -628,7 +620,6 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff, return NF_DROP; } -/****************************************************************************/ static const struct nf_conntrack_expect_policy h245_exp_policy = { .max_expected = H323_RTP_CHANNEL_MAX * 4 + 2 /* T.120 */, .timeout = 240, @@ -643,7 +634,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = { .expect_policy = &h245_exp_policy, }; 
-/****************************************************************************/ int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port) @@ -675,7 +665,6 @@ int get_h225_addr(struct nf_conn *ct, unsigned char *data, return 1; } -/****************************************************************************/ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned char **data, int dataoff, @@ -726,20 +715,15 @@ static int expect_h245(struct sk_buff *skb, struct nf_conn *ct, } /* If the calling party is on the same side of the forward-to party, - * we don't need to track the second call */ + * we don't need to track the second call + */ static int callforward_do_filter(struct net *net, const union nf_inet_addr *src, const union nf_inet_addr *dst, u_int8_t family) { - const struct nf_afinfo *afinfo; int ret = 0; - /* rcu_read_lock()ed by nf_hook_thresh */ - afinfo = nf_get_afinfo(family); - if (!afinfo) - return 0; - switch (family) { case AF_INET: { struct flowi4 fl1, fl2; @@ -750,10 +734,10 @@ static int callforward_do_filter(struct net *net, memset(&fl2, 0, sizeof(fl2)); fl2.daddr = dst->ip; - if (!afinfo->route(net, (struct dst_entry **)&rt1, - flowi4_to_flowi(&fl1), false)) { - if (!afinfo->route(net, (struct dst_entry **)&rt2, - flowi4_to_flowi(&fl2), false)) { + if (!nf_ip_route(net, (struct dst_entry **)&rt1, + flowi4_to_flowi(&fl1), false)) { + if (!nf_ip_route(net, (struct dst_entry **)&rt2, + flowi4_to_flowi(&fl2), false)) { if (rt_nexthop(rt1, fl1.daddr) == rt_nexthop(rt2, fl2.daddr) && rt1->dst.dev == rt2->dst.dev) @@ -766,18 +750,23 @@ static int callforward_do_filter(struct net *net, } #if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV6) case AF_INET6: { - struct flowi6 fl1, fl2; + const struct nf_ipv6_ops *v6ops; struct rt6_info *rt1, *rt2; + struct flowi6 fl1, fl2; + + v6ops = nf_get_ipv6_ops(); + if (!v6ops) + return 0; memset(&fl1, 0, sizeof(fl1)); fl1.daddr = src->in6; memset(&fl2, 0, sizeof(fl2)); fl2.daddr = dst->in6; - if (!afinfo->route(net, (struct dst_entry **)&rt1, - flowi6_to_flowi(&fl1), false)) { - if (!afinfo->route(net, (struct dst_entry **)&rt2, - flowi6_to_flowi(&fl2), false)) { + if (!v6ops->route(net, (struct dst_entry **)&rt1, + flowi6_to_flowi(&fl1), false)) { + if (!v6ops->route(net, (struct dst_entry **)&rt2, + flowi6_to_flowi(&fl2), false)) { if (ipv6_addr_equal(rt6_nexthop(rt1, &fl1.daddr), rt6_nexthop(rt2, &fl2.daddr)) && rt1->dst.dev == rt2->dst.dev) @@ -794,7 +783,6 @@ static int callforward_do_filter(struct net *net, } -/****************************************************************************/ static int expect_callforwarding(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -815,7 +803,8 @@ static int expect_callforwarding(struct sk_buff *skb, return 0; /* If the calling party is on the same side of the forward-to party, - * we don't need to track the second call */ + * we don't need to track the second call + */ if (callforward_filter && callforward_do_filter(net, &addr, &ct->tuplehash[!dir].tuple.src.u3, nf_ct_l3num(ct))) { @@ -854,7 +843,6 @@ static int expect_callforwarding(struct sk_buff *skb, return ret; } -/****************************************************************************/ static int process_setup(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -925,7 +913,6 @@ static int process_setup(struct sk_buff *skb, struct nf_conn *ct, 
return 0; } -/****************************************************************************/ static int process_callproceeding(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, @@ -958,7 +945,6 @@ static int process_callproceeding(struct sk_buff *skb, return 0; } -/****************************************************************************/ static int process_connect(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -990,7 +976,6 @@ static int process_connect(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1022,7 +1007,6 @@ static int process_alerting(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_facility(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1063,7 +1047,6 @@ static int process_facility(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_progress(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1095,7 +1078,6 @@ static int process_progress(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_q931(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned char **data, int dataoff, @@ -1154,7 +1136,6 @@ static int process_q931(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int q931_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { @@ -1203,7 +1184,6 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff, return NF_DROP; } -/****************************************************************************/ static const struct nf_conntrack_expect_policy q931_exp_policy = { /* T.120 and H.245 */ .max_expected = H323_RTP_CHANNEL_MAX * 4 + 4, @@ -1231,7 +1211,6 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = { }, }; -/****************************************************************************/ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff, int *datalen) { @@ -1249,7 +1228,6 @@ static unsigned char *get_udp_data(struct sk_buff *skb, unsigned int protoff, return skb_header_pointer(skb, dataoff, *datalen, h323_buffer); } -/****************************************************************************/ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct, union nf_inet_addr *addr, __be16 port) @@ -1270,7 +1248,6 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct, return NULL; } -/****************************************************************************/ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, unsigned char **data, @@ -1328,7 +1305,6 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct, return ret; } -/****************************************************************************/ static int process_grq(struct sk_buff *skb, struct nf_conn *ct, enum 
ip_conntrack_info ctinfo, unsigned int protoff, @@ -1346,7 +1322,6 @@ static int process_grq(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1391,7 +1366,6 @@ static int process_gcf(struct sk_buff *skb, struct nf_conn *ct, return ret; } -/****************************************************************************/ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1428,7 +1402,6 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1480,7 +1453,6 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_urq(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1514,7 +1486,6 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_arq(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1559,7 +1530,6 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_acf(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1608,7 +1578,6 @@ static int process_acf(struct sk_buff *skb, struct nf_conn *ct, return ret; } -/****************************************************************************/ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1626,7 +1595,6 @@ static int process_lrq(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1666,7 +1634,6 @@ static int process_lcf(struct sk_buff *skb, struct nf_conn *ct, return ret; } -/****************************************************************************/ static int process_irr(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1700,7 +1667,6 @@ static int process_irr(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int process_ras(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff, @@ -1745,7 +1711,6 @@ static int process_ras(struct sk_buff *skb, struct nf_conn *ct, return 0; } -/****************************************************************************/ static int ras_help(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, enum ip_conntrack_info ctinfo) { @@ -1788,7 +1753,6 @@ static int ras_help(struct sk_buff *skb, unsigned int protoff, return NF_DROP; } -/****************************************************************************/ static const struct nf_conntrack_expect_policy ras_exp_policy = { 
.max_expected = 32, .timeout = 240, @@ -1849,7 +1813,6 @@ static void __exit h323_helper_exit(void) nf_conntrack_helper_unregister(&nf_conntrack_helper_h245); } -/****************************************************************************/ static void __exit nf_conntrack_h323_fini(void) { h323_helper_exit(); @@ -1857,7 +1820,6 @@ static void __exit nf_conntrack_h323_fini(void) pr_debug("nf_ct_h323: fini\n"); } -/****************************************************************************/ static int __init nf_conntrack_h323_init(void) { int ret; @@ -1877,7 +1839,6 @@ err1: return ret; } -/****************************************************************************/ module_init(nf_conntrack_h323_init); module_exit(nf_conntrack_h323_fini); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 382d49792f42..7c7921a53b13 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -544,7 +544,7 @@ static size_t ctnetlink_proto_size(const struct nf_conn *ct) len *= 3u; /* ORIG, REPLY, MASTER */ l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); - len += l4proto->nla_size; + len += l4proto->nlattr_size; if (l4proto->nlattr_tuple_size) { len4 = l4proto->nlattr_tuple_size(); len4 *= 3u; /* ORIG, REPLY, MASTER */ @@ -1110,6 +1110,14 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = { .len = NF_CT_LABELS_MAX_SIZE }, }; +static int ctnetlink_flush_iterate(struct nf_conn *ct, void *data) +{ + if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) + return 0; + + return ctnetlink_filter_match(ct, data); +} + static int ctnetlink_flush_conntrack(struct net *net, const struct nlattr * const cda[], u32 portid, int report) @@ -1122,7 +1130,7 @@ static int ctnetlink_flush_conntrack(struct net *net, return PTR_ERR(filter); } - nf_ct_iterate_cleanup_net(net, ctnetlink_filter_match, filter, + nf_ct_iterate_cleanup_net(net, ctnetlink_flush_iterate, filter, portid, report); kfree(filter); @@ -1168,6 +1176,11 @@ static int ctnetlink_del_conntrack(struct net *net, struct sock *ctnl, ct = nf_ct_tuplehash_to_ctrack(h); + if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) { + nf_ct_put(ct); + return -EBUSY; + } + if (cda[CTA_ID]) { u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID])); if (id != (u32)(unsigned long)ct) { diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index c8e9c9503a08..afdeca53e88b 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -385,14 +385,14 @@ void nf_ct_l4proto_unregister_sysctl(struct net *net, /* FIXME: Allow NULL functions and sub in pointers to generic for them. 
--RR */ -int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto) +int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *l4proto) { int ret = 0; if (l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos)) return -EBUSY; - if ((l4proto->to_nlattr && !l4proto->nlattr_size) || + if ((l4proto->to_nlattr && l4proto->nlattr_size == 0) || (l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size)) return -EINVAL; @@ -428,10 +428,6 @@ int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto) goto out_unlock; } - l4proto->nla_size = 0; - if (l4proto->nlattr_size) - l4proto->nla_size += l4proto->nlattr_size(); - rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto], l4proto); out_unlock: @@ -502,7 +498,7 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net, } EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one); -int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[], +int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const l4proto[], unsigned int num_proto) { int ret = -EINVAL, ver; @@ -524,7 +520,7 @@ int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[], EXPORT_SYMBOL_GPL(nf_ct_l4proto_register); int nf_ct_l4proto_pernet_register(struct net *net, - struct nf_conntrack_l4proto *const l4proto[], + const struct nf_conntrack_l4proto *const l4proto[], unsigned int num_proto) { int ret = -EINVAL; @@ -545,7 +541,7 @@ int nf_ct_l4proto_pernet_register(struct net *net, } EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register); -void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[], +void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const l4proto[], unsigned int num_proto) { mutex_lock(&nf_ct_proto_mutex); @@ -555,12 +551,12 @@ void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[], synchronize_net(); /* Remove all contrack entries for this protocol */ - nf_ct_iterate_destroy(kill_l4proto, l4proto); + nf_ct_iterate_destroy(kill_l4proto, (void *)l4proto); } EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister); void nf_ct_l4proto_pernet_unregister(struct net *net, - struct nf_conntrack_l4proto *const l4proto[], + const struct nf_conntrack_l4proto *const l4proto[], unsigned int num_proto) { while (num_proto-- != 0) diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 2a446f4a554c..abe647d5b8c6 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -654,6 +654,12 @@ static const struct nla_policy dccp_nla_policy[CTA_PROTOINFO_DCCP_MAX + 1] = { [CTA_PROTOINFO_DCCP_PAD] = { .type = NLA_UNSPEC }, }; +#define DCCP_NLATTR_SIZE ( \ + NLA_ALIGN(NLA_HDRLEN + 1) + \ + NLA_ALIGN(NLA_HDRLEN + 1) + \ + NLA_ALIGN(NLA_HDRLEN + sizeof(u64)) + \ + NLA_ALIGN(NLA_HDRLEN + 0)) + static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) { struct nlattr *attr = cda[CTA_PROTOINFO_DCCP]; @@ -691,13 +697,6 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct) spin_unlock_bh(&ct->lock); return 0; } - -static int dccp_nlattr_size(void) -{ - return nla_total_size(0) /* CTA_PROTOINFO_DCCP */ - + nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1); -} - #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) @@ -862,7 +861,7 @@ static struct nf_proto_net *dccp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.dccp.pn; } -struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = { +const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = { .l3proto = AF_INET, .l4proto = 
IPPROTO_DCCP, .pkt_to_tuple = dccp_pkt_to_tuple, @@ -876,8 +875,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = { .print_conntrack = dccp_print_conntrack, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .nlattr_size = DCCP_NLATTR_SIZE, .to_nlattr = dccp_to_nlattr, - .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, @@ -898,7 +897,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 __read_mostly = { }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_dccp4); -struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = { +const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = { .l3proto = AF_INET6, .l4proto = IPPROTO_DCCP, .pkt_to_tuple = dccp_pkt_to_tuple, @@ -912,8 +911,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 __read_mostly = { .print_conntrack = dccp_print_conntrack, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .nlattr_size = DCCP_NLATTR_SIZE, .to_nlattr = dccp_to_nlattr, - .nlattr_size = dccp_nlattr_size, .from_nlattr = nlattr_to_dccp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c index 1f86ddf6649a..6c6896d21cd7 100644 --- a/net/netfilter/nf_conntrack_proto_generic.c +++ b/net/netfilter/nf_conntrack_proto_generic.c @@ -12,7 +12,7 @@ #include <linux/netfilter.h> #include <net/netfilter/nf_conntrack_l4proto.h> -static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ; +static const unsigned int nf_ct_generic_timeout = 600*HZ; static bool nf_generic_should_process(u8 proto) { @@ -163,7 +163,7 @@ static struct nf_proto_net *generic_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.generic.pn; } -struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic = { .l3proto = PF_UNSPEC, .l4proto = 255, diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index a2503005d80b..d049ea5a3770 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -48,7 +48,7 @@ enum grep_conntrack { GRE_CT_MAX }; -static unsigned int gre_timeouts[GRE_CT_MAX] = { +static const unsigned int gre_timeouts[GRE_CT_MAX] = { [GRE_CT_UNREPLIED] = 30*HZ, [GRE_CT_REPLIED] = 180*HZ, }; @@ -352,7 +352,7 @@ static int gre_init_net(struct net *net, u_int16_t proto) } /* protocol helper struct */ -static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = { +static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = { .l3proto = AF_INET, .l4proto = IPPROTO_GRE, .pkt_to_tuple = gre_pkt_to_tuple, diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 80faf04ddf15..fb9a35d16069 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -52,7 +52,7 @@ static const char *const sctp_conntrack_names[] = { #define HOURS * 60 MINS #define DAYS * 24 HOURS -static unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] __read_mostly = { +static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_CLOSED] = 10 SECS, [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, @@ -578,6 +578,11 @@ static const struct nla_policy sctp_nla_policy[CTA_PROTOINFO_SCTP_MAX+1] = { [CTA_PROTOINFO_SCTP_VTAG_REPLY] = 
{ .type = NLA_U32 }, }; +#define SCTP_NLATTR_SIZE ( \ + NLA_ALIGN(NLA_HDRLEN + 1) + \ + NLA_ALIGN(NLA_HDRLEN + 4) + \ + NLA_ALIGN(NLA_HDRLEN + 4)) + static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) { struct nlattr *attr = cda[CTA_PROTOINFO_SCTP]; @@ -608,12 +613,6 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct) return 0; } - -static int sctp_nlattr_size(void) -{ - return nla_total_size(0) /* CTA_PROTOINFO_SCTP */ - + nla_policy_len(sctp_nla_policy, CTA_PROTOINFO_SCTP_MAX + 1); -} #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) @@ -778,7 +777,7 @@ static struct nf_proto_net *sctp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.sctp.pn; } -struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { +const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = { .l3proto = PF_INET, .l4proto = IPPROTO_SCTP, .pkt_to_tuple = sctp_pkt_to_tuple, @@ -793,8 +792,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { .can_early_drop = sctp_can_early_drop, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .nlattr_size = SCTP_NLATTR_SIZE, .to_nlattr = sctp_to_nlattr, - .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, @@ -815,7 +814,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = { }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_sctp4); -struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { +const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_SCTP, .pkt_to_tuple = sctp_pkt_to_tuple, @@ -830,8 +829,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = { .can_early_drop = sctp_can_early_drop, .me = THIS_MODULE, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .nlattr_size = SCTP_NLATTR_SIZE, .to_nlattr = sctp_to_nlattr, - .nlattr_size = sctp_nlattr_size, .from_nlattr = nlattr_to_sctp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 37ef35b861f2..e97cdc1cf98c 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -68,7 +68,7 @@ static const char *const tcp_conntrack_names[] = { #define HOURS * 60 MINS #define DAYS * 24 HOURS -static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = { +static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = { [TCP_CONNTRACK_SYN_SENT] = 2 MINS, [TCP_CONNTRACK_SYN_RECV] = 60 SECS, [TCP_CONNTRACK_ESTABLISHED] = 5 DAYS, @@ -305,6 +305,9 @@ static bool tcp_invert_tuple(struct nf_conntrack_tuple *tuple, /* Print out the private part of the conntrack. 
*/ static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct) { + if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) + return; + seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]); } #endif @@ -1222,6 +1225,12 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = { [CTA_PROTOINFO_TCP_FLAGS_REPLY] = { .len = sizeof(struct nf_ct_tcp_flags) }, }; +#define TCP_NLATTR_SIZE ( \ + NLA_ALIGN(NLA_HDRLEN + 1) + \ + NLA_ALIGN(NLA_HDRLEN + 1) + \ + NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \ + NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags))) + static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) { struct nlattr *pattr = cda[CTA_PROTOINFO_TCP]; @@ -1274,12 +1283,6 @@ static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct) return 0; } -static int tcp_nlattr_size(void) -{ - return nla_total_size(0) /* CTA_PROTOINFO_TCP */ - + nla_policy_len(tcp_nla_policy, CTA_PROTOINFO_TCP_MAX + 1); -} - static unsigned int tcp_nlattr_tuple_size(void) { static unsigned int size __read_mostly; @@ -1541,7 +1544,7 @@ static struct nf_proto_net *tcp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.tcp.pn; } -struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 = { .l3proto = PF_INET, .l4proto = IPPROTO_TCP, @@ -1557,11 +1560,11 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = .can_early_drop = tcp_can_early_drop, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .to_nlattr = tcp_to_nlattr, - .nlattr_size = tcp_nlattr_size, .from_nlattr = nlattr_to_tcp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nlattr_tuple_size = tcp_nlattr_tuple_size, + .nlattr_size = TCP_NLATTR_SIZE, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) @@ -1579,7 +1582,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly = }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4); -struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_TCP, @@ -1594,8 +1597,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly = .error = tcp_error, .can_early_drop = tcp_can_early_drop, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) + .nlattr_size = TCP_NLATTR_SIZE, .to_nlattr = tcp_to_nlattr, - .nlattr_size = tcp_nlattr_size, .from_nlattr = nlattr_to_tcp, .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c index 3a5f727103af..fe7243970aa4 100644 --- a/net/netfilter/nf_conntrack_proto_udp.c +++ b/net/netfilter/nf_conntrack_proto_udp.c @@ -26,7 +26,7 @@ #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> -static unsigned int udp_timeouts[UDP_CT_MAX] = { +static const unsigned int udp_timeouts[UDP_CT_MAX] = { [UDP_CT_UNREPLIED] = 30*HZ, [UDP_CT_REPLIED] = 180*HZ, }; @@ -296,7 +296,7 @@ static struct nf_proto_net *udp_get_net_proto(struct net *net) return &net->ct.nf_ct_proto.udp.pn; } -struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 = { .l3proto = PF_INET, .l4proto = IPPROTO_UDP, @@ -328,7 +328,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4); #ifdef CONFIG_NF_CT_PROTO_UDPLITE -struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 = { .l3proto = PF_INET, .l4proto = IPPROTO_UDPLITE, @@ -360,7 +360,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly = EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4); #endif -struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_UDP, @@ -392,7 +392,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6); #ifdef CONFIG_NF_CT_PROTO_UDPLITE -struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly = +const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 = { .l3proto = PF_INET6, .l4proto = IPPROTO_UDPLITE, diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 5a101caa3e12..46d32baad095 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -309,10 +309,12 @@ static int ct_seq_show(struct seq_file *s, void *v) WARN_ON(!l4proto); ret = -ENOSPC; - seq_printf(s, "%-8s %u %-8s %u %ld ", + seq_printf(s, "%-8s %u %-8s %u ", l3proto_name(l3proto->l3proto), nf_ct_l3num(ct), - l4proto_name(l4proto->l4proto), nf_ct_protonum(ct), - nf_ct_expires(ct) / HZ); + l4proto_name(l4proto->l4proto), nf_ct_protonum(ct)); + + if (!test_bit(IPS_OFFLOAD_BIT, &ct->status)) + seq_printf(s, "%ld ", nf_ct_expires(ct) / HZ); if (l4proto->print_conntrack) l4proto->print_conntrack(s, ct); @@ -339,7 +341,9 @@ static int ct_seq_show(struct seq_file *s, void *v) if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) goto release; - if (test_bit(IPS_ASSURED_BIT, &ct->status)) + if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) + seq_puts(s, "[OFFLOAD] "); + else if (test_bit(IPS_ASSURED_BIT, &ct->status)) seq_puts(s, "[ASSURED] "); if (seq_has_overflowed(s)) diff --git a/net/netfilter/nf_flow_table.c b/net/netfilter/nf_flow_table.c new file mode 100644 index 000000000000..2f5099cb85b8 --- /dev/null +++ b/net/netfilter/nf_flow_table.c @@ -0,0 +1,429 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netfilter.h> +#include <linux/rhashtable.h> +#include <linux/netdevice.h> +#include <net/netfilter/nf_flow_table.h> +#include <net/netfilter/nf_conntrack.h> +#include <net/netfilter/nf_conntrack_core.h> +#include <net/netfilter/nf_conntrack_tuple.h> + +struct flow_offload_entry { + struct flow_offload flow; + struct nf_conn *ct; + struct rcu_head rcu_head; +}; + +struct flow_offload * +flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route) +{ + struct flow_offload_entry *entry; + struct flow_offload *flow; + + if (unlikely(nf_ct_is_dying(ct) || + !atomic_inc_not_zero(&ct->ct_general.use))) + return NULL; + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + goto err_ct_refcnt; + + flow = &entry->flow; + + if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst)) + goto err_dst_cache_original; + + if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst)) + goto err_dst_cache_reply; + + entry->ct = ct; + + switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num) { + case NFPROTO_IPV4: + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4 = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in; + 
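/* Annotation (not part of the patch): flow_offload_alloc() continues
 * below by mirroring the conntrack tuples into both flow directions,
 * so the offload fast path can match packets either way without
 * another conntrack lookup; the reply tuple already carries any NAT
 * translation. */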
flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4 = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4 = + ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4 = + ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in; + break; + case NFPROTO_IPV6: + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6 = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6; + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6 = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6 = + ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.in6; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6 = + ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.in6; + break; + } + + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l3proto = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l3proto = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.l4proto = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; + + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache = + route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache = + route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst; + + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port; + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port = + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port = + ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u.tcp.port; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port = + ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u.tcp.port; + + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dir = + FLOW_OFFLOAD_DIR_ORIGINAL; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dir = + FLOW_OFFLOAD_DIR_REPLY; + + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx = + route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex; + flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.oifidx = + route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx = + route->tuple[FLOW_OFFLOAD_DIR_REPLY].ifindex; + flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.oifidx = + route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].ifindex; + + if (ct->status & IPS_SRC_NAT) + flow->flags |= FLOW_OFFLOAD_SNAT; + if (ct->status & IPS_DST_NAT) + flow->flags |= FLOW_OFFLOAD_DNAT; + + return flow; + +err_dst_cache_reply: + dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst); +err_dst_cache_original: + kfree(entry); +err_ct_refcnt: + nf_ct_put(ct); + + return NULL; +} +EXPORT_SYMBOL_GPL(flow_offload_alloc); + +void flow_offload_free(struct flow_offload *flow) +{ + struct flow_offload_entry *e; + + dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache); + dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache); + e = container_of(flow, struct flow_offload_entry, flow); + kfree(e); +} +EXPORT_SYMBOL_GPL(flow_offload_free); + +void flow_offload_dead(struct flow_offload *flow) +{ + flow->flags |= FLOW_OFFLOAD_DYING; +} +EXPORT_SYMBOL_GPL(flow_offload_dead); + +int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) +{ + flow->timeout = (u32)jiffies; +
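/* Annotation (not part of the patch): the timeout stamped above is the
 * garbage-collection deadline; nf_flow_has_expired() further down
 * treats the flow as stale once (__s32)(flow->timeout - (u32)jiffies)
 * drops to or below zero, so the datapath must refresh flow->timeout
 * to keep an entry alive. */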
rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, + *flow_table->type->params); + rhashtable_insert_fast(&flow_table->rhashtable, + &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, + *flow_table->type->params); + return 0; +} +EXPORT_SYMBOL_GPL(flow_offload_add); + +void flow_offload_del(struct nf_flowtable *flow_table, + struct flow_offload *flow) +{ + struct flow_offload_entry *e; + + rhashtable_remove_fast(&flow_table->rhashtable, + &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, + *flow_table->type->params); + rhashtable_remove_fast(&flow_table->rhashtable, + &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node, + *flow_table->type->params); + + e = container_of(flow, struct flow_offload_entry, flow); + kfree_rcu(e, rcu_head); +} +EXPORT_SYMBOL_GPL(flow_offload_del); + +struct flow_offload_tuple_rhash * +flow_offload_lookup(struct nf_flowtable *flow_table, + struct flow_offload_tuple *tuple) +{ + return rhashtable_lookup_fast(&flow_table->rhashtable, tuple, + *flow_table->type->params); +} +EXPORT_SYMBOL_GPL(flow_offload_lookup); + +static void nf_flow_release_ct(const struct flow_offload *flow) +{ + struct flow_offload_entry *e; + + e = container_of(flow, struct flow_offload_entry, flow); + nf_ct_delete(e->ct, 0, 0); + nf_ct_put(e->ct); +} + +int nf_flow_table_iterate(struct nf_flowtable *flow_table, + void (*iter)(struct flow_offload *flow, void *data), + void *data) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct rhashtable_iter hti; + struct flow_offload *flow; + int err; + + err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL); + if (err) + return err; + + rhashtable_walk_start(&hti); + + while ((tuplehash = rhashtable_walk_next(&hti))) { + if (IS_ERR(tuplehash)) { + err = PTR_ERR(tuplehash); + if (err != -EAGAIN) + goto out; + + continue; + } + if (tuplehash->tuple.dir) + continue; + + flow = container_of(tuplehash, struct flow_offload, tuplehash[0]); + + iter(flow, data); + } +out: + rhashtable_walk_stop(&hti); + rhashtable_walk_exit(&hti); + + return err; +} +EXPORT_SYMBOL_GPL(nf_flow_table_iterate); + +static inline bool nf_flow_has_expired(const struct flow_offload *flow) +{ + return (__s32)(flow->timeout - (u32)jiffies) <= 0; +} + +static inline bool nf_flow_is_dying(const struct flow_offload *flow) +{ + return flow->flags & FLOW_OFFLOAD_DYING; +} + +void nf_flow_offload_work_gc(struct work_struct *work) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct nf_flowtable *flow_table; + struct rhashtable_iter hti; + struct flow_offload *flow; + int err; + + flow_table = container_of(work, struct nf_flowtable, gc_work.work); + + err = rhashtable_walk_init(&flow_table->rhashtable, &hti, GFP_KERNEL); + if (err) + goto schedule; + + rhashtable_walk_start(&hti); + + while ((tuplehash = rhashtable_walk_next(&hti))) { + if (IS_ERR(tuplehash)) { + err = PTR_ERR(tuplehash); + if (err != -EAGAIN) + goto out; + + continue; + } + if (tuplehash->tuple.dir) + continue; + + flow = container_of(tuplehash, struct flow_offload, tuplehash[0]); + + if (nf_flow_has_expired(flow) || + nf_flow_is_dying(flow)) { + flow_offload_del(flow_table, flow); + nf_flow_release_ct(flow); + } + } +out: + rhashtable_walk_stop(&hti); + rhashtable_walk_exit(&hti); +schedule: + queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ); +} +EXPORT_SYMBOL_GPL(nf_flow_offload_work_gc); + +static u32 flow_offload_hash(const void *data, u32 len, u32 seed) +{ + const struct flow_offload_tuple *tuple = data; + + return jhash(tuple, 
offsetof(struct flow_offload_tuple, dir), seed); +} + +static u32 flow_offload_hash_obj(const void *data, u32 len, u32 seed) +{ + const struct flow_offload_tuple_rhash *tuplehash = data; + + return jhash(&tuplehash->tuple, offsetof(struct flow_offload_tuple, dir), seed); +} + +static int flow_offload_hash_cmp(struct rhashtable_compare_arg *arg, + const void *ptr) +{ + const struct flow_offload_tuple *tuple = arg->key; + const struct flow_offload_tuple_rhash *x = ptr; + + if (memcmp(&x->tuple, tuple, offsetof(struct flow_offload_tuple, dir))) + return 1; + + return 0; +} + +const struct rhashtable_params nf_flow_offload_rhash_params = { + .head_offset = offsetof(struct flow_offload_tuple_rhash, node), + .hashfn = flow_offload_hash, + .obj_hashfn = flow_offload_hash_obj, + .obj_cmpfn = flow_offload_hash_cmp, + .automatic_shrinking = true, +}; +EXPORT_SYMBOL_GPL(nf_flow_offload_rhash_params); + +static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff, + __be16 port, __be16 new_port) +{ + struct tcphdr *tcph; + + if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) || + skb_try_make_writable(skb, thoff + sizeof(*tcph))) + return -1; + + tcph = (void *)(skb_network_header(skb) + thoff); + inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true); + + return 0; +} + +static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff, + __be16 port, __be16 new_port) +{ + struct udphdr *udph; + + if (!pskb_may_pull(skb, thoff + sizeof(*udph)) || + skb_try_make_writable(skb, thoff + sizeof(*udph))) + return -1; + + udph = (void *)(skb_network_header(skb) + thoff); + if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) { + inet_proto_csum_replace2(&udph->check, skb, port, + new_port, true); + if (!udph->check) + udph->check = CSUM_MANGLED_0; + } + + return 0; +} + +static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff, + u8 protocol, __be16 port, __be16 new_port) +{ + switch (protocol) { + case IPPROTO_TCP: + if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0) + return NF_DROP; + break; + case IPPROTO_UDP: + if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0) + return NF_DROP; + break; + } + + return 0; +} + +int nf_flow_snat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir) +{ + struct flow_ports *hdr; + __be16 port, new_port; + + if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) || + skb_try_make_writable(skb, thoff + sizeof(*hdr))) + return -1; + + hdr = (void *)(skb_network_header(skb) + thoff); + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + port = hdr->source; + new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port; + hdr->source = new_port; + break; + case FLOW_OFFLOAD_DIR_REPLY: + port = hdr->dest; + new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port; + hdr->dest = new_port; + break; + default: + return -1; + } + + return nf_flow_nat_port(skb, thoff, protocol, port, new_port); +} +EXPORT_SYMBOL_GPL(nf_flow_snat_port); + +int nf_flow_dnat_port(const struct flow_offload *flow, + struct sk_buff *skb, unsigned int thoff, + u8 protocol, enum flow_offload_tuple_dir dir) +{ + struct flow_ports *hdr; + __be16 port, new_port; + + if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) || + skb_try_make_writable(skb, thoff + sizeof(*hdr))) + return -1; + + hdr = (void *)(skb_network_header(skb) + thoff); + + switch (dir) { + case FLOW_OFFLOAD_DIR_ORIGINAL: + port = hdr->dest; + new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port; + 
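/* Annotation (not part of the patch): in the original direction, DNAT
 * recovers the translated destination port from the reply tuple's
 * source port; the reply case below mirrors this using the original
 * tuple's destination port. */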
hdr->dest = new_port; + break; + case FLOW_OFFLOAD_DIR_REPLY: + port = hdr->source; + new_port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port; + hdr->source = new_port; + break; + default: + return -1; + } + + return nf_flow_nat_port(skb, thoff, protocol, port, new_port); +} +EXPORT_SYMBOL_GPL(nf_flow_dnat_port); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c new file mode 100644 index 000000000000..281209aeba8f --- /dev/null +++ b/net/netfilter/nf_flow_table_inet.c @@ -0,0 +1,48 @@ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/netfilter.h> +#include <linux/rhashtable.h> +#include <net/netfilter/nf_flow_table.h> +#include <net/netfilter/nf_tables.h> + +static unsigned int +nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) +{ + switch (skb->protocol) { + case htons(ETH_P_IP): + return nf_flow_offload_ip_hook(priv, skb, state); + case htons(ETH_P_IPV6): + return nf_flow_offload_ipv6_hook(priv, skb, state); + } + + return NF_ACCEPT; +} + +static struct nf_flowtable_type flowtable_inet = { + .family = NFPROTO_INET, + .params = &nf_flow_offload_rhash_params, + .gc = nf_flow_offload_work_gc, + .hook = nf_flow_offload_inet_hook, + .owner = THIS_MODULE, +}; + +static int __init nf_flow_inet_module_init(void) +{ + nft_register_flowtable_type(&flowtable_inet); + + return 0; +} + +static void __exit nf_flow_inet_module_exit(void) +{ + nft_unregister_flowtable_type(&flowtable_inet); +} + +module_init(nf_flow_inet_module_init); +module_exit(nf_flow_inet_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); +MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */ diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index 44284cd2528d..18f6d7ae995b 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h @@ -10,7 +10,7 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, const struct nf_hook_entries *entries, unsigned int index, unsigned int verdict); -unsigned int nf_queue_nf_hook_drop(struct net *net); +void nf_queue_nf_hook_drop(struct net *net); /* nf_log.c */ int __init netfilter_log_init(void); diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index f7e21953b1de..7f55af5f3d1a 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -10,9 +10,11 @@ #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/netfilter.h> +#include <linux/netfilter_ipv4.h> +#include <linux/netfilter_ipv6.h> #include <linux/netfilter_bridge.h> #include <linux/seq_file.h> #include <linux/rcupdate.h> #include <net/protocol.h> #include <net/netfilter/nf_queue.h> #include <net/dst.h> @@ -96,30 +100,56 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry) } EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); -unsigned int nf_queue_nf_hook_drop(struct net *net) +void nf_queue_nf_hook_drop(struct net *net) { const struct nf_queue_handler *qh; - unsigned int count = 0; rcu_read_lock(); qh = rcu_dereference(net->nf.queue_handler); if (qh) - count = qh->nf_hook_drop(net); + qh->nf_hook_drop(net); rcu_read_unlock(); - - return count; } EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop); +static void nf_ip_saveroute(const struct sk_buff *skb, + struct nf_queue_entry *entry) +{ + struct ip_rt_info *rt_info =
nf_queue_entry_reroute(entry); + + if (entry->state.hook == NF_INET_LOCAL_OUT) { + const struct iphdr *iph = ip_hdr(skb); + + rt_info->tos = iph->tos; + rt_info->daddr = iph->daddr; + rt_info->saddr = iph->saddr; + rt_info->mark = skb->mark; + } +} + +static void nf_ip6_saveroute(const struct sk_buff *skb, + struct nf_queue_entry *entry) +{ + struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); + + if (entry->state.hook == NF_INET_LOCAL_OUT) { + const struct ipv6hdr *iph = ipv6_hdr(skb); + + rt_info->daddr = iph->daddr; + rt_info->saddr = iph->saddr; + rt_info->mark = skb->mark; + } +} + static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, const struct nf_hook_entries *entries, unsigned int index, unsigned int queuenum) { int status = -ENOENT; struct nf_queue_entry *entry = NULL; - const struct nf_afinfo *afinfo; const struct nf_queue_handler *qh; struct net *net = state->net; + unsigned int route_key_size; /* QUEUE == DROP if no one is waiting, to be safe. */ qh = rcu_dereference(net->nf.queue_handler); @@ -128,11 +158,19 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, goto err; } - afinfo = nf_get_afinfo(state->pf); - if (!afinfo) - goto err; + switch (state->pf) { + case AF_INET: + route_key_size = sizeof(struct ip_rt_info); + break; + case AF_INET6: + route_key_size = sizeof(struct ip6_rt_info); + break; + default: + route_key_size = 0; + break; + } - entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC); + entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC); if (!entry) { status = -ENOMEM; goto err; @@ -142,12 +180,21 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, .skb = skb, .state = *state, .hook_index = index, - .size = sizeof(*entry) + afinfo->route_key_size, + .size = sizeof(*entry) + route_key_size, }; nf_queue_entry_get_refs(entry); skb_dst_force(skb); - afinfo->saveroute(skb, entry); + + switch (entry->state.pf) { + case AF_INET: + nf_ip_saveroute(skb, entry); + break; + case AF_INET6: + nf_ip6_saveroute(skb, entry); + break; + } + status = qh->outfn(entry, queuenum); if (status < 0) { @@ -204,13 +251,31 @@ repeat: return NF_ACCEPT; } +static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum) +{ + switch (pf) { +#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE + case NFPROTO_BRIDGE: + return rcu_dereference(net->nf.hooks_bridge[hooknum]); +#endif + case NFPROTO_IPV4: + return rcu_dereference(net->nf.hooks_ipv4[hooknum]); + case NFPROTO_IPV6: + return rcu_dereference(net->nf.hooks_ipv6[hooknum]); + default: + WARN_ON_ONCE(1); + return NULL; + } + + return NULL; +} + /* Caller must hold rcu read-side lock */ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) { const struct nf_hook_entry *hook_entry; const struct nf_hook_entries *hooks; struct sk_buff *skb = entry->skb; - const struct nf_afinfo *afinfo; const struct net *net; unsigned int i; int err; @@ -219,12 +284,12 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) net = entry->state.net; pf = entry->state.pf; - hooks = rcu_dereference(net->nf.hooks[pf][entry->state.hook]); + hooks = nf_hook_entries_head(net, pf, entry->state.hook); nf_queue_entry_release_refs(entry); i = entry->hook_index; - if (WARN_ON_ONCE(i >= hooks->num_hook_entries)) { + if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) { kfree_skb(skb); kfree(entry); return; @@ -237,8 +302,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) verdict = 
nf_hook_entry_hookfn(hook_entry, skb, &entry->state); if (verdict == NF_ACCEPT) { - afinfo = nf_get_afinfo(entry->state.pf); - if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0) + if (nf_reroute(skb, entry) < 0) verdict = NF_DROP; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 10798b357481..336b81689ac9 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -17,6 +17,7 @@ #include <linux/netfilter.h> #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nf_tables.h> +#include <net/netfilter/nf_flow_table.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> #include <net/net_namespace.h> @@ -24,6 +25,7 @@ static LIST_HEAD(nf_tables_expressions); static LIST_HEAD(nf_tables_objects); +static LIST_HEAD(nf_tables_flowtables); /** * nft_register_afinfo - register nf_tables address family info @@ -139,29 +141,26 @@ static void nft_trans_destroy(struct nft_trans *trans) kfree(trans); } -static int nf_tables_register_hooks(struct net *net, - const struct nft_table *table, - struct nft_chain *chain, - unsigned int hook_nops) +static int nf_tables_register_hook(struct net *net, + const struct nft_table *table, + struct nft_chain *chain) { if (table->flags & NFT_TABLE_F_DORMANT || !nft_is_base_chain(chain)) return 0; - return nf_register_net_hooks(net, nft_base_chain(chain)->ops, - hook_nops); + return nf_register_net_hook(net, &nft_base_chain(chain)->ops); } -static void nf_tables_unregister_hooks(struct net *net, - const struct nft_table *table, - struct nft_chain *chain, - unsigned int hook_nops) +static void nf_tables_unregister_hook(struct net *net, + const struct nft_table *table, + struct nft_chain *chain) { if (table->flags & NFT_TABLE_F_DORMANT || !nft_is_base_chain(chain)) return; - nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, hook_nops); + nf_unregister_net_hook(net, &nft_base_chain(chain)->ops); } static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) @@ -348,6 +347,40 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj) return err; } +static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type, + struct nft_flowtable *flowtable) +{ + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, + sizeof(struct nft_trans_flowtable)); + if (trans == NULL) + return -ENOMEM; + + if (msg_type == NFT_MSG_NEWFLOWTABLE) + nft_activate_next(ctx->net, flowtable); + + nft_trans_flowtable(trans) = flowtable; + list_add_tail(&trans->list, &ctx->net->nft.commit_list); + + return 0; +} + +static int nft_delflowtable(struct nft_ctx *ctx, + struct nft_flowtable *flowtable) +{ + int err; + + err = nft_trans_flowtable_add(ctx, NFT_MSG_DELFLOWTABLE, flowtable); + if (err < 0) + return err; + + nft_deactivate_next(ctx->net, flowtable); + ctx->table->use--; + + return err; +} + /* * Tables */ @@ -595,8 +628,7 @@ static void _nf_tables_table_disable(struct net *net, if (cnt && i++ == cnt) break; - nf_unregister_net_hooks(net, nft_base_chain(chain)->ops, - afi->nops); + nf_unregister_net_hook(net, &nft_base_chain(chain)->ops); } } @@ -613,8 +645,7 @@ static int nf_tables_table_enable(struct net *net, if (!nft_is_base_chain(chain)) continue; - err = nf_register_net_hooks(net, nft_base_chain(chain)->ops, - afi->nops); + err = nf_register_net_hook(net, &nft_base_chain(chain)->ops); if (err < 0) goto err; @@ -733,6 +764,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk, INIT_LIST_HEAD(&table->chains); 
INIT_LIST_HEAD(&table->sets); INIT_LIST_HEAD(&table->objects); + INIT_LIST_HEAD(&table->flowtables); table->flags = flags; nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); @@ -754,10 +786,11 @@ err1: static int nft_flush_table(struct nft_ctx *ctx) { - int err; + struct nft_flowtable *flowtable, *nft; struct nft_chain *chain, *nc; struct nft_object *obj, *ne; struct nft_set *set, *ns; + int err; list_for_each_entry(chain, &ctx->table->chains, list) { if (!nft_is_active_next(ctx->net, chain)) @@ -774,7 +807,7 @@ static int nft_flush_table(struct nft_ctx *ctx) if (!nft_is_active_next(ctx->net, set)) continue; - if (set->flags & NFT_SET_ANONYMOUS && + if (nft_set_is_anonymous(set) && !list_empty(&set->bindings)) continue; @@ -783,6 +816,12 @@ static int nft_flush_table(struct nft_ctx *ctx) goto out; } + list_for_each_entry_safe(flowtable, nft, &ctx->table->flowtables, list) { + err = nft_delflowtable(ctx, flowtable); + if (err < 0) + goto out; + } + list_for_each_entry_safe(obj, ne, &ctx->table->objects, list) { err = nft_delobj(ctx, obj); if (err < 0) @@ -1026,7 +1065,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (nft_is_base_chain(chain)) { const struct nft_base_chain *basechain = nft_base_chain(chain); - const struct nf_hook_ops *ops = &basechain->ops[0]; + const struct nf_hook_ops *ops = &basechain->ops; struct nlattr *nest; nest = nla_nest_start(skb, NFTA_CHAIN_HOOK); @@ -1227,13 +1266,13 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr) static void nft_chain_stats_replace(struct nft_base_chain *chain, struct nft_stats __percpu *newstats) { + struct nft_stats __percpu *oldstats; + if (newstats == NULL) return; if (chain->stats) { - struct nft_stats __percpu *oldstats = - nft_dereference(chain->stats); - + oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES); rcu_assign_pointer(chain->stats, newstats); synchronize_rcu(); free_percpu(oldstats); @@ -1252,8 +1291,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) free_percpu(basechain->stats); if (basechain->stats) static_branch_dec(&nft_counters_enabled); - if (basechain->ops[0].dev != NULL) - dev_put(basechain->ops[0].dev); + if (basechain->ops.dev != NULL) + dev_put(basechain->ops.dev); kfree(chain->name); kfree(basechain); } else { @@ -1264,7 +1303,7 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) struct nft_chain_hook { u32 num; - u32 priority; + s32 priority; const struct nf_chain_type *type; struct net_device *dev; }; @@ -1303,6 +1342,11 @@ static int nft_chain_parse_hook(struct net *net, } if (!(type->hook_mask & (1 << hook->num))) return -EOPNOTSUPP; + + if (type->type == NFT_CHAIN_T_NAT && + hook->priority <= NF_IP_PRI_CONNTRACK) + return -EOPNOTSUPP; + if (!try_module_get(type->owner)) return -ENOENT; @@ -1349,7 +1393,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_stats __percpu *stats; struct net *net = ctx->net; struct nft_chain *chain; - unsigned int i; int err; if (table->use == UINT_MAX) @@ -1358,7 +1401,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (nla[NFTA_CHAIN_HOOK]) { struct nft_chain_hook hook; struct nf_hook_ops *ops; - nf_hookfn *hookfn; err = nft_chain_parse_hook(net, nla, afi, &hook, create); if (err < 0) @@ -1384,23 +1426,19 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, static_branch_inc(&nft_counters_enabled); } - hookfn = hook.type->hooks[hook.num]; basechain->type = hook.type; chain = 
&basechain->chain; - for (i = 0; i < afi->nops; i++) { - ops = &basechain->ops[i]; - ops->pf = family; - ops->hooknum = hook.num; - ops->priority = hook.priority; - ops->priv = chain; - ops->hook = afi->hooks[ops->hooknum]; - ops->dev = hook.dev; - if (hookfn) - ops->hook = hookfn; - if (afi->hook_ops_init) - afi->hook_ops_init(ops, i); - } + ops = &basechain->ops; + ops->pf = family; + ops->hooknum = hook.num; + ops->priority = hook.priority; + ops->priv = chain; + ops->hook = hook.type->hooks[ops->hooknum]; + ops->dev = hook.dev; + + if (basechain->type->type == NFT_CHAIN_T_NAT) + ops->nat_hook = true; chain->flags |= NFT_BASE_CHAIN; basechain->policy = policy; @@ -1418,7 +1456,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, goto err1; } - err = nf_tables_register_hooks(net, table, chain, afi->nops); + err = nf_tables_register_hook(net, table, chain); if (err < 0) goto err1; @@ -1432,7 +1470,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return 0; err2: - nf_tables_unregister_hooks(net, table, chain, afi->nops); + nf_tables_unregister_hook(net, table, chain); err1: nf_tables_chain_destroy(chain); @@ -1445,14 +1483,13 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, const struct nlattr * const *nla = ctx->nla; struct nft_table *table = ctx->table; struct nft_chain *chain = ctx->chain; - struct nft_af_info *afi = ctx->afi; struct nft_base_chain *basechain; struct nft_stats *stats = NULL; struct nft_chain_hook hook; const struct nlattr *name; struct nf_hook_ops *ops; struct nft_trans *trans; - int err, i; + int err; if (nla[NFTA_CHAIN_HOOK]) { if (!nft_is_base_chain(chain)) @@ -1469,14 +1506,12 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, return -EBUSY; } - for (i = 0; i < afi->nops; i++) { - ops = &basechain->ops[i]; - if (ops->hooknum != hook.num || - ops->priority != hook.priority || - ops->dev != hook.dev) { - nft_chain_release_hook(&hook); - return -EBUSY; - } + ops = &basechain->ops; + if (ops->hooknum != hook.num || + ops->priority != hook.priority || + ops->dev != hook.dev) { + nft_chain_release_hook(&hook); + return -EBUSY; } nft_chain_release_hook(&hook); } @@ -2072,7 +2107,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, continue; list_for_each_entry_rcu(chain, &table->chains, list) { - if (ctx && ctx->chain[0] && + if (ctx && ctx->chain && strcmp(ctx->chain, chain->name) != 0) continue; @@ -3277,7 +3312,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *i; struct nft_set_iter iter; - if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS) + if (!list_empty(&set->bindings) && nft_set_is_anonymous(set)) return -EBUSY; if (binding->flags & NFT_SET_MAP) { @@ -3312,7 +3347,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, { list_del_rcu(&binding->list); - if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS && + if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && nft_is_active(ctx->net, set)) nf_tables_set_destroy(ctx, set); } @@ -4665,8 +4700,10 @@ static int nf_tables_dump_obj_done(struct netlink_callback *cb) { struct nft_obj_filter *filter = cb->data; - kfree(filter->table); - kfree(filter); + if (filter) { + kfree(filter->table); + kfree(filter); + } return 0; } @@ -4848,6 +4885,605 @@ static void nf_tables_obj_notify(const struct nft_ctx *ctx, ctx->afi->family, ctx->report, GFP_KERNEL); } +/* + * Flow tables + */ +void 
nft_register_flowtable_type(struct nf_flowtable_type *type) +{ + nfnl_lock(NFNL_SUBSYS_NFTABLES); + list_add_tail_rcu(&type->list, &nf_tables_flowtables); + nfnl_unlock(NFNL_SUBSYS_NFTABLES); +} +EXPORT_SYMBOL_GPL(nft_register_flowtable_type); + +void nft_unregister_flowtable_type(struct nf_flowtable_type *type) +{ + nfnl_lock(NFNL_SUBSYS_NFTABLES); + list_del_rcu(&type->list); + nfnl_unlock(NFNL_SUBSYS_NFTABLES); +} +EXPORT_SYMBOL_GPL(nft_unregister_flowtable_type); + +static const struct nla_policy nft_flowtable_policy[NFTA_FLOWTABLE_MAX + 1] = { + [NFTA_FLOWTABLE_TABLE] = { .type = NLA_STRING, + .len = NFT_NAME_MAXLEN - 1 }, + [NFTA_FLOWTABLE_NAME] = { .type = NLA_STRING, + .len = NFT_NAME_MAXLEN - 1 }, + [NFTA_FLOWTABLE_HOOK] = { .type = NLA_NESTED }, +}; + +struct nft_flowtable *nf_tables_flowtable_lookup(const struct nft_table *table, + const struct nlattr *nla, + u8 genmask) +{ + struct nft_flowtable *flowtable; + + list_for_each_entry(flowtable, &table->flowtables, list) { + if (!nla_strcmp(nla, flowtable->name) && + nft_active_genmask(flowtable, genmask)) + return flowtable; + } + return ERR_PTR(-ENOENT); +} +EXPORT_SYMBOL_GPL(nf_tables_flowtable_lookup); + +#define NFT_FLOWTABLE_DEVICE_MAX 8 + +static int nf_tables_parse_devices(const struct nft_ctx *ctx, + const struct nlattr *attr, + struct net_device *dev_array[], int *len) +{ + const struct nlattr *tmp; + struct net_device *dev; + char ifname[IFNAMSIZ]; + int rem, n = 0, err; + + nla_for_each_nested(tmp, attr, rem) { + if (nla_type(tmp) != NFTA_DEVICE_NAME) { + err = -EINVAL; + goto err1; + } + + nla_strlcpy(ifname, tmp, IFNAMSIZ); + dev = dev_get_by_name(ctx->net, ifname); + if (!dev) { + err = -ENOENT; + goto err1; + } + + dev_array[n++] = dev; + if (n == NFT_FLOWTABLE_DEVICE_MAX) { + err = -EFBIG; + goto err1; + } + } + if (!n) + return -EINVAL; + + err = 0; +err1: + *len = n; + return err; +} + +static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX + 1] = { + [NFTA_FLOWTABLE_HOOK_NUM] = { .type = NLA_U32 }, + [NFTA_FLOWTABLE_HOOK_PRIORITY] = { .type = NLA_U32 }, + [NFTA_FLOWTABLE_HOOK_DEVS] = { .type = NLA_NESTED }, +}; + +static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx, + const struct nlattr *attr, + struct nft_flowtable *flowtable) +{ + struct net_device *dev_array[NFT_FLOWTABLE_DEVICE_MAX]; + struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1]; + struct nf_hook_ops *ops; + int hooknum, priority; + int err, n = 0, i; + + err = nla_parse_nested(tb, NFTA_FLOWTABLE_HOOK_MAX, attr, + nft_flowtable_hook_policy, NULL); + if (err < 0) + return err; + + if (!tb[NFTA_FLOWTABLE_HOOK_NUM] || + !tb[NFTA_FLOWTABLE_HOOK_PRIORITY] || + !tb[NFTA_FLOWTABLE_HOOK_DEVS]) + return -EINVAL; + + hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM])); + if (hooknum >= ctx->afi->nhooks) + return -EINVAL; + + priority = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_PRIORITY])); + + err = nf_tables_parse_devices(ctx, tb[NFTA_FLOWTABLE_HOOK_DEVS], + dev_array, &n); + if (err < 0) + goto err1; + + ops = kzalloc(sizeof(struct nf_hook_ops) * n, GFP_KERNEL); + if (!ops) { + err = -ENOMEM; + goto err1; + } + + flowtable->ops = ops; + flowtable->ops_len = n; + + for (i = 0; i < n; i++) { + flowtable->ops[i].pf = NFPROTO_NETDEV; + flowtable->ops[i].hooknum = hooknum; + flowtable->ops[i].priority = priority; + flowtable->ops[i].priv = &flowtable->data.rhashtable; + flowtable->ops[i].hook = flowtable->data.type->hook; + flowtable->ops[i].dev = dev_array[i]; + } + + err = 0; +err1: + for (i = 0; i < n; i++) +
dev_put(dev_array[i]); + + return err; +} + +static const struct nf_flowtable_type * +__nft_flowtable_type_get(const struct nft_af_info *afi) +{ + const struct nf_flowtable_type *type; + + list_for_each_entry(type, &nf_tables_flowtables, list) { + if (afi->family == type->family) + return type; + } + return NULL; +} + +static const struct nf_flowtable_type * +nft_flowtable_type_get(const struct nft_af_info *afi) +{ + const struct nf_flowtable_type *type; + + type = __nft_flowtable_type_get(afi); + if (type != NULL && try_module_get(type->owner)) + return type; + +#ifdef CONFIG_MODULES + if (type == NULL) { + nfnl_unlock(NFNL_SUBSYS_NFTABLES); + request_module("nf-flowtable-%u", afi->family); + nfnl_lock(NFNL_SUBSYS_NFTABLES); + if (__nft_flowtable_type_get(afi)) + return ERR_PTR(-EAGAIN); + } +#endif + return ERR_PTR(-ENOENT); +} + +void nft_flow_table_iterate(struct net *net, + void (*iter)(struct nf_flowtable *flowtable, void *data), + void *data) +{ + struct nft_flowtable *flowtable; + const struct nft_af_info *afi; + const struct nft_table *table; + + rcu_read_lock(); + list_for_each_entry_rcu(afi, &net->nft.af_info, list) { + list_for_each_entry_rcu(table, &afi->tables, list) { + list_for_each_entry_rcu(flowtable, &table->flowtables, list) { + iter(&flowtable->data, data); + } + } + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(nft_flow_table_iterate); + +static void nft_unregister_flowtable_net_hooks(struct net *net, + struct nft_flowtable *flowtable) +{ + int i; + + for (i = 0; i < flowtable->ops_len; i++) { + if (!flowtable->ops[i].dev) + continue; + + nf_unregister_net_hook(net, &flowtable->ops[i]); + } +} + +static int nf_tables_newflowtable(struct net *net, struct sock *nlsk, + struct sk_buff *skb, + const struct nlmsghdr *nlh, + const struct nlattr * const nla[], + struct netlink_ext_ack *extack) +{ + const struct nfgenmsg *nfmsg = nlmsg_data(nlh); + const struct nf_flowtable_type *type; + u8 genmask = nft_genmask_next(net); + int family = nfmsg->nfgen_family; + struct nft_flowtable *flowtable; + struct nft_af_info *afi; + struct nft_table *table; + struct nft_ctx ctx; + int err, i, k; + + if (!nla[NFTA_FLOWTABLE_TABLE] || + !nla[NFTA_FLOWTABLE_NAME] || + !nla[NFTA_FLOWTABLE_HOOK]) + return -EINVAL; + + afi = nf_tables_afinfo_lookup(net, family, true); + if (IS_ERR(afi)) + return PTR_ERR(afi); + + table = nf_tables_table_lookup(afi, nla[NFTA_FLOWTABLE_TABLE], genmask); + if (IS_ERR(table)) + return PTR_ERR(table); + + flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME], + genmask); + if (IS_ERR(flowtable)) { + err = PTR_ERR(flowtable); + if (err != -ENOENT) + return err; + } else { + if (nlh->nlmsg_flags & NLM_F_EXCL) + return -EEXIST; + + return 0; + } + + nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); + + flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL); + if (!flowtable) + return -ENOMEM; + + flowtable->table = table; + flowtable->name = nla_strdup(nla[NFTA_FLOWTABLE_NAME], GFP_KERNEL); + if (!flowtable->name) { + err = -ENOMEM; + goto err1; + } + + type = nft_flowtable_type_get(afi); + if (IS_ERR(type)) { + err = PTR_ERR(type); + goto err2; + } + + flowtable->data.type = type; + err = rhashtable_init(&flowtable->data.rhashtable, type->params); + if (err < 0) + goto err3; + + err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK], + flowtable); + if (err < 0) + goto err3; + + for (i = 0; i < flowtable->ops_len; i++) { + err = nf_register_net_hook(net, &flowtable->ops[i]); + if (err < 0) + goto err4; + } + + err = 
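/*
 * nft_flow_table_iterate() gives other subsystems a way to visit every
 * flow table in a netns under RCU. The nft_flow_offload expression
 * added later in this patch uses it from a netdevice notifier — a
 * condensed sketch of that caller:
 *
 *	static void cleanup_cb(struct nf_flowtable *flowtable, void *data)
 *	{
 *		nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup,
 *				      data);
 *	}
 *
 *	nft_flow_table_iterate(dev_net(dev), cleanup_cb, dev);
 *
 * The walk holds rcu_read_lock(), so the callback must not sleep.
 */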
nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
+	if (err < 0)
+		goto err5;
+
+	INIT_DEFERRABLE_WORK(&flowtable->data.gc_work, type->gc);
+	queue_delayed_work(system_power_efficient_wq,
+			   &flowtable->data.gc_work, HZ);
+
+	list_add_tail_rcu(&flowtable->list, &table->flowtables);
+	table->use++;
+
+	return 0;
+err5:
+	i = flowtable->ops_len;
+err4:
+	for (k = i - 1; k >= 0; k--)
+		nf_unregister_net_hook(net, &flowtable->ops[k]);
+
+	kfree(flowtable->ops);
+err3:
+	module_put(type->owner);
+err2:
+	kfree(flowtable->name);
+err1:
+	kfree(flowtable);
+	return err;
+}
+
+static int nf_tables_delflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	u8 genmask = nft_genmask_next(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	struct nft_af_info *afi;
+	struct nft_table *table;
+	struct nft_ctx ctx;
+
+	afi = nf_tables_afinfo_lookup(net, family, true);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_FLOWTABLE_TABLE], genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+	if (flowtable->use > 0)
+		return -EBUSY;
+
+	nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla);
+
+	return nft_delflowtable(&ctx, flowtable);
+}
+
+static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
+					 u32 portid, u32 seq, int event,
+					 u32 flags, int family,
+					 struct nft_flowtable *flowtable)
+{
+	struct nlattr *nest, *nest_devs;
+	struct nfgenmsg *nfmsg;
+	struct nlmsghdr *nlh;
+	int i;
+
+	event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, event);
+	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+	if (nlh == NULL)
+		goto nla_put_failure;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = family;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
+
+	if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) ||
+	    nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
+	    nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)))
+		goto nla_put_failure;
+
+	nest = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK);
+	if (!nest)
+		goto nla_put_failure;
+	if (nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_NUM, htonl(flowtable->hooknum)) ||
+	    nla_put_be32(skb, NFTA_FLOWTABLE_HOOK_PRIORITY, htonl(flowtable->priority)))
+		goto nla_put_failure;
+
+	nest_devs = nla_nest_start(skb, NFTA_FLOWTABLE_HOOK_DEVS);
+	if (!nest_devs)
+		goto nla_put_failure;
+
+	for (i = 0; i < flowtable->ops_len; i++) {
+		if (flowtable->ops[i].dev &&
+		    nla_put_string(skb, NFTA_DEVICE_NAME,
+				   flowtable->ops[i].dev->name))
+			goto nla_put_failure;
+	}
+	nla_nest_end(skb, nest_devs);
+	nla_nest_end(skb, nest);
+
+	nlmsg_end(skb, nlh);
+	return 0;
+
+nla_put_failure:
+	nlmsg_trim(skb, nlh);
+	return -1;
+}
+
+struct nft_flowtable_filter {
+	char	*table;
+};
+
+static int nf_tables_dump_flowtable(struct sk_buff *skb,
+				    struct netlink_callback *cb)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+	struct nft_flowtable_filter *filter = cb->data;
+	unsigned int idx = 0, s_idx = cb->args[0];
+	struct net *net = sock_net(skb->sk);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+
+	rcu_read_lock();
+	cb->seq = net->nft.base_seq;
+
+	list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
+		if (family != NFPROTO_UNSPEC && family != afi->family)
+			continue;
+
+		list_for_each_entry_rcu(table, &afi->tables, list) {
+			list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
+				if (!nft_is_active(net, flowtable))
+					goto cont;
+				if (idx < s_idx)
+					goto cont;
+				if (idx > s_idx)
+					memset(&cb->args[1], 0,
+					       sizeof(cb->args) - sizeof(cb->args[0]));
+				if (filter && filter->table[0] &&
+				    strcmp(filter->table, table->name))
+					goto cont;
+
+				if (nf_tables_fill_flowtable_info(skb, net, NETLINK_CB(cb->skb).portid,
+								  cb->nlh->nlmsg_seq,
+								  NFT_MSG_NEWFLOWTABLE,
+								  NLM_F_MULTI | NLM_F_APPEND,
+								  afi->family, flowtable) < 0)
+					goto done;
+
+				nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+				idx++;
+			}
+		}
+	}
+done:
+	rcu_read_unlock();
+
+	cb->args[0] = idx;
+	return skb->len;
+}
+
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+{
+	struct nft_flowtable_filter *filter = cb->data;
+
+	if (!filter)
+		return 0;
+
+	kfree(filter->table);
+	kfree(filter);
+
+	return 0;
+}
+
+static struct nft_flowtable_filter *
+nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+{
+	struct nft_flowtable_filter *filter;
+
+	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+	if (!filter)
+		return ERR_PTR(-ENOMEM);
+
+	if (nla[NFTA_FLOWTABLE_TABLE]) {
+		filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+					   GFP_KERNEL);
+		if (!filter->table) {
+			kfree(filter);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+	return filter;
+}
+
+static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
+				  struct sk_buff *skb,
+				  const struct nlmsghdr *nlh,
+				  const struct nlattr * const nla[],
+				  struct netlink_ext_ack *extack)
+{
+	const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+	u8 genmask = nft_genmask_cur(net);
+	int family = nfmsg->nfgen_family;
+	struct nft_flowtable *flowtable;
+	const struct nft_af_info *afi;
+	const struct nft_table *table;
+	struct sk_buff *skb2;
+	int err;
+
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = nf_tables_dump_flowtable,
+			.done = nf_tables_dump_flowtable_done,
+		};
+
+		if (nla[NFTA_FLOWTABLE_TABLE]) {
+			struct nft_flowtable_filter *filter;
+
+			filter = nft_flowtable_filter_alloc(nla);
+			if (IS_ERR(filter))
+				return PTR_ERR(filter);
+
+			c.data = filter;
+		}
+		return netlink_dump_start(nlsk, skb, nlh, &c);
+	}
+
+	if (!nla[NFTA_FLOWTABLE_NAME])
+		return -EINVAL;
+
+	afi = nf_tables_afinfo_lookup(net, family, false);
+	if (IS_ERR(afi))
+		return PTR_ERR(afi);
+
+	table = nf_tables_table_lookup(afi, nla[NFTA_FLOWTABLE_TABLE], genmask);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	flowtable = nf_tables_flowtable_lookup(table, nla[NFTA_FLOWTABLE_NAME],
+					       genmask);
+	if (IS_ERR(flowtable))
+		return PTR_ERR(flowtable);
+
+	skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!skb2)
+		return -ENOMEM;
+
+	err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid,
+					    nlh->nlmsg_seq,
+					    NFT_MSG_NEWFLOWTABLE, 0, family,
+					    flowtable);
+	if (err < 0)
+		goto err;
+
+	return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+err:
+	kfree_skb(skb2);
+	return err;
+}
+
+static void nf_tables_flowtable_notify(struct nft_ctx *ctx,
+				       struct nft_flowtable *flowtable,
+				       int event)
+{
+	struct sk_buff *skb;
+	int err;
+
+	if (!ctx->report &&
+	    !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+		return;
+
+	skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (skb == NULL)
+		goto err;
+
+	err = nf_tables_fill_flowtable_info(skb, ctx->net, ctx->portid,
+					    ctx->seq,
event, 0, + ctx->afi->family, flowtable); + if (err < 0) { + kfree_skb(skb); + goto err; + } + + nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, + ctx->report, GFP_KERNEL); + return; +err: + nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); +} + +static void nft_flowtable_destroy(void *ptr, void *arg) +{ + kfree(ptr); +} + +static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) +{ + cancel_delayed_work_sync(&flowtable->data.gc_work); + kfree(flowtable->name); + rhashtable_free_and_destroy(&flowtable->data.rhashtable, + nft_flowtable_destroy, NULL); + module_put(flowtable->data.type->owner); +} + static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, u32 portid, u32 seq) { @@ -4878,6 +5514,49 @@ nla_put_failure: return -EMSGSIZE; } +static void nft_flowtable_event(unsigned long event, struct net_device *dev, + struct nft_flowtable *flowtable) +{ + int i; + + for (i = 0; i < flowtable->ops_len; i++) { + if (flowtable->ops[i].dev != dev) + continue; + + nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]); + flowtable->ops[i].dev = NULL; + break; + } +} + +static int nf_tables_flowtable_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct nft_flowtable *flowtable; + struct nft_table *table; + struct nft_af_info *afi; + + if (event != NETDEV_UNREGISTER) + return 0; + + nfnl_lock(NFNL_SUBSYS_NFTABLES); + list_for_each_entry(afi, &dev_net(dev)->nft.af_info, list) { + list_for_each_entry(table, &afi->tables, list) { + list_for_each_entry(flowtable, &table->flowtables, list) { + nft_flowtable_event(event, dev, flowtable); + } + } + } + nfnl_unlock(NFNL_SUBSYS_NFTABLES); + + return NOTIFY_DONE; +} + +static struct notifier_block nf_tables_flowtable_notifier = { + .notifier_call = nf_tables_flowtable_event, +}; + static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event) { @@ -5030,6 +5709,21 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = { .attr_count = NFTA_OBJ_MAX, .policy = nft_obj_policy, }, + [NFT_MSG_NEWFLOWTABLE] = { + .call_batch = nf_tables_newflowtable, + .attr_count = NFTA_FLOWTABLE_MAX, + .policy = nft_flowtable_policy, + }, + [NFT_MSG_GETFLOWTABLE] = { + .call = nf_tables_getflowtable, + .attr_count = NFTA_FLOWTABLE_MAX, + .policy = nft_flowtable_policy, + }, + [NFT_MSG_DELFLOWTABLE] = { + .call_batch = nf_tables_delflowtable, + .attr_count = NFTA_FLOWTABLE_MAX, + .policy = nft_flowtable_policy, + }, }; static void nft_chain_commit_update(struct nft_trans *trans) @@ -5075,6 +5769,9 @@ static void nf_tables_commit_release(struct nft_trans *trans) case NFT_MSG_DELOBJ: nft_obj_destroy(nft_trans_obj(trans)); break; + case NFT_MSG_DELFLOWTABLE: + nf_tables_flowtable_destroy(nft_trans_flowtable(trans)); + break; } kfree(trans); } @@ -5127,10 +5824,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_DELCHAIN: list_del_rcu(&trans->ctx.chain->list); nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN); - nf_tables_unregister_hooks(trans->ctx.net, - trans->ctx.table, - trans->ctx.chain, - trans->ctx.afi->nops); + nf_tables_unregister_hook(trans->ctx.net, + trans->ctx.table, + trans->ctx.chain); break; case NFT_MSG_NEWRULE: nft_clear(trans->ctx.net, nft_trans_rule(trans)); @@ -5150,7 +5846,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) /* This avoids hitting -EBUSY when deleting the table * from the transaction. 
*/ - if (nft_trans_set(trans)->flags & NFT_SET_ANONYMOUS && + if (nft_set_is_anonymous(nft_trans_set(trans)) && !list_empty(&nft_trans_set(trans)->bindings)) trans->ctx.table->use--; @@ -5193,6 +5889,21 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans), NFT_MSG_DELOBJ); break; + case NFT_MSG_NEWFLOWTABLE: + nft_clear(net, nft_trans_flowtable(trans)); + nf_tables_flowtable_notify(&trans->ctx, + nft_trans_flowtable(trans), + NFT_MSG_NEWFLOWTABLE); + nft_trans_destroy(trans); + break; + case NFT_MSG_DELFLOWTABLE: + list_del_rcu(&nft_trans_flowtable(trans)->list); + nf_tables_flowtable_notify(&trans->ctx, + nft_trans_flowtable(trans), + NFT_MSG_DELFLOWTABLE); + nft_unregister_flowtable_net_hooks(net, + nft_trans_flowtable(trans)); + break; } } @@ -5230,6 +5941,9 @@ static void nf_tables_abort_release(struct nft_trans *trans) case NFT_MSG_NEWOBJ: nft_obj_destroy(nft_trans_obj(trans)); break; + case NFT_MSG_NEWFLOWTABLE: + nf_tables_flowtable_destroy(nft_trans_flowtable(trans)); + break; } kfree(trans); } @@ -5267,10 +5981,9 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) } else { trans->ctx.table->use--; list_del_rcu(&trans->ctx.chain->list); - nf_tables_unregister_hooks(trans->ctx.net, - trans->ctx.table, - trans->ctx.chain, - trans->ctx.afi->nops); + nf_tables_unregister_hook(trans->ctx.net, + trans->ctx.table, + trans->ctx.chain); } break; case NFT_MSG_DELCHAIN: @@ -5320,6 +6033,17 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb) nft_clear(trans->ctx.net, nft_trans_obj(trans)); nft_trans_destroy(trans); break; + case NFT_MSG_NEWFLOWTABLE: + trans->ctx.table->use--; + list_del_rcu(&nft_trans_flowtable(trans)->list); + nft_unregister_flowtable_net_hooks(net, + nft_trans_flowtable(trans)); + break; + case NFT_MSG_DELFLOWTABLE: + trans->ctx.table->use++; + nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); + nft_trans_destroy(trans); + break; } } @@ -5371,7 +6095,7 @@ int nft_chain_validate_hooks(const struct nft_chain *chain, if (nft_is_base_chain(chain)) { basechain = nft_base_chain(chain); - if ((1 << basechain->ops[0].hooknum) & hook_flags) + if ((1 << basechain->ops.hooknum) & hook_flags) return 0; return -EOPNOTSUPP; @@ -5859,8 +6583,7 @@ int __nft_release_basechain(struct nft_ctx *ctx) BUG_ON(!nft_is_base_chain(ctx->chain)); - nf_tables_unregister_hooks(ctx->net, ctx->chain->table, ctx->chain, - ctx->afi->nops); + nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain); list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { list_del(&rule->list); ctx->chain->use--; @@ -5877,6 +6600,7 @@ EXPORT_SYMBOL_GPL(__nft_release_basechain); /* Called by nft_unregister_afinfo() from __net_exit path, nfnl_lock is held. */ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi) { + struct nft_flowtable *flowtable, *nf; struct nft_table *table, *nt; struct nft_chain *chain, *nc; struct nft_object *obj, *ne; @@ -5889,8 +6613,10 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi) list_for_each_entry_safe(table, nt, &afi->tables, list) { list_for_each_entry(chain, &table->chains, list) - nf_tables_unregister_hooks(net, table, chain, - afi->nops); + nf_tables_unregister_hook(net, table, chain); + list_for_each_entry(flowtable, &table->flowtables, list) + nf_unregister_net_hooks(net, flowtable->ops, + flowtable->ops_len); /* No packets are walking on these chains anymore. 
*/ ctx.table = table; list_for_each_entry(chain, &table->chains, list) { @@ -5901,6 +6627,11 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi) nf_tables_rule_destroy(&ctx, rule); } } + list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { + list_del(&flowtable->list); + table->use--; + nf_tables_flowtable_destroy(flowtable); + } list_for_each_entry_safe(set, ns, &table->sets, list) { list_del(&set->list); table->use--; @@ -5945,6 +6676,8 @@ static int __init nf_tables_module_init(void) if (err < 0) goto err3; + register_netdevice_notifier(&nf_tables_flowtable_notifier); + pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n"); return register_pernet_subsys(&nf_tables_net_ops); err3: @@ -5959,6 +6692,7 @@ static void __exit nf_tables_module_exit(void) { unregister_pernet_subsys(&nf_tables_net_ops); nfnetlink_subsys_unregister(&nf_tables_subsys); + unregister_netdevice_notifier(&nf_tables_flowtable_notifier); rcu_barrier(); nf_tables_core_module_exit(); kfree(info); diff --git a/net/netfilter/nf_tables_inet.c b/net/netfilter/nf_tables_inet.c index f713cc205669..58b9be7480bb 100644 --- a/net/netfilter/nf_tables_inet.c +++ b/net/netfilter/nf_tables_inet.c @@ -9,6 +9,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/ip.h> +#include <linux/ipv6.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_tables.h> @@ -16,26 +17,31 @@ #include <net/netfilter/nf_tables_ipv6.h> #include <net/ip.h> -static void nft_inet_hook_ops_init(struct nf_hook_ops *ops, unsigned int n) +static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state) { - struct nft_af_info *afi; - - if (n == 1) - afi = &nft_af_ipv4; - else - afi = &nft_af_ipv6; - - ops->pf = afi->family; - if (afi->hooks[ops->hooknum]) - ops->hook = afi->hooks[ops->hooknum]; + struct nft_pktinfo pkt; + + nft_set_pktinfo(&pkt, skb, state); + + switch (state->pf) { + case NFPROTO_IPV4: + nft_set_pktinfo_ipv4(&pkt, skb); + break; + case NFPROTO_IPV6: + nft_set_pktinfo_ipv6(&pkt, skb); + break; + default: + break; + } + + return nft_do_chain(&pkt, priv); } static struct nft_af_info nft_af_inet __read_mostly = { .family = NFPROTO_INET, .nhooks = NF_INET_NUMHOOKS, .owner = THIS_MODULE, - .nops = 2, - .hook_ops_init = nft_inet_hook_ops_init, }; static int __net_init nf_tables_inet_init_net(struct net *net) @@ -76,6 +82,13 @@ static const struct nf_chain_type filter_inet = { (1 << NF_INET_FORWARD) | (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_POST_ROUTING), + .hooks = { + [NF_INET_LOCAL_IN] = nft_do_chain_inet, + [NF_INET_LOCAL_OUT] = nft_do_chain_inet, + [NF_INET_FORWARD] = nft_do_chain_inet, + [NF_INET_PRE_ROUTING] = nft_do_chain_inet, + [NF_INET_POST_ROUTING] = nft_do_chain_inet, + }, }; static int __init nf_tables_inet_init(void) diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c index 403432988313..42f6f6d42a6d 100644 --- a/net/netfilter/nf_tables_netdev.c +++ b/net/netfilter/nf_tables_netdev.c @@ -21,15 +21,17 @@ nft_do_chain_netdev(void *priv, struct sk_buff *skb, { struct nft_pktinfo pkt; + nft_set_pktinfo(&pkt, skb, state); + switch (skb->protocol) { case htons(ETH_P_IP): - nft_set_pktinfo_ipv4_validate(&pkt, skb, state); + nft_set_pktinfo_ipv4_validate(&pkt, skb); break; case htons(ETH_P_IPV6): - nft_set_pktinfo_ipv6_validate(&pkt, skb, state); + nft_set_pktinfo_ipv6_validate(&pkt, skb); break; default: - nft_set_pktinfo_unspec(&pkt, skb, state); + 
nft_set_pktinfo_unspec(&pkt, skb); break; } @@ -41,10 +43,6 @@ static struct nft_af_info nft_af_netdev __read_mostly = { .nhooks = NF_NETDEV_NUMHOOKS, .owner = THIS_MODULE, .flags = NFT_AF_NEEDS_DEV, - .nops = 1, - .hooks = { - [NF_NETDEV_INGRESS] = nft_do_chain_netdev, - }, }; static int nf_tables_netdev_init_net(struct net *net) @@ -81,6 +79,9 @@ static const struct nf_chain_type nft_filter_chain_netdev = { .family = NFPROTO_NETDEV, .owner = THIS_MODULE, .hook_mask = (1 << NF_NETDEV_INGRESS), + .hooks = { + [NF_NETDEV_INGRESS] = nft_do_chain_netdev, + }, }; static void nft_netdev_event(unsigned long event, struct net_device *dev, @@ -96,7 +97,7 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev, __nft_release_basechain(ctx); break; case NETDEV_CHANGENAME: - if (dev->ifindex != basechain->ops[0].dev->ifindex) + if (dev->ifindex != basechain->ops.dev->ifindex) return; strncpy(basechain->dev_name, dev->name, IFNAMSIZ); diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index c09b36755ed7..2db35f2d553d 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -941,23 +941,18 @@ static struct notifier_block nfqnl_dev_notifier = { .notifier_call = nfqnl_rcv_dev_event, }; -static unsigned int nfqnl_nf_hook_drop(struct net *net) +static void nfqnl_nf_hook_drop(struct net *net) { struct nfnl_queue_net *q = nfnl_queue_pernet(net); - unsigned int instances = 0; int i; for (i = 0; i < INSTANCE_BUCKETS; i++) { struct nfqnl_instance *inst; struct hlist_head *head = &q->instance_table[i]; - hlist_for_each_entry_rcu(inst, head, hlist) { + hlist_for_each_entry_rcu(inst, head, hlist) nfqnl_flush(inst, NULL, 0); - instances++; - } } - - return instances; } static int diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index c2945eb3397c..fa90a8402845 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -44,6 +44,7 @@ static void nft_cmp_eval(const struct nft_expr *expr, case NFT_CMP_LT: if (d == 0) goto mismatch; + /* fall through */ case NFT_CMP_LTE: if (d > 0) goto mismatch; @@ -51,6 +52,7 @@ static void nft_cmp_eval(const struct nft_expr *expr, case NFT_CMP_GT: if (d == 0) goto mismatch; + /* fall through */ case NFT_CMP_GTE: if (d < 0) goto mismatch; diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index b89f4f65b2a0..dcff0dc8d28b 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -169,7 +169,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); - const struct nf_hook_ops *ops = &basechain->ops[0]; + const struct nf_hook_ops *ops = &basechain->ops; par->hook_mask = 1 << ops->hooknum; } else { @@ -302,7 +302,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); - const struct nf_hook_ops *ops = &basechain->ops[0]; + const struct nf_hook_ops *ops = &basechain->ops; hook_mask = 1 << ops->hooknum; if (target->hooks && !(hook_mask & target->hooks)) @@ -383,7 +383,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); - const struct nf_hook_ops *ops = &basechain->ops[0]; + const struct nf_hook_ops *ops = &basechain->ops; par->hook_mask = 1 << ops->hooknum; } else { @@ -481,7 +481,7 @@ static int 
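/*
 * Base chains now embed a single nf_hook_ops instead of an array, so
 * every nft_compat site that derives a hook mask follows the same
 * pattern — sketched here once for all four hunks in this file:
 *
 *	if (nft_is_base_chain(ctx->chain)) {
 *		const struct nft_base_chain *basechain =
 *			nft_base_chain(ctx->chain);
 *
 *		hook_mask = 1 << basechain->ops.hooknum;
 *	}
 */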
nft_match_validate(const struct nft_ctx *ctx, if (nft_is_base_chain(ctx->chain)) { const struct nft_base_chain *basechain = nft_base_chain(ctx->chain); - const struct nf_hook_ops *ops = &basechain->ops[0]; + const struct nf_hook_ops *ops = &basechain->ops; hook_mask = 1 << ops->hooknum; if (match->hooks && !(hook_mask & match->hooks)) diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 66221ad891a9..ec0fd78231d8 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -184,7 +184,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (tb[NFTA_DYNSET_EXPR] != NULL) { if (!(set->flags & NFT_SET_EVAL)) return -EINVAL; - if (!(set->flags & NFT_SET_ANONYMOUS)) + if (!nft_set_is_anonymous(set)) return -EOPNOTSUPP; priv->expr = nft_expr_init(ctx, tb[NFTA_DYNSET_EXPR]); diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c new file mode 100644 index 000000000000..dd38785dfed9 --- /dev/null +++ b/net/netfilter/nft_flow_offload.c @@ -0,0 +1,264 @@ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/netlink.h> +#include <linux/netfilter.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/netfilter/nf_tables.h> +#include <net/ip.h> /* for ipv4 options. */ +#include <net/netfilter/nf_tables.h> +#include <net/netfilter/nf_tables_core.h> +#include <net/netfilter/nf_conntrack_core.h> +#include <linux/netfilter/nf_conntrack_common.h> +#include <net/netfilter/nf_flow_table.h> + +struct nft_flow_offload { + struct nft_flowtable *flowtable; +}; + +static int nft_flow_route(const struct nft_pktinfo *pkt, + const struct nf_conn *ct, + struct nf_flow_route *route, + enum ip_conntrack_dir dir) +{ + struct dst_entry *this_dst = skb_dst(pkt->skb); + struct dst_entry *other_dst = NULL; + struct flowi fl; + + memset(&fl, 0, sizeof(fl)); + switch (nft_pf(pkt)) { + case NFPROTO_IPV4: + fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip; + break; + case NFPROTO_IPV6: + fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6; + break; + } + + nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt)); + if (!other_dst) + return -ENOENT; + + route->tuple[dir].dst = this_dst; + route->tuple[dir].ifindex = nft_in(pkt)->ifindex; + route->tuple[!dir].dst = other_dst; + route->tuple[!dir].ifindex = nft_out(pkt)->ifindex; + + return 0; +} + +static bool nft_flow_offload_skip(struct sk_buff *skb) +{ + struct ip_options *opt = &(IPCB(skb)->opt); + + if (unlikely(opt->optlen)) + return true; + if (skb_sec_path(skb)) + return true; + + return false; +} + +static void nft_flow_offload_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + struct nft_flow_offload *priv = nft_expr_priv(expr); + struct nf_flowtable *flowtable = &priv->flowtable->data; + enum ip_conntrack_info ctinfo; + struct nf_flow_route route; + struct flow_offload *flow; + enum ip_conntrack_dir dir; + struct nf_conn *ct; + int ret; + + if (nft_flow_offload_skip(pkt->skb)) + goto out; + + ct = nf_ct_get(pkt->skb, &ctinfo); + if (!ct) + goto out; + + switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) { + case IPPROTO_TCP: + case IPPROTO_UDP: + break; + default: + goto out; + } + + if (test_bit(IPS_HELPER_BIT, &ct->status)) + goto out; + + if (ctinfo == IP_CT_NEW || + ctinfo == IP_CT_RELATED) + goto out; + + if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status)) + goto out; + + dir = CTINFO2DIR(ctinfo); + if (nft_flow_route(pkt, ct, &route, dir) < 0) + goto err_flow_route; 
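/*
 * The labels below unwind in reverse order of construction: a failed
 * flow_offload_add() frees the flow, a failed flow_offload_alloc()
 * releases the route reference taken by nft_flow_route(), and every
 * error path clears the IPS_OFFLOAD_BIT claimed above with
 * test_and_set_bit(), so the connection can be offloaded again later.
 * Every failure then falls through to the NFT_BREAK verdict at out.
 */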
+ + flow = flow_offload_alloc(ct, &route); + if (!flow) + goto err_flow_alloc; + + ret = flow_offload_add(flowtable, flow); + if (ret < 0) + goto err_flow_add; + + return; + +err_flow_add: + flow_offload_free(flow); +err_flow_alloc: + dst_release(route.tuple[!dir].dst); +err_flow_route: + clear_bit(IPS_OFFLOAD_BIT, &ct->status); +out: + regs->verdict.code = NFT_BREAK; +} + +static int nft_flow_offload_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + unsigned int hook_mask = (1 << NF_INET_FORWARD); + + return nft_chain_validate_hooks(ctx->chain, hook_mask); +} + +static int nft_flow_offload_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_flow_offload *priv = nft_expr_priv(expr); + u8 genmask = nft_genmask_next(ctx->net); + struct nft_flowtable *flowtable; + + if (!tb[NFTA_FLOW_TABLE_NAME]) + return -EINVAL; + + flowtable = nf_tables_flowtable_lookup(ctx->table, + tb[NFTA_FLOW_TABLE_NAME], + genmask); + if (IS_ERR(flowtable)) + return PTR_ERR(flowtable); + + priv->flowtable = flowtable; + flowtable->use++; + + return nf_ct_netns_get(ctx->net, ctx->afi->family); +} + +static void nft_flow_offload_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + struct nft_flow_offload *priv = nft_expr_priv(expr); + + priv->flowtable->use--; + nf_ct_netns_put(ctx->net, ctx->afi->family); +} + +static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + struct nft_flow_offload *priv = nft_expr_priv(expr); + + if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name)) + goto nla_put_failure; + + return 0; + +nla_put_failure: + return -1; +} + +static struct nft_expr_type nft_flow_offload_type; +static const struct nft_expr_ops nft_flow_offload_ops = { + .type = &nft_flow_offload_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)), + .eval = nft_flow_offload_eval, + .init = nft_flow_offload_init, + .destroy = nft_flow_offload_destroy, + .validate = nft_flow_offload_validate, + .dump = nft_flow_offload_dump, +}; + +static struct nft_expr_type nft_flow_offload_type __read_mostly = { + .name = "flow_offload", + .ops = &nft_flow_offload_ops, + .maxattr = NFTA_FLOW_MAX, + .owner = THIS_MODULE, +}; + +static void flow_offload_iterate_cleanup(struct flow_offload *flow, void *data) +{ + struct net_device *dev = data; + + if (dev && flow->tuplehash[0].tuple.iifidx != dev->ifindex) + return; + + flow_offload_dead(flow); +} + +static void nft_flow_offload_iterate_cleanup(struct nf_flowtable *flowtable, + void *data) +{ + nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data); +} + +static int flow_offload_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + if (event != NETDEV_DOWN) + return NOTIFY_DONE; + + nft_flow_table_iterate(dev_net(dev), nft_flow_offload_iterate_cleanup, dev); + + return NOTIFY_DONE; +} + +static struct notifier_block flow_offload_netdev_notifier = { + .notifier_call = flow_offload_netdev_event, +}; + +static int __init nft_flow_offload_module_init(void) +{ + int err; + + register_netdevice_notifier(&flow_offload_netdev_notifier); + + err = nft_register_expr(&nft_flow_offload_type); + if (err < 0) + goto register_expr; + + return 0; + +register_expr: + unregister_netdevice_notifier(&flow_offload_netdev_notifier); + return err; +} + +static void __exit nft_flow_offload_module_exit(void) +{ + struct net *net; 
+ + nft_unregister_expr(&nft_flow_offload_type); + unregister_netdevice_notifier(&flow_offload_netdev_notifier); + rtnl_lock(); + for_each_net(net) + nft_flow_table_iterate(net, nft_flow_offload_iterate_cleanup, NULL); + rtnl_unlock(); +} + +module_init(nft_flow_offload_module_init); +module_exit(nft_flow_offload_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>"); +MODULE_ALIAS_NFT_EXPR("flow_offload"); diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 5a60eb23a7ed..1a91e676f13e 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -210,6 +210,11 @@ void nft_meta_get_eval(const struct nft_expr *expr, *dest = prandom_u32_state(state); break; } +#ifdef CONFIG_XFRM + case NFT_META_SECPATH: + nft_reg_store8(dest, !!skb->sp); + break; +#endif default: WARN_ON(1); goto err; @@ -308,6 +313,11 @@ int nft_meta_get_init(const struct nft_ctx *ctx, prandom_init_once(&nft_prandom_state); len = sizeof(u32); break; +#ifdef CONFIG_XFRM + case NFT_META_SECPATH: + len = sizeof(u8); + break; +#endif default: return -EOPNOTSUPP; } @@ -318,6 +328,38 @@ int nft_meta_get_init(const struct nft_ctx *ctx, } EXPORT_SYMBOL_GPL(nft_meta_get_init); +static int nft_meta_get_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ +#ifdef CONFIG_XFRM + const struct nft_meta *priv = nft_expr_priv(expr); + unsigned int hooks; + + if (priv->key != NFT_META_SECPATH) + return 0; + + switch (ctx->afi->family) { + case NFPROTO_NETDEV: + hooks = 1 << NF_NETDEV_INGRESS; + break; + case NFPROTO_IPV4: + case NFPROTO_IPV6: + case NFPROTO_INET: + hooks = (1 << NF_INET_PRE_ROUTING) | + (1 << NF_INET_LOCAL_IN) | + (1 << NF_INET_FORWARD); + break; + default: + return -EOPNOTSUPP; + } + + return nft_chain_validate_hooks(ctx->chain, hooks); +#else + return 0; +#endif +} + int nft_meta_set_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) @@ -434,6 +476,7 @@ static const struct nft_expr_ops nft_meta_get_ops = { .eval = nft_meta_get_eval, .init = nft_meta_get_init, .dump = nft_meta_get_dump, + .validate = nft_meta_get_validate, }; static const struct nft_expr_ops nft_meta_set_ops = { diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c index a6b7d05aeacf..11a2071b6dd4 100644 --- a/net/netfilter/nft_rt.c +++ b/net/netfilter/nft_rt.c @@ -27,7 +27,7 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb { u32 minlen = sizeof(struct ipv6hdr), mtu = dst_mtu(skbdst); const struct sk_buff *skb = pkt->skb; - const struct nf_afinfo *ai; + struct dst_entry *dst = NULL; struct flowi fl; memset(&fl, 0, sizeof(fl)); @@ -43,15 +43,10 @@ static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skb break; } - ai = nf_get_afinfo(nft_pf(pkt)); - if (ai) { - struct dst_entry *dst = NULL; - - ai->route(nft_net(pkt), &dst, &fl, false); - if (dst) { - mtu = min(mtu, dst_mtu(dst)); - dst_release(dst); - } + nf_route(nft_net(pkt), &dst, &fl, false, nft_pf(pkt)); + if (dst) { + mtu = min(mtu, dst_mtu(dst)); + dst_release(dst); } if (mtu <= minlen || mtu > 0xffff) diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c new file mode 100644 index 000000000000..0b660c568156 --- /dev/null +++ b/net/netfilter/utils.c @@ -0,0 +1,90 @@ +#include <linux/kernel.h> +#include <linux/netfilter.h> +#include <linux/netfilter_ipv4.h> +#include <linux/netfilter_ipv6.h> +#include <net/netfilter/nf_queue.h> + +__sum16 nf_checksum(struct sk_buff *skb, 
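/*
 * These helpers replace the old nf_afinfo indirection: callers pass
 * the family explicitly and IPv6 is reached through nf_ipv6_ops. A
 * converted route lookup then reads as in the nft_rt.c and xt_TCPMSS.c
 * hunks of this patch — sketch for the IPv4 case:
 *
 *	struct dst_entry *dst = NULL;
 *	struct flowi fl;
 *
 *	memset(&fl, 0, sizeof(fl));
 *	fl.u.ip4.daddr = ip_hdr(skb)->saddr;
 *	nf_route(net, &dst, &fl, false, NFPROTO_IPV4);
 *	if (dst) {
 *		mtu = min(mtu, dst_mtu(dst));
 *		dst_release(dst);
 *	}
 */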
unsigned int hook, + unsigned int dataoff, u_int8_t protocol, + unsigned short family) +{ + const struct nf_ipv6_ops *v6ops; + __sum16 csum = 0; + + switch (family) { + case AF_INET: + csum = nf_ip_checksum(skb, hook, dataoff, protocol); + break; + case AF_INET6: + v6ops = rcu_dereference(nf_ipv6_ops); + if (v6ops) + csum = v6ops->checksum(skb, hook, dataoff, protocol); + break; + } + + return csum; +} +EXPORT_SYMBOL_GPL(nf_checksum); + +__sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, + unsigned int dataoff, unsigned int len, + u_int8_t protocol, unsigned short family) +{ + const struct nf_ipv6_ops *v6ops; + __sum16 csum = 0; + + switch (family) { + case AF_INET: + csum = nf_ip_checksum_partial(skb, hook, dataoff, len, + protocol); + break; + case AF_INET6: + v6ops = rcu_dereference(nf_ipv6_ops); + if (v6ops) + csum = v6ops->checksum_partial(skb, hook, dataoff, len, + protocol); + break; + } + + return csum; +} +EXPORT_SYMBOL_GPL(nf_checksum_partial); + +int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, + bool strict, unsigned short family) +{ + const struct nf_ipv6_ops *v6ops; + int ret = 0; + + switch (family) { + case AF_INET: + ret = nf_ip_route(net, dst, fl, strict); + break; + case AF_INET6: + v6ops = rcu_dereference(nf_ipv6_ops); + if (v6ops) + ret = v6ops->route(net, dst, fl, strict); + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(nf_route); + +int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry) +{ + const struct nf_ipv6_ops *v6ops; + int ret = 0; + + switch (entry->state.pf) { + case AF_INET: + ret = nf_ip_reroute(skb, entry); + break; + case AF_INET6: + v6ops = rcu_dereference(nf_ipv6_ops); + if (v6ops) + ret = v6ops->reroute(skb, entry); + break; + } + return ret; +} diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 55802e97f906..10c19a3f4cbd 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1027,7 +1027,7 @@ void xt_free_table_info(struct xt_table_info *info) } EXPORT_SYMBOL(xt_free_table_info); -/* Find table by name, grabs mutex & ref. Returns NULL on error. */ +/* Find table by name, grabs mutex & ref. Returns ERR_PTR on error. 
*/
 struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 				    const char *name)
 {
@@ -1043,17 +1043,17 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 	/* Table doesn't exist in this netns, re-try init */
 	list_for_each_entry(t, &init_net.xt.tables[af], list) {
+		int err;
+
 		if (strcmp(t->name, name))
 			continue;
-		if (!try_module_get(t->me)) {
-			mutex_unlock(&xt[af].mutex);
-			return NULL;
-		}
-
+		if (!try_module_get(t->me))
+			goto out;
 		mutex_unlock(&xt[af].mutex);
-		if (t->table_init(net) != 0) {
+		err = t->table_init(net);
+		if (err < 0) {
 			module_put(t->me);
-			return NULL;
+			return ERR_PTR(err);
 		}
 
 		found = t;
@@ -1073,10 +1073,28 @@ struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
 	module_put(found->me);
 out:
 	mutex_unlock(&xt[af].mutex);
-	return NULL;
+	return ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL_GPL(xt_find_table_lock);
 
+struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
+					    const char *name)
+{
+	struct xt_table *t = xt_find_table_lock(net, af, name);
+
+#ifdef CONFIG_MODULES
+	if (IS_ERR(t)) {
+		int err = request_module("%stable_%s", xt_prefix[af], name);
+		if (err)
+			return ERR_PTR(err);
+		t = xt_find_table_lock(net, af, name);
+	}
+#endif
+
+	return t;
+}
+EXPORT_SYMBOL_GPL(xt_request_find_table_lock);
+
 void xt_table_unlock(struct xt_table *table)
 {
 	mutex_unlock(&xt[table->af].mutex);
@@ -1397,7 +1415,7 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
 			trav->curr = trav->curr->next;
 			if (trav->curr != trav->head)
 				break;
-			/* fallthru, _stop will unlock */
+			/* fall through */
 		default:
 			return NULL;
 	}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 9dae4d665965..99bb8e410f22 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -48,7 +48,6 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
 				    unsigned int family)
 {
 	struct flowi fl;
-	const struct nf_afinfo *ai;
 	struct rtable *rt = NULL;
 	u_int32_t mtu = ~0U;
 
@@ -62,10 +61,8 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net,
 		memset(fl6, 0, sizeof(*fl6));
 		fl6->daddr = ipv6_hdr(skb)->saddr;
 	}
-	ai = nf_get_afinfo(family);
-	if (ai != NULL)
-		ai->route(net, (struct dst_entry **)&rt, &fl, false);
+	nf_route(net, (struct dst_entry **)&rt, &fl, false, family);
 	if (rt != NULL) {
 		mtu = dst_mtu(&rt->dst);
 		dst_release(&rt->dst);
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 3b2be2ae6987..911a7c0da504 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -36,7 +36,7 @@ MODULE_ALIAS("ip6t_addrtype");
 static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 			    const struct in6_addr *addr, u16 mask)
 {
-	const struct nf_afinfo *afinfo;
+	const struct nf_ipv6_ops *v6ops;
 	struct flowi6 flow;
 	struct rt6_info *rt;
 	u32 ret = 0;
@@ -47,17 +47,14 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
 	if (dev)
 		flow.flowi6_oif = dev->ifindex;
 
-	afinfo = nf_get_afinfo(NFPROTO_IPV6);
-	if (afinfo != NULL) {
-		const struct nf_ipv6_ops *v6ops;
-
+	v6ops = nf_get_ipv6_ops();
+	if (v6ops) {
 		if (dev && (mask & XT_ADDRTYPE_LOCAL)) {
-			v6ops = nf_get_ipv6_ops();
-			if (v6ops && v6ops->chk_addr(net, addr, dev, true))
+			if (v6ops->chk_addr(net, addr, dev, true))
 				ret = XT_ADDRTYPE_LOCAL;
 		}
-		route_err = afinfo->route(net, (struct dst_entry **)&rt,
-					  flowi6_to_flowi(&flow), false);
+		route_err = v6ops->route(net, (struct dst_entry **)&rt,
+					 flowi6_to_flowi(&flow), false);
 	} else {
 		route_err = 1;
 	}
diff --git a/net/netfilter/xt_bpf.c
b/net/netfilter/xt_bpf.c index 1f7fbd3c7e5a..06b090d8e901 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c @@ -55,21 +55,11 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret) static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) { - mm_segment_t oldfs = get_fs(); - int retval, fd; - if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX) return -EINVAL; - set_fs(KERNEL_DS); - fd = bpf_obj_get_user(path, 0); - set_fs(oldfs); - if (fd < 0) - return fd; - - retval = __bpf_mt_check_fd(fd, ret); - sys_close(fd); - return retval; + *ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER); + return PTR_ERR_OR_ZERO(*ret); } static int bpf_mt_check(const struct xt_mtchk_param *par) diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index a6214f235333..b1b17b9353e1 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -12,292 +12,30 @@ * GPL (C) 1999 Rusty Russell (rusty@rustcorp.com.au). */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/in.h> -#include <linux/in6.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/jhash.h> -#include <linux/slab.h> -#include <linux/list.h> -#include <linux/rbtree.h> + #include <linux/module.h> -#include <linux/random.h> #include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/netfilter/nf_conntrack_tcp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_connlimit.h> + #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_tuple.h> #include <net/netfilter/nf_conntrack_zones.h> - -#define CONNLIMIT_SLOTS 256U - -#ifdef CONFIG_LOCKDEP -#define CONNLIMIT_LOCK_SLOTS 8U -#else -#define CONNLIMIT_LOCK_SLOTS 256U -#endif - -#define CONNLIMIT_GC_MAX_NODES 8 - -/* we will save the tuples of all connections we care about */ -struct xt_connlimit_conn { - struct hlist_node node; - struct nf_conntrack_tuple tuple; -}; - -struct xt_connlimit_rb { - struct rb_node node; - struct hlist_head hhead; /* connections/hosts in same subnet */ - union nf_inet_addr addr; /* search key */ -}; - -static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp; - -struct xt_connlimit_data { - struct rb_root climit_root[CONNLIMIT_SLOTS]; -}; - -static u_int32_t connlimit_rnd __read_mostly; -static struct kmem_cache *connlimit_rb_cachep __read_mostly; -static struct kmem_cache *connlimit_conn_cachep __read_mostly; - -static inline unsigned int connlimit_iphash(__be32 addr) -{ - return jhash_1word((__force __u32)addr, - connlimit_rnd) % CONNLIMIT_SLOTS; -} - -static inline unsigned int -connlimit_iphash6(const union nf_inet_addr *addr) -{ - return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), - connlimit_rnd) % CONNLIMIT_SLOTS; -} - -static inline bool already_closed(const struct nf_conn *conn) -{ - if (nf_ct_protonum(conn) == IPPROTO_TCP) - return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT || - conn->proto.tcp.state == TCP_CONNTRACK_CLOSE; - else - return 0; -} - -static int -same_source(const union nf_inet_addr *addr, - const union nf_inet_addr *u3, u_int8_t family) -{ - if (family == NFPROTO_IPV4) - return ntohl(addr->ip) - ntohl(u3->ip); - - return memcmp(addr->ip6, u3->ip6, sizeof(addr->ip6)); -} - -static bool add_hlist(struct hlist_head *head, - const struct nf_conntrack_tuple *tuple, - const union nf_inet_addr *addr) -{ - struct xt_connlimit_conn *conn; - - conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC); - if (conn == 
NULL) - return false; - conn->tuple = *tuple; - hlist_add_head(&conn->node, head); - return true; -} - -static unsigned int check_hlist(struct net *net, - struct hlist_head *head, - const struct nf_conntrack_tuple *tuple, - const struct nf_conntrack_zone *zone, - bool *addit) -{ - const struct nf_conntrack_tuple_hash *found; - struct xt_connlimit_conn *conn; - struct hlist_node *n; - struct nf_conn *found_ct; - unsigned int length = 0; - - *addit = true; - - /* check the saved connections */ - hlist_for_each_entry_safe(conn, n, head, node) { - found = nf_conntrack_find_get(net, zone, &conn->tuple); - if (found == NULL) { - hlist_del(&conn->node); - kmem_cache_free(connlimit_conn_cachep, conn); - continue; - } - - found_ct = nf_ct_tuplehash_to_ctrack(found); - - if (nf_ct_tuple_equal(&conn->tuple, tuple)) { - /* - * Just to be sure we have it only once in the list. - * We should not see tuples twice unless someone hooks - * this into a table without "-p tcp --syn". - */ - *addit = false; - } else if (already_closed(found_ct)) { - /* - * we do not care about connections which are - * closed already -> ditch it - */ - nf_ct_put(found_ct); - hlist_del(&conn->node); - kmem_cache_free(connlimit_conn_cachep, conn); - continue; - } - - nf_ct_put(found_ct); - length++; - } - - return length; -} - -static void tree_nodes_free(struct rb_root *root, - struct xt_connlimit_rb *gc_nodes[], - unsigned int gc_count) -{ - struct xt_connlimit_rb *rbconn; - - while (gc_count) { - rbconn = gc_nodes[--gc_count]; - rb_erase(&rbconn->node, root); - kmem_cache_free(connlimit_rb_cachep, rbconn); - } -} - -static unsigned int -count_tree(struct net *net, struct rb_root *root, - const struct nf_conntrack_tuple *tuple, - const union nf_inet_addr *addr, - u8 family, const struct nf_conntrack_zone *zone) -{ - struct xt_connlimit_rb *gc_nodes[CONNLIMIT_GC_MAX_NODES]; - struct rb_node **rbnode, *parent; - struct xt_connlimit_rb *rbconn; - struct xt_connlimit_conn *conn; - unsigned int gc_count; - bool no_gc = false; - - restart: - gc_count = 0; - parent = NULL; - rbnode = &(root->rb_node); - while (*rbnode) { - int diff; - bool addit; - - rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node); - - parent = *rbnode; - diff = same_source(addr, &rbconn->addr, family); - if (diff < 0) { - rbnode = &((*rbnode)->rb_left); - } else if (diff > 0) { - rbnode = &((*rbnode)->rb_right); - } else { - /* same source network -> be counted! */ - unsigned int count; - count = check_hlist(net, &rbconn->hhead, tuple, zone, &addit); - - tree_nodes_free(root, gc_nodes, gc_count); - if (!addit) - return count; - - if (!add_hlist(&rbconn->hhead, tuple, addr)) - return 0; /* hotdrop */ - - return count + 1; - } - - if (no_gc || gc_count >= ARRAY_SIZE(gc_nodes)) - continue; - - /* only used for GC on hhead, retval and 'addit' ignored */ - check_hlist(net, &rbconn->hhead, tuple, zone, &addit); - if (hlist_empty(&rbconn->hhead)) - gc_nodes[gc_count++] = rbconn; - } - - if (gc_count) { - no_gc = true; - tree_nodes_free(root, gc_nodes, gc_count); - /* tree_node_free before new allocation permits - * allocator to re-use newly free'd object. - * - * This is a rare event; in most cases we will find - * existing node to re-use. (or gc_count is 0). 
- */ - goto restart; - } - - /* no match, need to insert new node */ - rbconn = kmem_cache_alloc(connlimit_rb_cachep, GFP_ATOMIC); - if (rbconn == NULL) - return 0; - - conn = kmem_cache_alloc(connlimit_conn_cachep, GFP_ATOMIC); - if (conn == NULL) { - kmem_cache_free(connlimit_rb_cachep, rbconn); - return 0; - } - - conn->tuple = *tuple; - rbconn->addr = *addr; - - INIT_HLIST_HEAD(&rbconn->hhead); - hlist_add_head(&conn->node, &rbconn->hhead); - - rb_link_node(&rbconn->node, parent, rbnode); - rb_insert_color(&rbconn->node, root); - return 1; -} - -static int count_them(struct net *net, - struct xt_connlimit_data *data, - const struct nf_conntrack_tuple *tuple, - const union nf_inet_addr *addr, - u_int8_t family, - const struct nf_conntrack_zone *zone) -{ - struct rb_root *root; - int count; - u32 hash; - - if (family == NFPROTO_IPV6) - hash = connlimit_iphash6(addr); - else - hash = connlimit_iphash(addr->ip); - root = &data->climit_root[hash]; - - spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]); - - count = count_tree(net, root, tuple, addr, family, zone); - - spin_unlock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]); - - return count; -} +#include <net/netfilter/nf_conntrack_count.h> static bool connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct net *net = xt_net(par); const struct xt_connlimit_info *info = par->matchinfo; - union nf_inet_addr addr; struct nf_conntrack_tuple tuple; const struct nf_conntrack_tuple *tuple_ptr = &tuple; const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; enum ip_conntrack_info ctinfo; const struct nf_conn *ct; unsigned int connections; + u32 key[5]; ct = nf_ct_get(skb, &ctinfo); if (ct != NULL) { @@ -310,6 +48,7 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) if (xt_family(par) == NFPROTO_IPV6) { const struct ipv6hdr *iph = ipv6_hdr(skb); + union nf_inet_addr addr; unsigned int i; memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ? @@ -317,22 +56,24 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) for (i = 0; i < ARRAY_SIZE(addr.ip6); ++i) addr.ip6[i] &= info->mask.ip6[i]; + memcpy(key, &addr, sizeof(addr.ip6)); + key[4] = zone->id; } else { const struct iphdr *iph = ip_hdr(skb); - addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ? + key[0] = (info->flags & XT_CONNLIMIT_DADDR) ? 
iph->daddr : iph->saddr; - addr.ip &= info->mask.ip; + key[0] &= info->mask.ip; + key[1] = zone->id; } - connections = count_them(net, info->data, tuple_ptr, &addr, - xt_family(par), zone); + connections = nf_conncount_count(net, info->data, key, + xt_family(par), tuple_ptr, zone); if (connections == 0) /* kmalloc failed, drop it entirely */ goto hotdrop; - return (connections > info->limit) ^ - !!(info->flags & XT_CONNLIMIT_INVERT); + return (connections > info->limit) ^ !!(info->flags & XT_CONNLIMIT_INVERT); hotdrop: par->hotdrop = true; @@ -342,61 +83,27 @@ connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) static int connlimit_mt_check(const struct xt_mtchk_param *par) { struct xt_connlimit_info *info = par->matchinfo; - unsigned int i; - int ret; + unsigned int keylen; - net_get_random_once(&connlimit_rnd, sizeof(connlimit_rnd)); - - ret = nf_ct_netns_get(par->net, par->family); - if (ret < 0) { - pr_info("cannot load conntrack support for " - "address family %u\n", par->family); - return ret; - } + keylen = sizeof(u32); + if (par->family == NFPROTO_IPV6) + keylen += sizeof(struct in6_addr); + else + keylen += sizeof(struct in_addr); /* init private data */ - info->data = kmalloc(sizeof(struct xt_connlimit_data), GFP_KERNEL); - if (info->data == NULL) { - nf_ct_netns_put(par->net, par->family); - return -ENOMEM; - } - - for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i) - info->data->climit_root[i] = RB_ROOT; + info->data = nf_conncount_init(par->net, par->family, keylen); + if (IS_ERR(info->data)) + return PTR_ERR(info->data); return 0; } -static void destroy_tree(struct rb_root *r) -{ - struct xt_connlimit_conn *conn; - struct xt_connlimit_rb *rbconn; - struct hlist_node *n; - struct rb_node *node; - - while ((node = rb_first(r)) != NULL) { - rbconn = rb_entry(node, struct xt_connlimit_rb, node); - - rb_erase(node, r); - - hlist_for_each_entry_safe(conn, n, &rbconn->hhead, node) - kmem_cache_free(connlimit_conn_cachep, conn); - - kmem_cache_free(connlimit_rb_cachep, rbconn); - } -} - static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_connlimit_info *info = par->matchinfo; - unsigned int i; - - nf_ct_netns_put(par->net, par->family); - - for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i) - destroy_tree(&info->data->climit_root[i]); - kfree(info->data); + nf_conncount_destroy(par->net, par->family, info->data); } static struct xt_match connlimit_mt_reg __read_mostly = { @@ -413,40 +120,12 @@ static struct xt_match connlimit_mt_reg __read_mostly = { static int __init connlimit_mt_init(void) { - int ret, i; - - BUILD_BUG_ON(CONNLIMIT_LOCK_SLOTS > CONNLIMIT_SLOTS); - BUILD_BUG_ON((CONNLIMIT_SLOTS % CONNLIMIT_LOCK_SLOTS) != 0); - - for (i = 0; i < CONNLIMIT_LOCK_SLOTS; ++i) - spin_lock_init(&xt_connlimit_locks[i]); - - connlimit_conn_cachep = kmem_cache_create("xt_connlimit_conn", - sizeof(struct xt_connlimit_conn), - 0, 0, NULL); - if (!connlimit_conn_cachep) - return -ENOMEM; - - connlimit_rb_cachep = kmem_cache_create("xt_connlimit_rb", - sizeof(struct xt_connlimit_rb), - 0, 0, NULL); - if (!connlimit_rb_cachep) { - kmem_cache_destroy(connlimit_conn_cachep); - return -ENOMEM; - } - ret = xt_register_match(&connlimit_mt_reg); - if (ret != 0) { - kmem_cache_destroy(connlimit_conn_cachep); - kmem_cache_destroy(connlimit_rb_cachep); - } - return ret; + return xt_register_match(&connlimit_mt_reg); } static void __exit connlimit_mt_exit(void) { xt_unregister_match(&connlimit_mt_reg); - 
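/*
 * The rbtree/hlist bookkeeping removed above now lives in the shared
 * nf_conncount infrastructure; the match keeps only a small client
 * role. Lifecycle as used by this file (key layout: the masked source
 * or destination network, followed by the conntrack zone id):
 *
 *	data = nf_conncount_init(net, family, keylen);
 *	...
 *	connections = nf_conncount_count(net, data, key, family,
 *					 tuple_ptr, zone);
 *	...
 *	nf_conncount_destroy(net, family, data);
 *
 * nf_conncount_count() returns 0 only when an internal allocation
 * failed, which connlimit_mt() maps to a hotdrop.
 */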
kmem_cache_destroy(connlimit_conn_cachep); - kmem_cache_destroy(connlimit_rb_cachep); } module_init(connlimit_mt_init); diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c index 64285702afd5..16b6b11ee83f 100644 --- a/net/netfilter/xt_set.c +++ b/net/netfilter/xt_set.c @@ -39,13 +39,17 @@ match_set(ip_set_id_t index, const struct sk_buff *skb, return inv; } -#define ADT_OPT(n, f, d, fs, cfs, t) \ -struct ip_set_adt_opt n = { \ - .family = f, \ - .dim = d, \ - .flags = fs, \ - .cmdflags = cfs, \ - .ext.timeout = t, \ +#define ADT_OPT(n, f, d, fs, cfs, t, p, b, po, bo) \ +struct ip_set_adt_opt n = { \ + .family = f, \ + .dim = d, \ + .flags = fs, \ + .cmdflags = cfs, \ + .ext.timeout = t, \ + .ext.packets = p, \ + .ext.bytes = b, \ + .ext.packets_op = po, \ + .ext.bytes_op = bo, \ } /* Revision 0 interface: backward compatible with netfilter/iptables */ @@ -56,7 +60,8 @@ set_match_v0(const struct sk_buff *skb, struct xt_action_param *par) const struct xt_set_info_match_v0 *info = par->matchinfo; ADT_OPT(opt, xt_family(par), info->match_set.u.compat.dim, - info->match_set.u.compat.flags, 0, UINT_MAX); + info->match_set.u.compat.flags, 0, UINT_MAX, + 0, 0, 0, 0); return match_set(info->match_set.index, skb, par, &opt, info->match_set.u.compat.flags & IPSET_INV_MATCH); @@ -119,7 +124,8 @@ set_match_v1(const struct sk_buff *skb, struct xt_action_param *par) const struct xt_set_info_match_v1 *info = par->matchinfo; ADT_OPT(opt, xt_family(par), info->match_set.dim, - info->match_set.flags, 0, UINT_MAX); + info->match_set.flags, 0, UINT_MAX, + 0, 0, 0, 0); if (opt.flags & IPSET_RETURN_NOMATCH) opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH; @@ -161,45 +167,21 @@ set_match_v1_destroy(const struct xt_mtdtor_param *par) /* Revision 3 match */ static bool -match_counter0(u64 counter, const struct ip_set_counter_match0 *info) -{ - switch (info->op) { - case IPSET_COUNTER_NONE: - return true; - case IPSET_COUNTER_EQ: - return counter == info->value; - case IPSET_COUNTER_NE: - return counter != info->value; - case IPSET_COUNTER_LT: - return counter < info->value; - case IPSET_COUNTER_GT: - return counter > info->value; - } - return false; -} - -static bool set_match_v3(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v3 *info = par->matchinfo; - int ret; ADT_OPT(opt, xt_family(par), info->match_set.dim, - info->match_set.flags, info->flags, UINT_MAX); + info->match_set.flags, info->flags, UINT_MAX, + info->packets.value, info->bytes.value, + info->packets.op, info->bytes.op); if (info->packets.op != IPSET_COUNTER_NONE || info->bytes.op != IPSET_COUNTER_NONE) opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS; - ret = match_set(info->match_set.index, skb, par, &opt, - info->match_set.flags & IPSET_INV_MATCH); - - if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS)) - return ret; - - if (!match_counter0(opt.ext.packets, &info->packets)) - return false; - return match_counter0(opt.ext.bytes, &info->bytes); + return match_set(info->match_set.index, skb, par, &opt, + info->match_set.flags & IPSET_INV_MATCH); } #define set_match_v3_checkentry set_match_v1_checkentry @@ -208,45 +190,21 @@ set_match_v3(const struct sk_buff *skb, struct xt_action_param *par) /* Revision 4 match */ static bool -match_counter(u64 counter, const struct ip_set_counter_match *info) -{ - switch (info->op) { - case IPSET_COUNTER_NONE: - return true; - case IPSET_COUNTER_EQ: - return counter == info->value; - case IPSET_COUNTER_NE: - return counter != info->value; - case IPSET_COUNTER_LT: - return 
counter < info->value; - case IPSET_COUNTER_GT: - return counter > info->value; - } - return false; -} - -static bool set_match_v4(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v4 *info = par->matchinfo; - int ret; ADT_OPT(opt, xt_family(par), info->match_set.dim, - info->match_set.flags, info->flags, UINT_MAX); + info->match_set.flags, info->flags, UINT_MAX, + info->packets.value, info->bytes.value, + info->packets.op, info->bytes.op); if (info->packets.op != IPSET_COUNTER_NONE || info->bytes.op != IPSET_COUNTER_NONE) opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS; - ret = match_set(info->match_set.index, skb, par, &opt, - info->match_set.flags & IPSET_INV_MATCH); - - if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS)) - return ret; - - if (!match_counter(opt.ext.packets, &info->packets)) - return false; - return match_counter(opt.ext.bytes, &info->bytes); + return match_set(info->match_set.index, skb, par, &opt, + info->match_set.flags & IPSET_INV_MATCH); } #define set_match_v4_checkentry set_match_v1_checkentry @@ -260,9 +218,11 @@ set_target_v0(struct sk_buff *skb, const struct xt_action_param *par) const struct xt_set_info_target_v0 *info = par->targinfo; ADT_OPT(add_opt, xt_family(par), info->add_set.u.compat.dim, - info->add_set.u.compat.flags, 0, UINT_MAX); + info->add_set.u.compat.flags, 0, UINT_MAX, + 0, 0, 0, 0); ADT_OPT(del_opt, xt_family(par), info->del_set.u.compat.dim, - info->del_set.u.compat.flags, 0, UINT_MAX); + info->del_set.u.compat.flags, 0, UINT_MAX, + 0, 0, 0, 0); if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); @@ -333,9 +293,11 @@ set_target_v1(struct sk_buff *skb, const struct xt_action_param *par) const struct xt_set_info_target_v1 *info = par->targinfo; ADT_OPT(add_opt, xt_family(par), info->add_set.dim, - info->add_set.flags, 0, UINT_MAX); + info->add_set.flags, 0, UINT_MAX, + 0, 0, 0, 0); ADT_OPT(del_opt, xt_family(par), info->del_set.dim, - info->del_set.flags, 0, UINT_MAX); + info->del_set.flags, 0, UINT_MAX, + 0, 0, 0, 0); if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); @@ -402,9 +364,11 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) const struct xt_set_info_target_v2 *info = par->targinfo; ADT_OPT(add_opt, xt_family(par), info->add_set.dim, - info->add_set.flags, info->flags, info->timeout); + info->add_set.flags, info->flags, info->timeout, + 0, 0, 0, 0); ADT_OPT(del_opt, xt_family(par), info->del_set.dim, - info->del_set.flags, 0, UINT_MAX); + info->del_set.flags, 0, UINT_MAX, + 0, 0, 0, 0); /* Normalize to fit into jiffies */ if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && @@ -432,11 +396,14 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par) int ret; ADT_OPT(add_opt, xt_family(par), info->add_set.dim, - info->add_set.flags, info->flags, info->timeout); + info->add_set.flags, info->flags, info->timeout, + 0, 0, 0, 0); ADT_OPT(del_opt, xt_family(par), info->del_set.dim, - info->del_set.flags, 0, UINT_MAX); + info->del_set.flags, 0, UINT_MAX, + 0, 0, 0, 0); ADT_OPT(map_opt, xt_family(par), info->map_set.dim, - info->map_set.flags, 0, UINT_MAX); + info->map_set.flags, 0, UINT_MAX, + 0, 0, 0, 0); /* Normalize to fit into jiffies */ if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index b27c5c6d9cab..62f36cc938ca 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1266,14 
+1266,14 @@ static int parse_nat(const struct nlattr *attr, /* Do not allow flags if no type is given. */ if (info->range.flags) { OVS_NLERR(log, - "NAT flags may be given only when NAT range (SRC or DST) is also specified.\n" + "NAT flags may be given only when NAT range (SRC or DST) is also specified." ); return -EINVAL; } info->nat = OVS_CT_NAT; /* NAT existing connections. */ } else if (!info->commit) { OVS_NLERR(log, - "NAT attributes may be specified only when CT COMMIT flag is also specified.\n" + "NAT attributes may be specified only when CT COMMIT flag is also specified." ); return -EINVAL; } diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 3e7747549f90..bb95c43aae76 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -16,7 +16,6 @@ * 02110-1301, USA */ -#include <linux/hardirq.h> #include <linux/if_vlan.h> #include <linux/kernel.h> #include <linux/netdevice.h> diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index da215e5c1399..ee7aa0ba3a67 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -247,12 +247,13 @@ static int packet_direct_xmit(struct sk_buff *skb) struct sk_buff *orig_skb = skb; struct netdev_queue *txq; int ret = NETDEV_TX_BUSY; + bool again = false; if (unlikely(!netif_running(dev) || !netif_carrier_ok(dev))) goto drop; - skb = validate_xmit_skb_list(skb, dev); + skb = validate_xmit_skb_list(skb, dev, &again); if (skb != orig_skb) goto drop; diff --git a/net/rds/bind.c b/net/rds/bind.c index 75d43dc8e96b..5aa3a64aa4f0 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c @@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) rs, &addr, (int)ntohs(*port)); break; } else { + rs->rs_bound_addr = 0; rds_sock_put(rs); ret = -ENOMEM; break; diff --git a/net/rds/cong.c b/net/rds/cong.c index 8398fee7c866..8d19fd25dce3 100644 --- a/net/rds/cong.c +++ b/net/rds/cong.c @@ -219,7 +219,11 @@ void rds_cong_queue_updates(struct rds_cong_map *map) spin_lock_irqsave(&rds_cong_lock, flags); list_for_each_entry(conn, &map->m_conn_list, c_map_item) { - if (!test_and_set_bit(0, &conn->c_map_queued)) { + struct rds_conn_path *cp = &conn->c_path[0]; + + rcu_read_lock(); + if (!test_and_set_bit(0, &conn->c_map_queued) && + !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) { rds_stats_inc(s_cong_update_queued); /* We cannot inline the call to rds_send_xmit() here * for two reasons (both pertaining to a TCP transport): @@ -235,9 +239,9 @@ void rds_cong_queue_updates(struct rds_cong_map *map) * therefore trigger warnings. * Defer the xmit to rds_send_worker() instead. */ - queue_delayed_work(rds_wq, - &conn->c_path[0].cp_send_w, 0); + queue_delayed_work(rds_wq, &cp->cp_send_w, 0); } + rcu_read_unlock(); } spin_unlock_irqrestore(&rds_cong_lock, flags); diff --git a/net/rds/connection.c b/net/rds/connection.c index 6492c0b608a4..b10c0ef36d8d 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -366,8 +366,6 @@ void rds_conn_shutdown(struct rds_conn_path *cp) * to the conn hash, so we never trigger a reconnect on this * conn - the reconnect is always triggered by the active peer. 
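 *
 * [Editorial note - not part of the patch] The RDS hunks in this series
 * introduce RDS_DESTROY_PENDING together with a recurring guard: every
 * site that defers work onto a connection path re-checks the flag
 * inside an RCU read-side section before queueing. The destroy path
 * (rds_conn_path_destroy(), below) sets the bit and then calls
 * synchronize_rcu() before cancel_delayed_work_sync(), so once the
 * grace period ends no new work can be queued behind its back. The
 * guard, in distilled form:
 *
 *	rcu_read_lock();
 *	if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags))
 *		queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 *	rcu_read_unlock();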
*/ cancel_delayed_work_sync(&cp->cp_conn_w); - if (conn->c_destroy_in_prog) - return; rcu_read_lock(); if (!hlist_unhashed(&conn->c_hash_node)) { rcu_read_unlock(); @@ -384,10 +382,13 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp) { struct rds_message *rm, *rtmp; + set_bit(RDS_DESTROY_PENDING, &cp->cp_flags); + if (!cp->cp_transport_data) return; /* make sure lingering queued work won't try to ref the conn */ + synchronize_rcu(); cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); @@ -405,6 +406,11 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp) if (cp->cp_xmit_rm) rds_message_put(cp->cp_xmit_rm); + WARN_ON(delayed_work_pending(&cp->cp_send_w)); + WARN_ON(delayed_work_pending(&cp->cp_recv_w)); + WARN_ON(delayed_work_pending(&cp->cp_conn_w)); + WARN_ON(work_pending(&cp->cp_down_w)); + cp->cp_conn->c_trans->conn_free(cp->cp_transport_data); } @@ -426,7 +432,6 @@ void rds_conn_destroy(struct rds_connection *conn) "%pI4\n", conn, &conn->c_laddr, &conn->c_faddr); - conn->c_destroy_in_prog = 1; /* Ensure conn will not be scheduled for reconnect */ spin_lock_irq(&rds_conn_lock); hlist_del_init_rcu(&conn->c_hash_node); @@ -685,10 +690,13 @@ void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy) { atomic_set(&cp->cp_state, RDS_CONN_ERROR); - if (!destroy && cp->cp_conn->c_destroy_in_prog) + rcu_read_lock(); + if (!destroy && test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) { + rcu_read_unlock(); return; - + } queue_work(rds_wq, &cp->cp_down_w); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rds_conn_path_drop); @@ -705,9 +713,15 @@ EXPORT_SYMBOL_GPL(rds_conn_drop); */ void rds_conn_path_connect_if_down(struct rds_conn_path *cp) { + rcu_read_lock(); + if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) { + rcu_read_unlock(); + return; + } if (rds_conn_path_state(cp) == RDS_CONN_DOWN && !test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags)) queue_delayed_work(rds_wq, &cp->cp_conn_w, 0); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down); diff --git a/net/rds/rdma.c b/net/rds/rdma.c index bc2f1e0977d6..634cfcb7bba6 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -525,6 +525,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args) local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; + if (args->nr_local == 0) + return -EINVAL; + /* figure out the number of pages in the vector */ for (i = 0; i < args->nr_local; i++) { if (copy_from_user(&vec, &local_vec[i], @@ -874,6 +877,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, err: if (page) put_page(page); + rm->atomic.op_active = 0; kfree(rm->atomic.op_notifier); return ret; diff --git a/net/rds/rds.h b/net/rds/rds.h index d09f6c1facb4..374ae83b60d4 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -88,6 +88,7 @@ enum { #define RDS_RECONNECT_PENDING 1 #define RDS_IN_XMIT 2 #define RDS_RECV_REFILL 3 +#define RDS_DESTROY_PENDING 4 /* Max number of multipaths per RDS connection. 
Must be a power of 2 */ #define RDS_MPATH_WORKERS 8 @@ -139,8 +140,7 @@ struct rds_connection { __be32 c_faddr; unsigned int c_loopback:1, c_ping_triggered:1, - c_destroy_in_prog:1, - c_pad_to_32:29; + c_pad_to_32:30; int c_npaths; struct rds_connection *c_passive; struct rds_transport *c_trans; diff --git a/net/rds/send.c b/net/rds/send.c index b52cdc8ae428..d3e32d1f3c7d 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -162,6 +162,12 @@ restart: goto out; } + if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) { + release_in_xmit(cp); + ret = -ENETUNREACH; /* don't requeue send work */ + goto out; + } + /* * we record the send generation after doing the xmit acquire. * if someone else manages to jump in and do some work, we'll use @@ -437,7 +443,12 @@ over_batch: !list_empty(&cp->cp_send_queue)) && !raced) { if (batch_count < send_batch_count) goto restart; - queue_delayed_work(rds_wq, &cp->cp_send_w, 1); + rcu_read_lock(); + if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) + ret = -ENETUNREACH; + else + queue_delayed_work(rds_wq, &cp->cp_send_w, 1); + rcu_read_unlock(); } else if (raced) { rds_stats_inc(s_send_lock_queue_raced); } @@ -1009,6 +1020,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes) continue; if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { + if (cmsg->cmsg_len < + CMSG_LEN(sizeof(struct rds_rdma_args))) + return -EINVAL; args = CMSG_DATA(cmsg); *rdma_bytes += args->remote_vec.bytes; } @@ -1148,6 +1162,11 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) else cpath = &conn->c_path[0]; + if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) { + ret = -EAGAIN; + goto out; + } + rds_conn_path_connect_if_down(cpath); ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); @@ -1187,9 +1206,17 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) rds_stats_inc(s_send_queued); ret = rds_send_xmit(cpath); - if (ret == -ENOMEM || ret == -EAGAIN) - queue_delayed_work(rds_wq, &cpath->cp_send_w, 1); - + if (ret == -ENOMEM || ret == -EAGAIN) { + ret = 0; + rcu_read_lock(); + if (test_bit(RDS_DESTROY_PENDING, &cpath->cp_flags)) + ret = -ENETUNREACH; + else + queue_delayed_work(rds_wq, &cpath->cp_send_w, 1); + rcu_read_unlock(); + } + if (ret) + goto out; rds_message_put(rm); return payload_len; @@ -1267,7 +1294,10 @@ rds_send_probe(struct rds_conn_path *cp, __be16 sport, rds_stats_inc(s_send_pong); /* schedule the send work on rds_wq */ - queue_delayed_work(rds_wq, &cp->cp_send_w, 1); + rcu_read_lock(); + if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) + queue_delayed_work(rds_wq, &cp->cp_send_w, 1); + rcu_read_unlock(); rds_message_put(rm); return 0; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 39f502d47969..2e554ef6d75f 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -270,16 +270,33 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr) return -EADDRNOTAVAIL; } +static void rds_tcp_conn_free(void *arg) +{ + struct rds_tcp_connection *tc = arg; + unsigned long flags; + + rdsdebug("freeing tc %p\n", tc); + + spin_lock_irqsave(&rds_tcp_conn_lock, flags); + if (!tc->t_tcp_node_detached) + list_del(&tc->t_tcp_node); + spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); + + kmem_cache_free(rds_tcp_conn_slab, tc); +} + static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_tcp_connection *tc; - int i; + int i, j; + int ret = 0; for (i = 0; i < RDS_MPATH_WORKERS; i++) { tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp); - if (!tc) - return -ENOMEM; - + if (!tc) { + ret = -ENOMEM;
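/*
 * [Editorial note - not part of the patch] The hunk above makes
 * rds_tcp_conn_alloc() unwind cleanly when one of its RDS_MPATH_WORKERS
 * allocations fails part-way: instead of returning -ENOMEM and leaking
 * the paths already set up, it now breaks out with the loop index
 * preserved and frees entries 0..i-1 through rds_tcp_conn_free() (see
 * the "for (j = 0; j < i; j++)" cleanup just below). The generic idiom:
 *
 *	for (i = 0; i < n; i++) {
 *		obj[i] = kmem_cache_alloc(cache, gfp);
 *		if (!obj[i])
 *			goto unwind;
 *	}
 *	return 0;
 * unwind:
 *	while (i--)
 *		kmem_cache_free(cache, obj[i]);
 *	return -ENOMEM;
 */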
+ break; + } mutex_init(&tc->t_conn_path_lock); tc->t_sock = NULL; tc->t_tinc = NULL; @@ -290,27 +307,17 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) tc->t_cpath = &conn->c_path[i]; spin_lock_irq(&rds_tcp_conn_lock); + tc->t_tcp_node_detached = false; list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list); spin_unlock_irq(&rds_tcp_conn_lock); rdsdebug("rds_conn_path [%d] tc %p\n", i, conn->c_path[i].cp_transport_data); } - - return 0; -} - -static void rds_tcp_conn_free(void *arg) -{ - struct rds_tcp_connection *tc = arg; - unsigned long flags; - rdsdebug("freeing tc %p\n", tc); - - spin_lock_irqsave(&rds_tcp_conn_lock, flags); - if (!tc->t_tcp_node_detached) - list_del(&tc->t_tcp_node); - spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); - - kmem_cache_free(rds_tcp_conn_slab, tc); + if (ret) { + for (j = 0; j < i; j++) + rds_tcp_conn_free(conn->c_path[j].cp_transport_data); + } + return ret; } static bool list_has_conn(struct list_head *list, struct rds_connection *conn) diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index 46f74dad0e16..534c67aeb20f 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -170,7 +170,7 @@ void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp) cp->cp_conn, tc, sock); if (sock) { - if (cp->cp_conn->c_destroy_in_prog) + if (test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) rds_tcp_set_linger(sock); sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN); lock_sock(sock->sk); diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index e006ef8e6d40..dd707b9e73e5 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@ -321,8 +321,12 @@ void rds_tcp_data_ready(struct sock *sk) ready = tc->t_orig_data_ready; rds_tcp_stats_inc(s_tcp_data_ready_calls); - if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) - queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); + if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) { + rcu_read_lock(); + if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) + queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); + rcu_read_unlock(); + } out: read_unlock_bh(&sk->sk_callback_lock); ready(sk); diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index dc860d1bb608..73c74763ca72 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@ -202,8 +202,11 @@ void rds_tcp_write_space(struct sock *sk) tc->t_last_seen_una = rds_tcp_snd_una(tc); rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked); - if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) + rcu_read_lock(); + if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf && + !test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) queue_delayed_work(rds_wq, &cp->cp_send_w, 0); + rcu_read_unlock(); out: read_unlock_bh(&sk->sk_callback_lock); diff --git a/net/rds/threads.c b/net/rds/threads.c index f121daa402c8..eb76db1360b0 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -87,8 +87,12 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr) cp->cp_reconnect_jiffies = 0; set_bit(0, &cp->cp_conn->c_map_queued); - queue_delayed_work(rds_wq, &cp->cp_send_w, 0); - queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); + rcu_read_lock(); + if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) { + queue_delayed_work(rds_wq, &cp->cp_send_w, 0); + queue_delayed_work(rds_wq, &cp->cp_recv_w, 0); + } + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(rds_connect_path_complete); @@ -133,7 +137,10 @@ void rds_queue_reconnect(struct rds_conn_path *cp) set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags); if (cp->cp_reconnect_jiffies == 0) { 
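/*
 * [Editorial note - not part of the patch] rds_queue_reconnect(), which
 * this branch belongs to, implements randomized exponential backoff:
 * the first attempt goes out immediately (the window is seeded to the
 * sysctl minimum and the work queued with delay 0), later attempts wait
 * "rand % cp->cp_reconnect_jiffies", and the window doubles per attempt
 * up to rds_sysctl_reconnect_max_jiffies. The random share keeps the
 * two peers of a connection from retrying in lock-step. Schematically:
 *
 *	delay  = random % window;
 *	window = min(window * 2, window_max);
 */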
cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies; - queue_delayed_work(rds_wq, &cp->cp_conn_w, 0); + rcu_read_lock(); + if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) + queue_delayed_work(rds_wq, &cp->cp_conn_w, 0); + rcu_read_unlock(); return; } @@ -141,8 +148,11 @@ void rds_queue_reconnect(struct rds_conn_path *cp) rdsdebug("%lu delay %lu ceil conn %p for %pI4 -> %pI4\n", rand % cp->cp_reconnect_jiffies, cp->cp_reconnect_jiffies, conn, &conn->c_laddr, &conn->c_faddr); - queue_delayed_work(rds_wq, &cp->cp_conn_w, - rand % cp->cp_reconnect_jiffies); + rcu_read_lock(); + if (!test_bit(RDS_DESTROY_PENDING, &cp->cp_flags)) + queue_delayed_work(rds_wq, &cp->cp_conn_w, + rand % cp->cp_reconnect_jiffies); + rcu_read_unlock(); cp->cp_reconnect_jiffies = min(cp->cp_reconnect_jiffies * 2, rds_sysctl_reconnect_max_jiffies); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 9d632e92cad0..b56986d41c87 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -159,7 +159,7 @@ static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, if (action == TC_ACT_SHOT) this_cpu_ptr(gact->common.cpu_qstats)->drops += packets; - tm->lastuse = lastuse; + tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 37e5e4decbd6..e6ff88f72900 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -231,7 +231,7 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, struct tcf_t *tm = &m->tcf_tm; _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); - tm->lastuse = lastuse; + tm->lastuse = max_t(u64, tm->lastuse, lastuse); } static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 4591b87eaab5..6708b6953bfa 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -373,6 +373,8 @@ void tcf_block_put(struct tcf_block *block) { struct tcf_block_ext_info ei = {0, }; + if (!block) + return; tcf_block_put_ext(block, block->q, &ei); } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 3a3a1da6b071..8a04c36e579f 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -807,11 +807,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, tcm->tcm_info = refcount_read(&q->refcnt); if (nla_put_string(skb, TCA_KIND, q->ops->id)) goto nla_put_failure; - if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED))) - goto nla_put_failure; if (q->ops->dump && q->ops->dump(q, skb) < 0) goto nla_put_failure; - + if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED))) + goto nla_put_failure; qlen = qdisc_qlen_sum(q); stab = rtnl_dereference(q->stab); @@ -1402,10 +1401,8 @@ replay: return -EINVAL; } q = qdisc_lookup(dev, tcm->tcm_handle); - if (!q) { - NL_SET_ERR_MSG(extack, "No qdisc found for specified handle"); + if (!q) goto create_n_graft; - } if (n->nlmsg_flags & NLM_F_EXCL) { NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override"); return -EEXIST; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 10aaa3b615ce..a883c501d5ec 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -32,6 +32,7 @@ #include <net/pkt_sched.h> #include <net/dst.h> #include <trace/events/qdisc.h> +#include <net/xfrm.h> /* Qdisc to use by default */ const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; @@ -111,10 +112,16 @@ static inline void 
qdisc_enqueue_skb_bad_txq(struct Qdisc *q, static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) { - __skb_queue_head(&q->gso_skb, skb); - q->qstats.requeues++; - qdisc_qstats_backlog_inc(q, skb); - q->q.qlen++; /* it's still part of the queue */ + while (skb) { + struct sk_buff *next = skb->next; + + __skb_queue_tail(&q->gso_skb, skb); + q->qstats.requeues++; + qdisc_qstats_backlog_inc(q, skb); + q->q.qlen++; /* it's still part of the queue */ + + skb = next; + } __netif_schedule(q); return 0; @@ -125,12 +132,19 @@ static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q) spinlock_t *lock = qdisc_lock(q); spin_lock(lock); - __skb_queue_tail(&q->gso_skb, skb); + while (skb) { + struct sk_buff *next = skb->next; + + __skb_queue_tail(&q->gso_skb, skb); + + qdisc_qstats_cpu_requeues_inc(q); + qdisc_qstats_cpu_backlog_inc(q, skb); + qdisc_qstats_cpu_qlen_inc(q); + + skb = next; + } spin_unlock(lock); - qdisc_qstats_cpu_requeues_inc(q); - qdisc_qstats_cpu_backlog_inc(q, skb); - qdisc_qstats_cpu_qlen_inc(q); __netif_schedule(q); return 0; @@ -230,6 +244,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, /* skb in gso_skb were already validated */ *validate = false; + if (xfrm_offload(skb)) + *validate = true; /* check the reason of requeuing without tx lock first */ txq = skb_get_tx_queue(txq->dev, skb); if (!netif_xmit_frozen_or_stopped(txq)) { @@ -285,6 +301,7 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, spinlock_t *root_lock, bool validate) { int ret = NETDEV_TX_BUSY; + bool again = false; /* And release qdisc */ if (root_lock) @@ -292,7 +309,17 @@ bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, /* Note that we validate skb (GSO, checksum, ...) outside of locks */ if (validate) - skb = validate_xmit_skb_list(skb, dev); + skb = validate_xmit_skb_list(skb, dev, &again); + +#ifdef CONFIG_XFRM_OFFLOAD + if (unlikely(again)) { + if (root_lock) + spin_lock(root_lock); + + dev_requeue_skb(skb, q); + return false; + } +#endif if (likely(skb)) { HARD_TX_LOCK(dev, txq, smp_processor_id()); @@ -1247,6 +1274,8 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, if (!tp_head) { RCU_INIT_POINTER(*miniqp->p_miniq, NULL); + /* Wait for flying RCU callback before it is freed. */ + rcu_barrier_bh(); return; } @@ -1262,7 +1291,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, rcu_assign_pointer(*miniqp->p_miniq, miniq); if (miniq_old) - /* This is counterpart of the rcu barrier above. We need to + /* This is counterpart of the rcu barriers above. We need to * block potential new user of miniq_old until all readers * are not seeing it. 
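 *
 * [Editorial note - not part of the patch] The rcu_barrier_bh() added
 * in the tp_head == NULL branch above is stronger than a plain
 * synchronize_rcu_bh(): it waits not only for readers but for every
 * already-queued call_rcu_bh() callback to finish running. Assuming the
 * callback in question is the mini_qdisc_rcu_func() used elsewhere in
 * this file, the pairing is:
 *
 *	call_rcu_bh(&miniq->rcu, mini_qdisc_rcu_func);	// earlier swap
 *	rcu_barrier_bh();	// all such callbacks have now completed
 *
 * which is what lets the caller free the miniq backing memory safely.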
*/ diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index ec0bd36e09a9..0af1c1254e0b 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -157,7 +157,6 @@ static int red_offload(struct Qdisc *sch, bool enable) .handle = sch->handle, .parent = sch->parent, }; - int err; if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) return -EOPNOTSUPP; @@ -172,14 +171,7 @@ static int red_offload(struct Qdisc *sch, bool enable) opt.command = TC_RED_DESTROY; } - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); - - if (!err && enable) - sch->flags |= TCQ_F_OFFLOADED; - else - sch->flags &= ~TCQ_F_OFFLOADED; - - return err; + return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); } static void red_destroy(struct Qdisc *sch) @@ -297,12 +289,22 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt) .stats.qstats = &sch->qstats, }, }; + int err; - if (!(sch->flags & TCQ_F_OFFLOADED)) + sch->flags &= ~TCQ_F_OFFLOADED; + + if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) return 0; - return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, - &hw_stats); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, + &hw_stats); + if (err == -EOPNOTSUPP) + return 0; + + if (!err) + sch->flags |= TCQ_F_OFFLOADED; + + return err; } static int red_dump(struct Qdisc *sch, struct sk_buff *skb) @@ -342,32 +344,24 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct red_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); - struct tc_red_xstats st = { - .early = q->stats.prob_drop + q->stats.forced_drop, - .pdrop = q->stats.pdrop, - .other = q->stats.other, - .marked = q->stats.prob_mark + q->stats.forced_mark, - }; + struct tc_red_xstats st = {0}; if (sch->flags & TCQ_F_OFFLOADED) { - struct red_stats hw_stats = {0}; struct tc_red_qopt_offload hw_stats_request = { .command = TC_RED_XSTATS, .handle = sch->handle, .parent = sch->parent, { - .xstats = &hw_stats, + .xstats = &q->stats, }, }; - if (!dev->netdev_ops->ndo_setup_tc(dev, - TC_SETUP_QDISC_RED, - &hw_stats_request)) { - st.early += hw_stats.prob_drop + hw_stats.forced_drop; - st.pdrop += hw_stats.pdrop; - st.other += hw_stats.other; - st.marked += hw_stats.prob_mark + hw_stats.forced_mark; - } + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, + &hw_stats_request); } + st.early = q->stats.prob_drop + q->stats.forced_drop; + st.pdrop = q->stats.pdrop; + st.other = q->stats.other; + st.marked = q->stats.prob_mark + q->stats.forced_mark; return gnet_stats_copy_app(d, &st, sizeof(st)); } diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index d9c04dc1b3f3..c740b189d4ba 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig @@ -37,18 +37,6 @@ menuconfig IP_SCTP if IP_SCTP -config NET_SCTPPROBE - tristate "SCTP: Association probing" - depends on PROC_FS && KPROBES - ---help--- - This module allows for capturing the changes to SCTP association - state in response to incoming packets. It is used for debugging - SCTP congestion control algorithms. If you don't understand - what was just said, you don't need it: say N. - - To compile this code as a module, choose M here: the - module will be called sctp_probe. 
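# [Editorial note - not part of the patch] The sctp_probe module whose
# Kconfig entry is deleted here is superseded within this same series by
# a tracepoint: the sm_statefuns.c hunk further down defines
# CREATE_TRACE_POINTS for trace/events/sctp.h and fires
# trace_sctp_probe() from sctp_sf_eat_sack_6_2(). Assuming the usual
# tracefs layout, the replacement is consumed with something like:
#
#	echo 1 > /sys/kernel/debug/tracing/events/sctp/sctp_probe/enable
#	cat /sys/kernel/debug/tracing/trace_pipe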
- config SCTP_DBG_OBJCNT bool "SCTP: Debug object counts" depends on PROC_FS diff --git a/net/sctp/Makefile b/net/sctp/Makefile index 54bd9c1a8aa1..6776582ec449 100644 --- a/net/sctp/Makefile +++ b/net/sctp/Makefile @@ -4,7 +4,6 @@ # obj-$(CONFIG_IP_SCTP) += sctp.o -obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o obj-$(CONFIG_INET_SCTP_DIAG) += sctp_diag.o sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ @@ -16,8 +15,6 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ offload.o stream_sched.o stream_sched_prio.o \ stream_sched_rr.o stream_interleave.o -sctp_probe-y := probe.o - sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o sctp-$(CONFIG_PROC_FS) += proc.o sctp-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/sctp/input.c b/net/sctp/input.c index 621b5ca3fd1c..141c9c466ec1 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -399,20 +399,24 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, return; } - if (t->param_flags & SPP_PMTUD_ENABLE) { - /* Update transports view of the MTU */ - sctp_transport_update_pmtu(t, pmtu); - - /* Update association pmtu. */ - sctp_assoc_sync_pmtu(asoc); - } + if (!(t->param_flags & SPP_PMTUD_ENABLE)) + /* We can't allow retransmitting in such case, as the + * retransmission would be sized just as before, and thus we + * would get another icmp, and retransmit again. + */ + return; - /* Retransmit with the new pmtu setting. - * Normally, if PMTU discovery is disabled, an ICMP Fragmentation - * Needed will never be sent, but if a message was sent before - * PMTU discovery was disabled that was larger than the PMTU, it - * would not be fragmented, so it must be re-transmitted fragmented. + /* Update transports view of the MTU. Return if no update was needed. + * If an update wasn't needed/possible, it also doesn't make sense to + * try to retransmit now. */ + if (!sctp_transport_update_pmtu(t, pmtu)) + return; + + /* Update association pmtu. */ + sctp_assoc_sync_pmtu(asoc); + + /* Retransmit with the new pmtu setting. */ sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); } diff --git a/net/sctp/probe.c b/net/sctp/probe.c deleted file mode 100644 index 1280f85a598d..000000000000 --- a/net/sctp/probe.c +++ /dev/null @@ -1,244 +0,0 @@ -/* - * sctp_probe - Observe the SCTP flow with kprobes. - * - * The idea for this came from Werner Almesberger's umlsim - * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> - * - * Modified for SCTP from Stephen Hemminger's code - * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
- */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/kernel.h> -#include <linux/kprobes.h> -#include <linux/socket.h> -#include <linux/sctp.h> -#include <linux/proc_fs.h> -#include <linux/vmalloc.h> -#include <linux/module.h> -#include <linux/kfifo.h> -#include <linux/time.h> -#include <net/net_namespace.h> - -#include <net/sctp/sctp.h> -#include <net/sctp/sm.h> - -MODULE_SOFTDEP("pre: sctp"); -MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); -MODULE_DESCRIPTION("SCTP snooper"); -MODULE_LICENSE("GPL"); - -static int port __read_mostly = 0; -MODULE_PARM_DESC(port, "Port to match (0=all)"); -module_param(port, int, 0); - -static unsigned int fwmark __read_mostly = 0; -MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)"); -module_param(fwmark, uint, 0); - -static int bufsize __read_mostly = 64 * 1024; -MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); -module_param(bufsize, int, 0); - -static int full __read_mostly = 1; -MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); -module_param(full, int, 0); - -static const char procname[] = "sctpprobe"; - -static struct { - struct kfifo fifo; - spinlock_t lock; - wait_queue_head_t wait; - struct timespec64 tstart; -} sctpw; - -static __printf(1, 2) void printl(const char *fmt, ...) -{ - va_list args; - int len; - char tbuf[256]; - - va_start(args, fmt); - len = vscnprintf(tbuf, sizeof(tbuf), fmt, args); - va_end(args); - - kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); - wake_up(&sctpw.wait); -} - -static int sctpprobe_open(struct inode *inode, struct file *file) -{ - kfifo_reset(&sctpw.fifo); - ktime_get_ts64(&sctpw.tstart); - - return 0; -} - -static ssize_t sctpprobe_read(struct file *file, char __user *buf, - size_t len, loff_t *ppos) -{ - int error = 0, cnt = 0; - unsigned char *tbuf; - - if (!buf) - return -EINVAL; - - if (len == 0) - return 0; - - tbuf = vmalloc(len); - if (!tbuf) - return -ENOMEM; - - error = wait_event_interruptible(sctpw.wait, - kfifo_len(&sctpw.fifo) != 0); - if (error) - goto out_free; - - cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); - error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; - -out_free: - vfree(tbuf); - - return error ? 
error : cnt; -} - -static const struct file_operations sctpprobe_fops = { - .owner = THIS_MODULE, - .open = sctpprobe_open, - .read = sctpprobe_read, - .llseek = noop_llseek, -}; - -static enum sctp_disposition jsctp_sf_eat_sack( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) -{ - struct sctp_chunk *chunk = arg; - struct sk_buff *skb = chunk->skb; - struct sctp_transport *sp; - static __u32 lcwnd = 0; - struct timespec64 now; - - sp = asoc->peer.primary_path; - - if (((port == 0 && fwmark == 0) || - asoc->peer.port == port || - ep->base.bind_addr.port == port || - (fwmark > 0 && skb->mark == fwmark)) && - (full || sp->cwnd != lcwnd)) { - lcwnd = sp->cwnd; - - ktime_get_ts64(&now); - now = timespec64_sub(now, sctpw.tstart); - - printl("%lu.%06lu ", (unsigned long) now.tv_sec, - (unsigned long) now.tv_nsec / NSEC_PER_USEC); - - printl("%p %5d %5d %5d %8d %5d ", asoc, - ep->base.bind_addr.port, asoc->peer.port, - asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data); - - list_for_each_entry(sp, &asoc->peer.transport_addr_list, - transports) { - if (sp == asoc->peer.primary_path) - printl("*"); - - printl("%pISc %2u %8u %8u %8u %8u %8u ", - &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh, - sp->flight_size, sp->partial_bytes_acked, - sp->pathmtu); - } - printl("\n"); - } - - jprobe_return(); - return 0; -} - -static struct jprobe sctp_recv_probe = { - .kp = { - .symbol_name = "sctp_sf_eat_sack_6_2", - }, - .entry = jsctp_sf_eat_sack, -}; - -static __init int sctp_setup_jprobe(void) -{ - int ret = register_jprobe(&sctp_recv_probe); - - if (ret) { - if (request_module("sctp")) - goto out; - ret = register_jprobe(&sctp_recv_probe); - } - -out: - return ret; -} - -static __init int sctpprobe_init(void) -{ - int ret = -ENOMEM; - - /* Warning: if the function signature of sctp_sf_eat_sack_6_2, - * has been changed, you also have to change the signature of - * jsctp_sf_eat_sack, otherwise you end up right here! 
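 *
 * [Editorial note - not part of the patch] The safety net referred to
 * above is the BUILD_BUG_ON(__same_type(...)) just below: __same_type()
 * expands to __builtin_types_compatible_p() on the two typeof()s, so
 * compilation fails the moment the jprobe handler's prototype drifts
 * from sctp_sf_eat_sack_6_2()'s. Generic form of the idiom:
 *
 *	BUILD_BUG_ON(!__same_type(hooked_func, probe_func));
 *
 * The tracepoint that replaces this module (see the sm_statefuns.c hunk
 * below) gets the same guarantee from the trace event's typed prototype.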
- */ - BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2, - jsctp_sf_eat_sack) == 0); - - init_waitqueue_head(&sctpw.wait); - spin_lock_init(&sctpw.lock); - if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL)) - return ret; - - if (!proc_create(procname, S_IRUSR, init_net.proc_net, - &sctpprobe_fops)) - goto free_kfifo; - - ret = sctp_setup_jprobe(); - if (ret) - goto remove_proc; - - pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n", - port, fwmark, bufsize); - return 0; - -remove_proc: - remove_proc_entry(procname, init_net.proc_net); -free_kfifo: - kfifo_free(&sctpw.fifo); - return ret; -} - -static __exit void sctpprobe_exit(void) -{ - kfifo_free(&sctpw.fifo); - remove_proc_entry(procname, init_net.proc_net); - unregister_jprobe(&sctp_recv_probe); -} - -module_init(sctpprobe_init); -module_exit(sctpprobe_exit); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 541f34735346..eb7905ffe5f2 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -59,6 +59,9 @@ #include <net/sctp/sm.h> #include <net/sctp/structs.h> +#define CREATE_TRACE_POINTS +#include <trace/events/sctp.h> + static struct sctp_packet *sctp_abort_pkt_new( struct net *net, const struct sctp_endpoint *ep, @@ -3219,6 +3222,8 @@ enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net, struct sctp_sackhdr *sackh; __u32 ctsn; + trace_sctp_probe(ep, asoc, chunk); + if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index aadcd4244d9b..a5e2150ab013 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4569,7 +4569,7 @@ static int sctp_init_sock(struct sock *sk) SCTP_DBG_OBJCNT_INC(sock); local_bh_disable(); - percpu_counter_inc(&sctp_sockets_allocated); + sk_sockets_allocated_inc(sk); sock_prot_inuse_add(net, sk->sk_prot, 1); /* Nothing can fail after this block, otherwise @@ -4613,7 +4613,7 @@ static void sctp_destroy_sock(struct sock *sk) } sctp_endpoint_free(sp->ep); local_bh_disable(); - percpu_counter_dec(&sctp_sockets_allocated); + sk_sockets_allocated_dec(sk); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); local_bh_enable(); } diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 06b644dd858c..cedf672487f9 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -156,9 +156,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, sctp_stream_outq_migrate(stream, NULL, outcnt); sched->sched_all(stream); - i = sctp_stream_alloc_out(stream, outcnt, gfp); - if (i) - return i; + ret = sctp_stream_alloc_out(stream, outcnt, gfp); + if (ret) + goto out; stream->outcnt = outcnt; for (i = 0; i < stream->outcnt; i++) @@ -171,19 +171,17 @@ in: if (!incnt) goto out; - i = sctp_stream_alloc_in(stream, incnt, gfp); - if (i) { - ret = -ENOMEM; - goto free; + ret = sctp_stream_alloc_in(stream, incnt, gfp); + if (ret) { + sched->free(stream); + kfree(stream->out); + stream->out = NULL; + stream->outcnt = 0; + goto out; } stream->incnt = incnt; - goto out; -free: - sched->free(stream); - kfree(stream->out); - stream->out = NULL; out: return ret; } diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 1e5a22430cf5..47f82bd794d9 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -248,28 +248,37 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } -void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) +bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 
pmtu) { struct dst_entry *dst = sctp_transport_dst_check(t); + bool change = true; if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { - pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", - __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); - /* Use default minimum segment size and disable - * pmtu discovery on this transport. - */ - t->pathmtu = SCTP_DEFAULT_MINSEGMENT; - } else { - t->pathmtu = pmtu; + pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n", + __func__, pmtu, SCTP_DEFAULT_MINSEGMENT); + /* Use default minimum segment instead */ + pmtu = SCTP_DEFAULT_MINSEGMENT; } + pmtu = SCTP_TRUNC4(pmtu); if (dst) { dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu); dst = sctp_transport_dst_check(t); } - if (!dst) + if (!dst) { t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk); + dst = t->dst; + } + + if (dst) { + /* Re-fetch, as under layers may have a higher minimum size */ + pmtu = SCTP_TRUNC4(dst_mtu(dst)); + change = t->pathmtu != pmtu; + } + t->pathmtu = pmtu; + + return change; } /* Caches the dst entry and source address for a transport's destination diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index c5fda15ba319..1fdab5c4eda8 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -401,7 +401,7 @@ void strp_data_ready(struct strparser *strp) * allows a thread in BH context to safely check if the process * lock is held. In this case, if the lock is held, queue work. */ - if (sock_owned_by_user(strp->sk)) { + if (sock_owned_by_user_nocheck(strp->sk)) { queue_work(strp_wq, &strp->work); return; } diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 47ec121574ce..c8001471da6c 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -324,6 +324,7 @@ restart: if (res) { pr_warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res); + kfree(b); return -EINVAL; } @@ -347,8 +348,10 @@ restart: if (skb) tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); - if (tipc_mon_create(net, bearer_id)) + if (tipc_mon_create(net, bearer_id)) { + bearer_disable(net, b); return -ENOMEM; + } pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", name, diff --git a/net/tipc/core.h b/net/tipc/core.h index 964342689f2c..20b21af2ff14 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -49,7 +49,6 @@ #include <linux/uaccess.h> #include <linux/interrupt.h> #include <linux/atomic.h> -#include <asm/hardirq.h> #include <linux/netdevice.h> #include <linux/in.h> #include <linux/list.h> diff --git a/net/tipc/group.c b/net/tipc/group.c index 7ebbdeb2a90e..497ee34bfab9 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -49,8 +49,6 @@ #define ADV_ACTIVE (ADV_UNIT * 12) enum mbr_state { - MBR_QUARANTINED, - MBR_DISCOVERED, MBR_JOINING, MBR_PUBLISHED, MBR_JOINED, @@ -64,8 +62,7 @@ enum mbr_state { struct tipc_member { struct rb_node tree_node; struct list_head list; - struct list_head congested; - struct sk_buff *event_msg; + struct list_head small_win; struct sk_buff_head deferredq; struct tipc_group *group; u32 node; @@ -77,21 +74,18 @@ struct tipc_member { u16 bc_rcv_nxt; u16 bc_syncpt; u16 bc_acked; - bool usr_pending; }; struct tipc_group { struct rb_root members; - struct list_head congested; + struct list_head small_win; struct list_head pending; struct list_head active; - struct list_head reclaiming; struct tipc_nlist dests; struct net *net; int subid; u32 type; u32 instance; - u32 domain; u32 scope; u32 portid; u16 member_cnt; @@ -101,15 +95,32 @@ struct tipc_group { u16 
bc_ackers; bool loopback; bool events; + bool open; }; static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m, int mtyp, struct sk_buff_head *xmitq); +bool tipc_group_is_open(struct tipc_group *grp) +{ + return grp->open; +} + +static void tipc_group_open(struct tipc_member *m, bool *wakeup) +{ + *wakeup = false; + if (list_empty(&m->small_win)) + return; + list_del_init(&m->small_win); + m->group->open = true; + *wakeup = true; +} + static void tipc_group_decr_active(struct tipc_group *grp, struct tipc_member *m) { - if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING) + if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING || + m->state == MBR_REMITTED) grp->active_cnt--; } @@ -136,14 +147,14 @@ u16 tipc_group_bc_snd_nxt(struct tipc_group *grp) return grp->bc_snd_nxt; } -static bool tipc_group_is_enabled(struct tipc_member *m) +static bool tipc_group_is_receiver(struct tipc_member *m) { - return m->state != MBR_QUARANTINED && m->state != MBR_LEAVING; + return m && m->state != MBR_JOINING && m->state != MBR_LEAVING; } -static bool tipc_group_is_receiver(struct tipc_member *m) +static bool tipc_group_is_sender(struct tipc_member *m) { - return m && m->state >= MBR_JOINED; + return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED; } u32 tipc_group_exclude(struct tipc_group *grp) @@ -161,6 +172,8 @@ int tipc_group_size(struct tipc_group *grp) struct tipc_group *tipc_group_create(struct net *net, u32 portid, struct tipc_group_req *mreq) { + u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS; + bool global = mreq->scope != TIPC_NODE_SCOPE; struct tipc_group *grp; u32 type = mreq->type; @@ -168,25 +181,40 @@ struct tipc_group *tipc_group_create(struct net *net, u32 portid, if (!grp) return NULL; tipc_nlist_init(&grp->dests, tipc_own_addr(net)); - INIT_LIST_HEAD(&grp->congested); + INIT_LIST_HEAD(&grp->small_win); INIT_LIST_HEAD(&grp->active); INIT_LIST_HEAD(&grp->pending); - INIT_LIST_HEAD(&grp->reclaiming); grp->members = RB_ROOT; grp->net = net; grp->portid = portid; - grp->domain = addr_domain(net, mreq->scope); grp->type = type; grp->instance = mreq->instance; grp->scope = mreq->scope; grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK; grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS; - if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, &grp->subid)) + filter |= global ? 
TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE; + if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0, + filter, &grp->subid)) return grp; kfree(grp); return NULL; } +void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf) +{ + struct rb_root *tree = &grp->members; + struct tipc_member *m, *tmp; + struct sk_buff_head xmitq; + + skb_queue_head_init(&xmitq); + rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) { + tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq); + tipc_group_update_member(m, 0); + } + tipc_node_distr_xmit(net, &xmitq); + *sk_rcvbuf = tipc_group_rcvbuf_limit(grp); +} + void tipc_group_delete(struct net *net, struct tipc_group *grp) { struct rb_root *tree = &grp->members; @@ -232,7 +260,7 @@ static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp, struct tipc_member *m; m = tipc_group_find_member(grp, node, port); - if (m && tipc_group_is_enabled(m)) + if (m && tipc_group_is_receiver(m)) return m; return NULL; } @@ -277,7 +305,7 @@ static void tipc_group_add_to_tree(struct tipc_group *grp, static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, u32 node, u32 port, - int state) + u32 instance, int state) { struct tipc_member *m; @@ -285,11 +313,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, if (!m) return NULL; INIT_LIST_HEAD(&m->list); - INIT_LIST_HEAD(&m->congested); + INIT_LIST_HEAD(&m->small_win); __skb_queue_head_init(&m->deferredq); m->group = grp; m->node = node; m->port = port; + m->instance = instance; m->bc_acked = grp->bc_snd_nxt - 1; grp->member_cnt++; tipc_group_add_to_tree(grp, m); @@ -298,9 +327,10 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, return m; } -void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port) +void tipc_group_add_member(struct tipc_group *grp, u32 node, + u32 port, u32 instance) { - tipc_group_create_member(grp, node, port, MBR_DISCOVERED); + tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED); } static void tipc_group_delete_member(struct tipc_group *grp, @@ -314,7 +344,7 @@ static void tipc_group_delete_member(struct tipc_group *grp, grp->bc_ackers--; list_del_init(&m->list); - list_del_init(&m->congested); + list_del_init(&m->small_win); tipc_group_decr_active(grp, m); /* If last member on a node, remove node from dest list */ @@ -343,7 +373,7 @@ void tipc_group_update_member(struct tipc_member *m, int len) struct tipc_group *grp = m->group; struct tipc_member *_m, *tmp; - if (!tipc_group_is_enabled(m)) + if (!tipc_group_is_receiver(m)) return; m->window -= len; @@ -351,16 +381,14 @@ void tipc_group_update_member(struct tipc_member *m, int len) if (m->window >= ADV_IDLE) return; - list_del_init(&m->congested); + list_del_init(&m->small_win); - /* Sort member into congested members' list */ - list_for_each_entry_safe(_m, tmp, &grp->congested, congested) { - if (m->window > _m->window) - continue; - list_add_tail(&m->congested, &_m->congested); - return; + /* Sort member into small_window members' list */ + list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) { + if (_m->window > m->window) + break; } - list_add_tail(&m->congested, &grp->congested); + list_add_tail(&m->small_win, &_m->small_win); } void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack) @@ -368,18 +396,20 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack) u16 prev = grp->bc_snd_nxt - 1; struct tipc_member *m; struct rb_node *n; + u16 ackers = 0; for (n 
= rb_first(&grp->members); n; n = rb_next(n)) { m = container_of(n, struct tipc_member, tree_node); - if (tipc_group_is_enabled(m)) { + if (tipc_group_is_receiver(m)) { tipc_group_update_member(m, len); m->bc_acked = prev; + ackers++; } } /* Mark number of acknowledges to expect, if any */ if (ack) - grp->bc_ackers = grp->member_cnt; + grp->bc_ackers = ackers; grp->bc_snd_nxt++; } @@ -391,20 +421,20 @@ bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport, int adv, state; m = tipc_group_find_dest(grp, dnode, dport); - *mbr = m; - if (!m) + if (!tipc_group_is_receiver(m)) { + *mbr = NULL; return false; - if (m->usr_pending) - return true; + } + *mbr = m; + if (m->window >= len) return false; - m->usr_pending = true; + + grp->open = false; /* If not fully advertised, do it now to prevent mutual blocking */ adv = m->advertised; state = m->state; - if (state < MBR_JOINED) - return true; if (state == MBR_JOINED && adv == ADV_IDLE) return true; if (state == MBR_ACTIVE && adv == ADV_ACTIVE) @@ -422,13 +452,14 @@ bool tipc_group_bc_cong(struct tipc_group *grp, int len) struct tipc_member *m = NULL; /* If prev bcast was replicast, reject until all receivers have acked */ - if (grp->bc_ackers) + if (grp->bc_ackers) { + grp->open = false; return true; - - if (list_empty(&grp->congested)) + } + if (list_empty(&grp->small_win)) return false; - m = list_first_entry(&grp->congested, struct tipc_member, congested); + m = list_first_entry(&grp->small_win, struct tipc_member, small_win); if (m->window >= len) return false; @@ -483,7 +514,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq, goto drop; m = tipc_group_find_member(grp, node, port); - if (!tipc_group_is_receiver(m)) + if (!tipc_group_is_sender(m)) goto drop; if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt)) @@ -560,7 +591,7 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node, int max_active = grp->max_active; int reclaim_limit = max_active * 3 / 4; int active_cnt = grp->active_cnt; - struct tipc_member *m, *rm; + struct tipc_member *m, *rm, *pm; m = tipc_group_find_member(grp, node, port); if (!m) @@ -570,24 +601,34 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node, switch (m->state) { case MBR_JOINED: - /* Reclaim advertised space from least active member */ - if (!list_empty(active) && active_cnt >= reclaim_limit) { + /* First, decide if member can go active */ + if (active_cnt <= max_active) { + m->state = MBR_ACTIVE; + list_add_tail(&m->list, active); + grp->active_cnt++; + tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); + } else { + m->state = MBR_PENDING; + list_add_tail(&m->list, &grp->pending); + } + + if (active_cnt < reclaim_limit) + break; + + /* Reclaim from oldest active member, if possible */ + if (!list_empty(active)) { rm = list_first_entry(active, struct tipc_member, list); rm->state = MBR_RECLAIMING; - list_move_tail(&rm->list, &grp->reclaiming); + list_del_init(&rm->list); tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq); - } - /* If max active, become pending and wait for reclaimed space */ - if (active_cnt >= max_active) { - m->state = MBR_PENDING; - list_add_tail(&m->list, &grp->pending); break; } - /* Otherwise become active */ - m->state = MBR_ACTIVE; - list_add_tail(&m->list, &grp->active); - grp->active_cnt++; - /* Fall through */ + /* Nobody to reclaim from; - revert oldest pending to JOINED */ + pm = list_first_entry(&grp->pending, struct tipc_member, list); + list_del_init(&pm->list); + pm->state = MBR_JOINED; + 
tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq); + break; case MBR_ACTIVE: if (!list_is_last(&m->list, &grp->active)) list_move_tail(&m->list, &grp->active); @@ -599,13 +640,23 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node, if (m->advertised > ADV_IDLE) break; m->state = MBR_JOINED; + grp->active_cnt--; if (m->advertised < ADV_IDLE) { pr_warn_ratelimited("Rcv unexpected msg after REMIT\n"); tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); } + + if (list_empty(&grp->pending)) + return; + + /* Set oldest pending member to active and advertise */ + pm = list_first_entry(&grp->pending, struct tipc_member, list); + pm->state = MBR_ACTIVE; + list_move_tail(&pm->list, &grp->active); + grp->active_cnt++; + tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq); break; case MBR_RECLAIMING: - case MBR_DISCOVERED: case MBR_JOINING: case MBR_LEAVING: default: @@ -613,6 +664,40 @@ void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node, } } +static void tipc_group_create_event(struct tipc_group *grp, + struct tipc_member *m, + u32 event, u16 seqno, + struct sk_buff_head *inputq) +{ u32 dnode = tipc_own_addr(grp->net); + struct tipc_event evt; + struct sk_buff *skb; + struct tipc_msg *hdr; + + evt.event = event; + evt.found_lower = m->instance; + evt.found_upper = m->instance; + evt.port.ref = m->port; + evt.port.node = m->node; + evt.s.seq.type = grp->type; + evt.s.seq.lower = m->instance; + evt.s.seq.upper = m->instance; + + skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT, + GROUP_H_SIZE, sizeof(evt), dnode, m->node, + grp->portid, m->port, 0); + if (!skb) + return; + + hdr = buf_msg(skb); + msg_set_nametype(hdr, grp->type); + msg_set_grp_evt(hdr, event); + msg_set_dest_droppable(hdr, true); + msg_set_grp_bc_seqno(hdr, seqno); + memcpy(msg_data(hdr), &evt, sizeof(evt)); + TIPC_SKB_CB(skb)->orig_member = m->instance; + __skb_queue_tail(inputq, skb); +} + static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m, int mtyp, struct sk_buff_head *xmitq) { @@ -658,107 +743,95 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup, u32 node = msg_orignode(hdr); u32 port = msg_origport(hdr); struct tipc_member *m, *pm; - struct tipc_msg *ehdr; u16 remitted, in_flight; if (!grp) return; + if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net)) + return; + m = tipc_group_find_member(grp, node, port); switch (msg_type(hdr)) { case GRP_JOIN_MSG: if (!m) m = tipc_group_create_member(grp, node, port, - MBR_QUARANTINED); + 0, MBR_JOINING); if (!m) return; m->bc_syncpt = msg_grp_bc_syncpt(hdr); m->bc_rcv_nxt = m->bc_syncpt; m->window += msg_adv_win(hdr); - /* Wait until PUBLISH event is received */ - if (m->state == MBR_DISCOVERED) { - m->state = MBR_JOINING; - } else if (m->state == MBR_PUBLISHED) { - m->state = MBR_JOINED; - *usr_wakeup = true; - m->usr_pending = false; - tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); - ehdr = buf_msg(m->event_msg); - msg_set_grp_bc_seqno(ehdr, m->bc_syncpt); - __skb_queue_tail(inputq, m->event_msg); - } - list_del_init(&m->congested); + /* Wait until PUBLISH event is received if necessary */ + if (m->state != MBR_PUBLISHED) + return; + + /* Member can be taken into service */ + m->state = MBR_JOINED; + tipc_group_open(m, usr_wakeup); tipc_group_update_member(m, 0); + tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); + tipc_group_create_event(grp, m, TIPC_PUBLISHED, + m->bc_syncpt, inputq); return; case GRP_LEAVE_MSG: if (!m) return; m->bc_syncpt = 
msg_grp_bc_syncpt(hdr); list_del_init(&m->list); - list_del_init(&m->congested); - *usr_wakeup = true; - - /* Wait until WITHDRAW event is received */ - if (m->state != MBR_LEAVING) { - tipc_group_decr_active(grp, m); - m->state = MBR_LEAVING; - return; - } - /* Otherwise deliver already received WITHDRAW event */ - ehdr = buf_msg(m->event_msg); - msg_set_grp_bc_seqno(ehdr, m->bc_syncpt); - __skb_queue_tail(inputq, m->event_msg); + tipc_group_open(m, usr_wakeup); + tipc_group_decr_active(grp, m); + m->state = MBR_LEAVING; + tipc_group_create_event(grp, m, TIPC_WITHDRAWN, + m->bc_syncpt, inputq); return; case GRP_ADV_MSG: if (!m) return; m->window += msg_adv_win(hdr); - *usr_wakeup = m->usr_pending; - m->usr_pending = false; - list_del_init(&m->congested); + tipc_group_open(m, usr_wakeup); return; case GRP_ACK_MSG: if (!m) return; m->bc_acked = msg_grp_bc_acked(hdr); if (--grp->bc_ackers) - break; + return; + list_del_init(&m->small_win); + m->group->open = true; *usr_wakeup = true; - m->usr_pending = false; + tipc_group_update_member(m, 0); return; case GRP_RECLAIM_MSG: if (!m) return; - *usr_wakeup = m->usr_pending; - m->usr_pending = false; tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq); m->window = ADV_IDLE; + tipc_group_open(m, usr_wakeup); return; case GRP_REMIT_MSG: if (!m || m->state != MBR_RECLAIMING) return; - list_del_init(&m->list); - grp->active_cnt--; remitted = msg_grp_remitted(hdr); /* Messages preceding the REMIT still in receive queue */ if (m->advertised > remitted) { m->state = MBR_REMITTED; in_flight = m->advertised - remitted; + m->advertised = ADV_IDLE + in_flight; + return; } - /* All messages preceding the REMIT have been read */ - if (m->advertised <= remitted) { - m->state = MBR_JOINED; - in_flight = 0; - } - /* ..and the REMIT overtaken by more messages => re-advertise */ + /* This should never happen */ if (m->advertised < remitted) - tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq); + pr_warn_ratelimited("Unexpected REMIT msg\n"); - m->advertised = ADV_IDLE + in_flight; + /* All messages preceding the REMIT have been read */ + m->state = MBR_JOINED; + grp->active_cnt--; + m->advertised = ADV_IDLE; /* Set oldest pending member to active and advertise */ if (list_empty(&grp->pending)) @@ -780,11 +853,10 @@ void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup, void tipc_group_member_evt(struct tipc_group *grp, bool *usr_wakeup, int *sk_rcvbuf, - struct sk_buff *skb, + struct tipc_msg *hdr, struct sk_buff_head *inputq, struct sk_buff_head *xmitq) { - struct tipc_msg *hdr = buf_msg(skb); struct tipc_event *evt = (void *)msg_data(hdr); u32 instance = evt->found_lower; u32 node = evt->port.node; @@ -792,80 +864,59 @@ void tipc_group_member_evt(struct tipc_group *grp, int event = evt->event; struct tipc_member *m; struct net *net; - bool node_up; u32 self; if (!grp) - goto drop; + return; net = grp->net; self = tipc_own_addr(net); if (!grp->loopback && node == self && port == grp->portid) - goto drop; - - /* Convert message before delivery to user */ - msg_set_hdr_sz(hdr, GROUP_H_SIZE); - msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE); - msg_set_type(hdr, TIPC_GRP_MEMBER_EVT); - msg_set_origport(hdr, port); - msg_set_orignode(hdr, node); - msg_set_nametype(hdr, grp->type); - msg_set_grp_evt(hdr, event); + return; m = tipc_group_find_member(grp, node, port); - if (event == TIPC_PUBLISHED) { - if (!m) - m = tipc_group_create_member(grp, node, port, - MBR_DISCOVERED); - if (!m) - goto drop; - - /* Hold back event if JOIN message not yet received */ - if 
(m->state == MBR_DISCOVERED) { - m->event_msg = skb; - m->state = MBR_PUBLISHED; - } else { - msg_set_grp_bc_seqno(hdr, m->bc_syncpt); - __skb_queue_tail(inputq, skb); - m->state = MBR_JOINED; - *usr_wakeup = true; - m->usr_pending = false; + switch (event) { + case TIPC_PUBLISHED: + /* Send and wait for arrival of JOIN message if necessary */ + if (!m) { + m = tipc_group_create_member(grp, node, port, instance, + MBR_PUBLISHED); + if (!m) + break; + tipc_group_update_member(m, 0); + tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq); + break; } + + if (m->state != MBR_JOINING) + break; + + /* Member can be taken into service */ m->instance = instance; - TIPC_SKB_CB(skb)->orig_member = m->instance; + m->state = MBR_JOINED; + tipc_group_open(m, usr_wakeup); + tipc_group_update_member(m, 0); tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq); - if (m->window < ADV_IDLE) - tipc_group_update_member(m, 0); - else - list_del_init(&m->congested); - } else if (event == TIPC_WITHDRAWN) { + tipc_group_create_event(grp, m, TIPC_PUBLISHED, + m->bc_syncpt, inputq); + break; + case TIPC_WITHDRAWN: if (!m) - goto drop; - - TIPC_SKB_CB(skb)->orig_member = m->instance; + break; - *usr_wakeup = true; - m->usr_pending = false; - node_up = tipc_node_is_up(net, node); - - /* Hold back event if more messages might be expected */ - if (m->state != MBR_LEAVING && node_up) { - m->event_msg = skb; - tipc_group_decr_active(grp, m); - m->state = MBR_LEAVING; - } else { - if (node_up) - msg_set_grp_bc_seqno(hdr, m->bc_syncpt); - else - msg_set_grp_bc_seqno(hdr, m->bc_rcv_nxt); - __skb_queue_tail(inputq, skb); - } + tipc_group_decr_active(grp, m); + m->state = MBR_LEAVING; list_del_init(&m->list); - list_del_init(&m->congested); + tipc_group_open(m, usr_wakeup); + + /* Only send event if no LEAVE message can be expected */ + if (!tipc_node_is_up(net, node)) + tipc_group_create_event(grp, m, TIPC_WITHDRAWN, + m->bc_rcv_nxt, inputq); + break; + default: + break; } *sk_rcvbuf = tipc_group_rcvbuf_limit(grp); - return; -drop: - kfree_skb(skb); } diff --git a/net/tipc/group.h b/net/tipc/group.h index d525e1cd7de5..f4a596ed9848 100644 --- a/net/tipc/group.h +++ b/net/tipc/group.h @@ -44,8 +44,10 @@ struct tipc_msg; struct tipc_group *tipc_group_create(struct net *net, u32 portid, struct tipc_group_req *mreq); +void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcv_buf); void tipc_group_delete(struct net *net, struct tipc_group *grp); -void tipc_group_add_member(struct tipc_group *grp, u32 node, u32 port); +void tipc_group_add_member(struct tipc_group *grp, u32 node, + u32 port, u32 instance); struct tipc_nlist *tipc_group_dests(struct tipc_group *grp); void tipc_group_self(struct tipc_group *grp, struct tipc_name_seq *seq, int *scope); @@ -54,7 +56,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq, struct sk_buff_head *xmitq); void tipc_group_member_evt(struct tipc_group *grp, bool *wakeup, - int *sk_rcvbuf, struct sk_buff *skb, + int *sk_rcvbuf, struct tipc_msg *hdr, struct sk_buff_head *inputq, struct sk_buff_head *xmitq); void tipc_group_proto_rcv(struct tipc_group *grp, bool *wakeup, @@ -65,9 +67,9 @@ void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack); bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport, int len, struct tipc_member **m); bool tipc_group_bc_cong(struct tipc_group *grp, int len); +bool tipc_group_is_open(struct tipc_group *grp); void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node, u32 port, 
struct sk_buff_head *xmitq); u16 tipc_group_bc_snd_nxt(struct tipc_group *grp); void tipc_group_update_member(struct tipc_member *m, int len); -int tipc_group_size(struct tipc_group *grp); #endif diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 8e884ed06d4b..32dc33a94bc7 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -642,9 +642,13 @@ void tipc_mon_delete(struct net *net, int bearer_id) { struct tipc_net *tn = tipc_net(net); struct tipc_monitor *mon = tipc_monitor(net, bearer_id); - struct tipc_peer *self = get_self(net, bearer_id); + struct tipc_peer *self; struct tipc_peer *peer, *tmp; + if (!mon) + return; + + self = get_self(net, bearer_id); write_lock_bh(&mon->lock); tn->monitors[bearer_id] = NULL; list_for_each_entry_safe(peer, tmp, &self->list, list) { diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index b3829bcf63c7..64cdd3c302b0 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -328,7 +328,8 @@ static struct publication *tipc_nameseq_insert_publ(struct net *net, list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscrp_report_overlap(s, publ->lower, publ->upper, TIPC_PUBLISHED, publ->ref, - publ->node, created_subseq); + publ->node, publ->scope, + created_subseq); } return publ; } @@ -398,19 +399,21 @@ found: list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { tipc_subscrp_report_overlap(s, publ->lower, publ->upper, TIPC_WITHDRAWN, publ->ref, - publ->node, removed_subseq); + publ->node, publ->scope, + removed_subseq); } return publ; } /** - * tipc_nameseq_subscribe - attach a subscription, and issue - * the prescribed number of events if there is any sub- + * tipc_nameseq_subscribe - attach a subscription, and optionally + * issue the prescribed number of events if there is any sub- * sequence overlapping with the requested sequence */ static void tipc_nameseq_subscribe(struct name_seq *nseq, - struct tipc_subscription *s) + struct tipc_subscription *s, + bool status) { struct sub_seq *sseq = nseq->sseqs; struct tipc_name_seq ns; @@ -420,7 +423,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, tipc_subscrp_get(s); list_add(&s->nameseq_list, &nseq->subscriptions); - if (!sseq) + if (!status || !sseq) return; while (sseq != &nseq->sseqs[nseq->first_free]) { @@ -434,6 +437,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, sseq->upper, TIPC_PUBLISHED, crs->ref, crs->node, + crs->scope, must_report); must_report = 0; } @@ -597,7 +601,7 @@ not_found: return ref; } -bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain, +bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 scope, struct list_head *dsts, int *dstcnt, u32 exclude, bool all) { @@ -607,9 +611,6 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain, struct name_seq *seq; struct sub_seq *sseq; - if (!tipc_in_scope(domain, self)) - return false; - *dstcnt = 0; rcu_read_lock(); seq = nametbl_find_seq(net, type); @@ -620,7 +621,7 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain, if (likely(sseq)) { info = sseq->info; list_for_each_entry(publ, &info->zone_list, zone_list) { - if (!tipc_in_scope(domain, publ->node)) + if (publ->scope != scope) continue; if (publ->ref == exclude && publ->node == self) continue; @@ -638,13 +639,14 @@ exit: return !list_empty(dsts); } -int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, - u32 limit, struct list_head *dports) +int tipc_nametbl_mc_lookup(struct 
net *net, u32 type, u32 lower, u32 upper, + u32 scope, bool exact, struct list_head *dports) { - struct name_seq *seq; - struct sub_seq *sseq; struct sub_seq *sseq_stop; struct name_info *info; + struct publication *p; + struct name_seq *seq; + struct sub_seq *sseq; int res = 0; rcu_read_lock(); @@ -656,15 +658,12 @@ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, sseq = seq->sseqs + nameseq_locate_subseq(seq, lower); sseq_stop = seq->sseqs + seq->first_free; for (; sseq != sseq_stop; sseq++) { - struct publication *publ; - if (sseq->lower > upper) break; - info = sseq->info; - list_for_each_entry(publ, &info->node_list, node_list) { - if (publ->scope <= limit) - tipc_dest_push(dports, 0, publ->ref); + list_for_each_entry(p, &info->node_list, node_list) { + if (p->scope == scope || (!exact && p->scope < scope)) + tipc_dest_push(dports, 0, p->ref); } if (info->cluster_list_size != info->node_list_size) @@ -681,7 +680,7 @@ exit: * - Determines if any node local ports overlap */ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower, - u32 upper, u32 domain, + u32 upper, u32 scope, struct tipc_nlist *nodes) { struct sub_seq *sseq, *stop; @@ -700,7 +699,7 @@ void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower, for (; sseq != stop && sseq->lower <= upper; sseq++) { info = sseq->info; list_for_each_entry(publ, &info->zone_list, zone_list) { - if (tipc_in_scope(domain, publ->node)) + if (publ->scope == scope) tipc_nlist_add(nodes, publ->node); } } @@ -712,7 +711,7 @@ exit: /* tipc_nametbl_build_group - build list of communication group members */ void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp, - u32 type, u32 domain) + u32 type, u32 scope) { struct sub_seq *sseq, *stop; struct name_info *info; @@ -730,9 +729,9 @@ void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp, for (; sseq != stop; sseq++) { info = sseq->info; list_for_each_entry(p, &info->zone_list, zone_list) { - if (!tipc_in_scope(domain, p->node)) + if (p->scope != scope) continue; - tipc_group_add_member(grp, p->node, p->ref); + tipc_group_add_member(grp, p->node, p->ref, p->lower); } } spin_unlock_bh(&seq->lock); @@ -811,7 +810,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref, /** * tipc_nametbl_subscribe - add a subscription object to the name table */ -void tipc_nametbl_subscribe(struct tipc_subscription *s) +void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status) { struct tipc_net *tn = net_generic(s->net, tipc_net_id); u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap); @@ -825,7 +824,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s) seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]); if (seq) { spin_lock_bh(&seq->lock); - tipc_nameseq_subscribe(seq, s); + tipc_nameseq_subscribe(seq, s, status); spin_unlock_bh(&seq->lock); } else { tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns); diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 71926e429446..b595d8aa00f0 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -100,8 +100,8 @@ struct name_table { int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node); -int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper, - u32 limit, struct list_head *dports); +int tipc_nametbl_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper, + u32 scope, 
bool exact, struct list_head *dports); void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp, u32 type, u32 domain); void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower, @@ -121,7 +121,7 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type, struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, u32 lower, u32 node, u32 ref, u32 key); -void tipc_nametbl_subscribe(struct tipc_subscription *s); +void tipc_nametbl_subscribe(struct tipc_subscription *s, bool status); void tipc_nametbl_unsubscribe(struct tipc_subscription *s); int tipc_nametbl_init(struct net *net); void tipc_nametbl_stop(struct net *net); diff --git a/net/tipc/server.c b/net/tipc/server.c index d60c30342327..8ee5e86b7870 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -489,8 +489,8 @@ void tipc_conn_terminate(struct tipc_server *s, int conid) } } -bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, - u32 lower, u32 upper, int *conid) +bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower, + u32 upper, u32 filter, int *conid) { struct tipc_subscriber *scbr; struct tipc_subscr sub; @@ -501,7 +501,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, sub.seq.lower = lower; sub.seq.upper = upper; sub.timeout = TIPC_WAIT_FOREVER; - sub.filter = TIPC_SUB_PORTS; + sub.filter = filter; *(u32 *)&sub.usr_handle = port; con = tipc_alloc_conn(tipc_topsrv(net)); diff --git a/net/tipc/server.h b/net/tipc/server.h index 2113c9192633..17f49ee44cfd 100644 --- a/net/tipc/server.h +++ b/net/tipc/server.h @@ -41,6 +41,9 @@ #include <net/net_namespace.h> #define TIPC_SERVER_NAME_LEN 32 +#define TIPC_SUB_CLUSTER_SCOPE 0x20 +#define TIPC_SUB_NODE_SCOPE 0x40 +#define TIPC_SUB_NO_STATUS 0x80 /** * struct tipc_server - TIPC server structure @@ -83,8 +86,8 @@ struct tipc_server { int tipc_conn_sendmsg(struct tipc_server *s, int conid, struct sockaddr_tipc *addr, void *data, size_t len); -bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, - u32 lower, u32 upper, int *conid); +bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower, + u32 upper, u32 filter, int *conid); void tipc_topsrv_kern_unsubscr(struct net *net, int conid); /** diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 0cdf5c2ad881..1f236271766c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -715,7 +715,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); - struct tipc_group *grp = tsk->group; + struct tipc_group *grp; u32 revents = 0; sock_poll_wait(file, sk_sleep(sk), wait); @@ -727,18 +727,18 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock, switch (sk->sk_state) { case TIPC_ESTABLISHED: + case TIPC_CONNECTING: if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk)) revents |= POLLOUT; /* fall thru' */ case TIPC_LISTEN: - case TIPC_CONNECTING: if (!skb_queue_empty(&sk->sk_receive_queue)) revents |= POLLIN | POLLRDNORM; break; case TIPC_OPEN: - if (!grp || tipc_group_size(grp)) - if (!tsk->cong_link_cnt) - revents |= POLLOUT; + grp = tsk->group; + if ((!grp || tipc_group_is_open(grp)) && !tsk->cong_link_cnt) + revents |= POLLOUT; if (!tipc_sk_type_connectionless(sk)) break; if (skb_queue_empty(&sk->sk_receive_queue)) @@ -928,21 +928,22 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, struct list_head *cong_links = &tsk->cong_links; int blks = tsk_blocks(GROUP_H_SIZE + dlen); 
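+	/* Editorial note (inferred from the loop below, not part of the original patch): the anycast path retries the name-table lookup up to three times (while (++lookups < 4)), looking for a destination member that is not congested before giving up. */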
struct tipc_group *grp = tsk->group; + struct tipc_msg *hdr = &tsk->phdr; struct tipc_member *first = NULL; struct tipc_member *mbr = NULL; struct net *net = sock_net(sk); u32 node, port, exclude; - u32 type, inst, domain; struct list_head dsts; + u32 type, inst, scope; int lookups = 0; int dstcnt, rc; bool cong; INIT_LIST_HEAD(&dsts); - type = dest->addr.name.name.type; + type = msg_nametype(hdr); inst = dest->addr.name.name.instance; - domain = addr_domain(net, dest->scope); + scope = msg_lookup_scope(hdr); exclude = tipc_group_exclude(grp); while (++lookups < 4) { @@ -950,7 +951,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, /* Look for a non-congested destination member, if any */ while (1) { - if (!tipc_nametbl_lookup(net, type, inst, domain, &dsts, + if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, &dstcnt, exclude, false)) return -EHOSTUNREACH; tipc_dest_pop(&dsts, &node, &port); @@ -1079,22 +1080,23 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); - struct tipc_name_seq *seq = &dest->addr.nameseq; struct tipc_sock *tsk = tipc_sk(sk); struct tipc_group *grp = tsk->group; + struct tipc_msg *hdr = &tsk->phdr; struct net *net = sock_net(sk); - u32 domain, exclude, dstcnt; + u32 type, inst, scope, exclude; struct list_head dsts; + u32 dstcnt; INIT_LIST_HEAD(&dsts); - if (seq->lower != seq->upper) - return -ENOTSUPP; - - domain = addr_domain(net, dest->scope); + type = msg_nametype(hdr); + inst = dest->addr.name.name.instance; + scope = msg_lookup_scope(hdr); exclude = tipc_group_exclude(grp); - if (!tipc_nametbl_lookup(net, seq->type, seq->lower, domain, - &dsts, &dstcnt, exclude, true)) + + if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts, + &dstcnt, exclude, true)) return -EHOSTUNREACH; if (dstcnt == 1) { @@ -1116,24 +1118,29 @@ static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, struct sk_buff_head *inputq) { - u32 scope = TIPC_CLUSTER_SCOPE; u32 self = tipc_own_addr(net); + u32 type, lower, upper, scope; struct sk_buff *skb, *_skb; - u32 lower = 0, upper = ~0; - struct sk_buff_head tmpq; u32 portid, oport, onode; + struct sk_buff_head tmpq; struct list_head dports; - struct tipc_msg *msg; - int user, mtyp, hsz; + struct tipc_msg *hdr; + int user, mtyp, hlen; + bool exact; __skb_queue_head_init(&tmpq); INIT_LIST_HEAD(&dports); skb = tipc_skb_peek(arrvq, &inputq->lock); for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { - msg = buf_msg(skb); - user = msg_user(msg); - mtyp = msg_type(msg); + hdr = buf_msg(skb); + user = msg_user(hdr); + mtyp = msg_type(hdr); + hlen = skb_headroom(skb) + msg_hdr_sz(hdr); + oport = msg_origport(hdr); + onode = msg_orignode(hdr); + type = msg_nametype(hdr); + if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) { spin_lock_bh(&inputq->lock); if (skb_peek(arrvq) == skb) { @@ -1144,21 +1151,31 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, spin_unlock_bh(&inputq->lock); continue; } - hsz = skb_headroom(skb) + msg_hdr_sz(msg); - oport = msg_origport(msg); - onode = msg_orignode(msg); - if (onode == self) - scope = TIPC_NODE_SCOPE; - - /* Create destination port list and message clones: */ - if (!msg_in_group(msg)) { - lower = msg_namelower(msg); - upper = msg_nameupper(msg); + + /* Group messages require exact scope match */ + if (msg_in_group(hdr)) { + lower = 0; + upper = ~0; 
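+			/* Editorial note (assumption): lower = 0 and upper = ~0 form a wildcard instance range, so group multicast is matched on name type and scope alone. */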
+ scope = msg_lookup_scope(hdr); + exact = true; + } else { + /* TIPC_NODE_SCOPE means "any scope" in this context */ + if (onode == self) + scope = TIPC_NODE_SCOPE; + else + scope = TIPC_CLUSTER_SCOPE; + exact = false; + lower = msg_namelower(hdr); + upper = msg_nameupper(hdr); } - tipc_nametbl_mc_translate(net, msg_nametype(msg), lower, upper, - scope, &dports); + + /* Create destination port list: */ + tipc_nametbl_mc_lookup(net, type, lower, upper, + scope, exact, &dports); + + /* Clone message per destination */ while (tipc_dest_pop(&dports, NULL, &portid)) { - _skb = __pskb_copy(skb, hsz, GFP_ATOMIC); + _skb = __pskb_copy(skb, hlen, GFP_ATOMIC); if (_skb) { msg_set_destport(buf_msg(_skb), portid); __skb_queue_tail(&tmpq, _skb); @@ -1933,8 +1950,7 @@ static void tipc_sk_proto_rcv(struct sock *sk, break; case TOP_SRV: tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf, - skb, inputq, xmitq); - skb = NULL; + hdr, inputq, xmitq); break; default: break; @@ -2732,7 +2748,6 @@ void tipc_sk_rht_destroy(struct net *net) static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) { struct net *net = sock_net(&tsk->sk); - u32 domain = addr_domain(net, mreq->scope); struct tipc_group *grp = tsk->group; struct tipc_msg *hdr = &tsk->phdr; struct tipc_name_seq seq; @@ -2740,6 +2755,8 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) if (mreq->type < TIPC_RESERVED_TYPES) return -EACCES; + if (mreq->scope > TIPC_NODE_SCOPE) + return -EINVAL; if (grp) return -EACCES; grp = tipc_group_create(net, tsk->portid, mreq); @@ -2752,16 +2769,16 @@ static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq) seq.type = mreq->type; seq.lower = mreq->instance; seq.upper = seq.lower; - tipc_nametbl_build_group(net, grp, mreq->type, domain); + tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope); rc = tipc_sk_publish(tsk, mreq->scope, &seq); if (rc) { tipc_group_delete(net, grp); tsk->group = NULL; } - - /* Eliminate any risk that a broadcast overtakes the sent JOIN */ + /* Eliminate any risk that a broadcast overtakes sent JOINs */ tsk->mc_method.rcast = true; tsk->mc_method.mandatory = true; + tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf); return rc; } diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 251065dfd8df..44df528ed6ab 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -118,15 +118,19 @@ void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap, void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, u32 event, u32 port_ref, - u32 node, int must) + u32 node, u32 scope, int must) { + u32 filter = htohl(sub->evt.s.filter, sub->swap); struct tipc_name_seq seq; tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq); if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper)) return; - if (!must && - !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS)) + if (!must && !(filter & TIPC_SUB_PORTS)) + return; + if (filter & TIPC_SUB_CLUSTER_SCOPE && scope == TIPC_NODE_SCOPE) + return; + if (filter & TIPC_SUB_NODE_SCOPE && scope != TIPC_NODE_SCOPE) return; tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref, @@ -286,7 +290,8 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net, } static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s, - struct tipc_subscriber *subscriber, int swap) + struct tipc_subscriber *subscriber, int swap, + bool status) { struct tipc_net *tn = net_generic(net, tipc_net_id); struct 
tipc_subscription *sub = NULL; @@ -299,7 +304,7 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s, spin_lock_bh(&subscriber->lock); list_add(&sub->subscrp_list, &subscriber->subscrp_list); sub->subscriber = subscriber; - tipc_nametbl_subscribe(sub); + tipc_nametbl_subscribe(sub, status); tipc_subscrb_get(subscriber); spin_unlock_bh(&subscriber->lock); @@ -323,6 +328,7 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid, { struct tipc_subscriber *subscriber = usr_data; struct tipc_subscr *s = (struct tipc_subscr *)buf; + bool status; int swap; /* Determine subscriber's endianness */ @@ -334,8 +340,8 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid, s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); return tipc_subscrp_cancel(s, subscriber); } - - tipc_subscrp_subscribe(net, s, subscriber, swap); + status = !(s->filter & htohl(TIPC_SUB_NO_STATUS, swap)); + tipc_subscrp_subscribe(net, s, subscriber, swap, status); } /* Handle one request to establish a new subscriber */ diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index ee52957dc952..f3edca775d9f 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -71,7 +71,7 @@ int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower, u32 found_upper); void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, u32 event, - u32 port_ref, u32 node, int must); + u32 port_ref, u32 node, u32 scope, int must); void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap, struct tipc_name_seq *out); u32 tipc_subscrp_convert_seq_type(u32 type, int swap); diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 413d4f4e6334..a1d10993d08a 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -126,6 +126,11 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, wdev->ibss_fixed = params->channel_fixed; wdev->ibss_dfs_possible = params->userspace_handles_dfs; wdev->chandef = params->chandef; + if (connkeys) { + params->wep_keys = connkeys->params; + params->wep_tx_key = connkeys->def; + } + #ifdef CONFIG_CFG80211_WEXT wdev->wext.ibss.chandef = params->chandef; #endif diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index e7c64a8dce54..bbb9907bfa86 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -692,7 +692,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev, return rdev_mgmt_tx(rdev, wdev, params, cookie); } -bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, +bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags) { struct wiphy *wiphy = wdev->wiphy; @@ -708,7 +708,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); u16 stype; - trace_cfg80211_rx_mgmt(wdev, freq, sig_mbm); + trace_cfg80211_rx_mgmt(wdev, freq, sig_dbm); stype = (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) >> 4; if (!(stypes->rx & BIT(stype))) { @@ -735,7 +735,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm, /* Indicate the received Action frame to user space */ if (nl80211_send_mgmt(rdev, wdev, reg->nlportid, - freq, sig_mbm, + freq, sig_dbm, buf, len, flags, GFP_ATOMIC)) continue; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 213d0c498c97..c084dd2205ac 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -734,11 +734,12 @@ struct key_parse { bool def_uni, def_multi; }; -static int 
nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) +static int nl80211_parse_key_new(struct genl_info *info, struct nlattr *key, + struct key_parse *k) { struct nlattr *tb[NL80211_KEY_MAX + 1]; int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, - nl80211_key_policy, NULL); + nl80211_key_policy, info->extack); if (err) return err; @@ -771,7 +772,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) if (tb[NL80211_KEY_TYPE]) { k->type = nla_get_u32(tb[NL80211_KEY_TYPE]); if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) - return -EINVAL; + return genl_err_attr(info, -EINVAL, + tb[NL80211_KEY_TYPE]); } if (tb[NL80211_KEY_DEFAULT_TYPES]) { @@ -779,7 +781,8 @@ static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, tb[NL80211_KEY_DEFAULT_TYPES], - nl80211_key_default_policy, NULL); + nl80211_key_default_policy, + info->extack); if (err) return err; @@ -820,8 +823,10 @@ static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) if (info->attrs[NL80211_ATTR_KEY_TYPE]) { k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); - if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) + if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) { + GENL_SET_ERR_MSG(info, "key type out of range"); return -EINVAL; + } } if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) { @@ -850,31 +855,42 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) k->type = -1; if (info->attrs[NL80211_ATTR_KEY]) - err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k); + err = nl80211_parse_key_new(info, info->attrs[NL80211_ATTR_KEY], k); else err = nl80211_parse_key_old(info, k); if (err) return err; - if (k->def && k->defmgmt) + if (k->def && k->defmgmt) { + GENL_SET_ERR_MSG(info, "key with def && defmgmt is invalid"); return -EINVAL; + } if (k->defmgmt) { - if (k->def_uni || !k->def_multi) + if (k->def_uni || !k->def_multi) { + GENL_SET_ERR_MSG(info, "defmgmt key must be mcast"); return -EINVAL; + } } if (k->idx != -1) { if (k->defmgmt) { - if (k->idx < 4 || k->idx > 5) + if (k->idx < 4 || k->idx > 5) { + GENL_SET_ERR_MSG(info, + "defmgmt key idx not 4 or 5"); return -EINVAL; + } } else if (k->def) { - if (k->idx < 0 || k->idx > 3) + if (k->idx < 0 || k->idx > 3) { + GENL_SET_ERR_MSG(info, "def key idx not 0-3"); return -EINVAL; + } } else { - if (k->idx < 0 || k->idx > 5) + if (k->idx < 0 || k->idx > 5) { + GENL_SET_ERR_MSG(info, "key idx not 0-5"); return -EINVAL; + } } } @@ -883,8 +899,9 @@ static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) static struct cfg80211_cached_keys * nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, - struct nlattr *keys, bool *no_ht) + struct genl_info *info, bool *no_ht) { + struct nlattr *keys = info->attrs[NL80211_ATTR_KEYS]; struct key_parse parse; struct nlattr *key; struct cfg80211_cached_keys *result; @@ -909,17 +926,22 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, memset(&parse, 0, sizeof(parse)); parse.idx = -1; - err = nl80211_parse_key_new(key, &parse); + err = nl80211_parse_key_new(info, key, &parse); if (err) goto error; err = -EINVAL; if (!parse.p.key) goto error; - if (parse.idx < 0 || parse.idx > 3) + if (parse.idx < 0 || parse.idx > 3) { + GENL_SET_ERR_MSG(info, "key index out of range [0-3]"); goto error; + } if (parse.def) { - if (def) + if (def) { + GENL_SET_ERR_MSG(info, + "only one key can be default"); goto error; + } def = 1; result->def = parse.idx; 
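+			/* Editorial note: the check below enforces that the connect (WEP) default key is valid as both the unicast and the multicast default. */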
if (!parse.def_uni || !parse.def_multi) @@ -932,6 +954,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, goto error; if (parse.p.cipher != WLAN_CIPHER_SUITE_WEP40 && parse.p.cipher != WLAN_CIPHER_SUITE_WEP104) { + GENL_SET_ERR_MSG(info, "connect key must be WEP"); err = -EINVAL; goto error; } @@ -947,6 +970,7 @@ nl80211_parse_connkeys(struct cfg80211_registered_device *rdev, if (result->def < 0) { err = -EINVAL; + GENL_SET_ERR_MSG(info, "need a default/TX key"); goto error; } @@ -7817,6 +7841,11 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb, intbss->ts_boottime, NL80211_BSS_PAD)) goto nla_put_failure; + if (!nl80211_put_signal(msg, intbss->pub.chains, + intbss->pub.chain_signal, + NL80211_BSS_CHAIN_SIGNAL)) + goto nla_put_failure; + switch (rdev->wiphy.signal_type) { case CFG80211_SIGNAL_TYPE_MBM: if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal)) @@ -8613,9 +8642,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) { bool no_ht = false; - connkeys = nl80211_parse_connkeys(rdev, - info->attrs[NL80211_ATTR_KEYS], - &no_ht); + connkeys = nl80211_parse_connkeys(rdev, info, &no_ht); if (IS_ERR(connkeys)) return PTR_ERR(connkeys); @@ -9019,8 +9046,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) } if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { - connkeys = nl80211_parse_connkeys(rdev, - info->attrs[NL80211_ATTR_KEYS], NULL); + connkeys = nl80211_parse_connkeys(rdev, info, NULL); if (IS_ERR(connkeys)) return PTR_ERR(connkeys); } @@ -11361,7 +11387,8 @@ static int nl80211_nan_add_func(struct sk_buff *skb, break; case NL80211_NAN_FUNC_FOLLOW_UP: if (!tb[NL80211_NAN_FUNC_FOLLOW_UP_ID] || - !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID]) { + !tb[NL80211_NAN_FUNC_FOLLOW_UP_REQ_ID] || + !tb[NL80211_NAN_FUNC_FOLLOW_UP_DEST]) { err = -EINVAL; goto out; } @@ -13944,7 +13971,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev, if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || - (from_ap && reason && + (reason && nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) || (from_ap && nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) || diff --git a/net/wireless/scan.c b/net/wireless/scan.c index f6c5fe482506..d36c3eb7b931 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -981,6 +981,9 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, found->ts = tmp->ts; found->ts_boottime = tmp->ts_boottime; found->parent_tsf = tmp->parent_tsf; + found->pub.chains = tmp->pub.chains; + memcpy(found->pub.chain_signal, tmp->pub.chain_signal, + IEEE80211_MAX_CHAINS); ether_addr_copy(found->parent_bssid, tmp->parent_bssid); } else { struct cfg80211_internal_bss *new; @@ -1233,6 +1236,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy, tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info); tmp.ts_boottime = data->boottime_ns; tmp.parent_tsf = data->parent_tsf; + tmp.pub.chains = data->chains; + memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS); ether_addr_copy(tmp.parent_bssid, data->parent_bssid); signal_valid = abs(data->chan->center_freq - channel->center_freq) <= diff --git a/net/wireless/trace.h b/net/wireless/trace.h index f3353fe5b35b..bcfedd39e7a3 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2544,20 +2544,20 @@ DEFINE_EVENT(cfg80211_netdev_mac_evt, cfg80211_del_sta, ); 
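+/* Editorial note: the sig_mbm -> sig_dbm rename below mirrors the cfg80211_rx_mgmt() parameter rename above; the value is a signal level in dBm. */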
TRACE_EVENT(cfg80211_rx_mgmt, - TP_PROTO(struct wireless_dev *wdev, int freq, int sig_mbm), - TP_ARGS(wdev, freq, sig_mbm), + TP_PROTO(struct wireless_dev *wdev, int freq, int sig_dbm), + TP_ARGS(wdev, freq, sig_dbm), TP_STRUCT__entry( WDEV_ENTRY __field(int, freq) - __field(int, sig_mbm) + __field(int, sig_dbm) ), TP_fast_assign( WDEV_ASSIGN; __entry->freq = freq; - __entry->sig_mbm = sig_mbm; + __entry->sig_dbm = sig_dbm; ), - TP_printk(WDEV_PR_FMT ", freq: %d, sig mbm: %d", - WDEV_PR_ARG, __entry->freq, __entry->sig_mbm) + TP_printk(WDEV_PR_FMT ", freq: %d, sig dbm: %d", + WDEV_PR_ARG, __entry->freq, __entry->sig_dbm) ); TRACE_EVENT(cfg80211_mgmt_tx_status, diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 00641b611aed..75982506617b 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -23,32 +23,114 @@ #include <linux/notifier.h> #ifdef CONFIG_XFRM_OFFLOAD -int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features) +struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again) { int err; + unsigned long flags; struct xfrm_state *x; + struct sk_buff *skb2; + struct softnet_data *sd; + netdev_features_t esp_features = features; struct xfrm_offload *xo = xfrm_offload(skb); - if (skb_is_gso(skb)) - return 0; + if (!xo) + return skb; - if (xo) { - x = skb->sp->xvec[skb->sp->len - 1]; - if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) - return 0; + if (!(features & NETIF_F_HW_ESP)) + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); + + x = skb->sp->xvec[skb->sp->len - 1]; + if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) + return skb; + + local_irq_save(flags); + sd = this_cpu_ptr(&softnet_data); + err = !skb_queue_empty(&sd->xfrm_backlog); + local_irq_restore(flags); + if (err) { + *again = true; + return skb; + } + + if (skb_is_gso(skb)) { + struct net_device *dev = skb->dev; + + if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) { + struct sk_buff *segs; + + /* Packet got rerouted, fixup features and segment it. 
*/ + esp_features = esp_features & ~(NETIF_F_HW_ESP + | NETIF_F_GSO_ESP); + + segs = skb_gso_segment(skb, esp_features); + if (IS_ERR(segs)) { + kfree_skb(skb); + atomic_long_inc(&dev->tx_dropped); + return NULL; + } else { + consume_skb(skb); + skb = segs; + } + } + } + + if (!skb->next) { x->outer_mode->xmit(x, skb); - err = x->type_offload->xmit(x, skb, features); + xo->flags |= XFRM_DEV_RESUME; + + err = x->type_offload->xmit(x, skb, esp_features); if (err) { + if (err == -EINPROGRESS) + return NULL; + XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); - return err; + kfree_skb(skb); + return NULL; } skb_push(skb, skb->data - skb_mac_header(skb)); + + return skb; } - return 0; + skb2 = skb; + + do { + struct sk_buff *nskb = skb2->next; + skb2->next = NULL; + + xo = xfrm_offload(skb2); + xo->flags |= XFRM_DEV_RESUME; + + x->outer_mode->xmit(x, skb2); + + err = x->type_offload->xmit(x, skb2, esp_features); + if (!err) { + skb2->next = nskb; + } else if (err != -EINPROGRESS) { + XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); + skb2->next = nskb; + kfree_skb_list(skb2); + return NULL; + } else { + if (skb == skb2) + skb = nskb; + + if (!skb) + return NULL; + + goto skip_push; + } + + skb_push(skb2, skb2->data - skb_mac_header(skb2)); + +skip_push: + skb2 = nskb; + } while (skb2); + + return skb; } EXPORT_SYMBOL_GPL(validate_xmit_xfrm); @@ -120,8 +202,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) if (!x->type_offload || x->encap) return false; - if ((x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev)) && - !xdst->child->xfrm && x->type->get_mtu) { + if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) && + (!xdst->child->xfrm && x->type->get_mtu)) { mtu = x->type->get_mtu(x, xdst->child_mtu_cached); if (skb->len <= mtu) @@ -140,19 +222,82 @@ ok: return true; } EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok); + +void xfrm_dev_resume(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + int ret = NETDEV_TX_BUSY; + struct netdev_queue *txq; + struct softnet_data *sd; + unsigned long flags; + + rcu_read_lock(); + txq = netdev_pick_tx(dev, skb, NULL); + + HARD_TX_LOCK(dev, txq, smp_processor_id()); + if (!netif_xmit_frozen_or_stopped(txq)) + skb = dev_hard_start_xmit(skb, dev, txq, &ret); + HARD_TX_UNLOCK(dev, txq); + + if (!dev_xmit_complete(ret)) { + local_irq_save(flags); + sd = this_cpu_ptr(&softnet_data); + skb_queue_tail(&sd->xfrm_backlog, skb); + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(xfrm_dev_resume); + +void xfrm_dev_backlog(struct softnet_data *sd) +{ + struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog; + struct sk_buff_head list; + struct sk_buff *skb; + + if (skb_queue_empty(xfrm_backlog)) + return; + + __skb_queue_head_init(&list); + + spin_lock(&xfrm_backlog->lock); + skb_queue_splice_init(xfrm_backlog, &list); + spin_unlock(&xfrm_backlog->lock); + + while (!skb_queue_empty(&list)) { + skb = __skb_dequeue(&list); + xfrm_dev_resume(skb); + } + +} #endif -static int xfrm_dev_register(struct net_device *dev) +static int xfrm_api_check(struct net_device *dev) { - if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops) - return NOTIFY_BAD; +#ifdef CONFIG_XFRM_OFFLOAD if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) && !(dev->features & NETIF_F_HW_ESP)) return NOTIFY_BAD; + if ((dev->features & NETIF_F_HW_ESP) && + (!(dev->xfrmdev_ops && + dev->xfrmdev_ops->xdo_dev_state_add && + dev->xfrmdev_ops->xdo_dev_state_delete))) + return 
NOTIFY_BAD; +#else + if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM)) + return NOTIFY_BAD; +#endif + return NOTIFY_DONE; } +static int xfrm_dev_register(struct net_device *dev) +{ + return xfrm_api_check(dev); +} + static int xfrm_dev_unregister(struct net_device *dev) { xfrm_policy_cache_flush(); @@ -161,16 +306,7 @@ static int xfrm_dev_unregister(struct net_device *dev) static int xfrm_dev_feat_change(struct net_device *dev) { - if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops) - return NOTIFY_BAD; - else if (!(dev->features & NETIF_F_HW_ESP)) - dev->xfrmdev_ops = NULL; - - if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) && - !(dev->features & NETIF_F_HW_ESP)) - return NOTIFY_BAD; - - return NOTIFY_DONE; + return xfrm_api_check(dev); } static int xfrm_dev_down(struct net_device *dev) diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index ac277b97e0d7..26b10eb7a206 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -8,15 +8,29 @@ * */ +#include <linux/bottom_half.h> +#include <linux/interrupt.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/percpu.h> #include <net/dst.h> #include <net/ip.h> #include <net/xfrm.h> #include <net/ip_tunnels.h> #include <net/ip6_tunnel.h> +struct xfrm_trans_tasklet { + struct tasklet_struct tasklet; + struct sk_buff_head queue; +}; + +struct xfrm_trans_cb { + int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb); +}; + +#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0])) + static struct kmem_cache *secpath_cachep __read_mostly; static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); @@ -25,6 +39,8 @@ static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1]; static struct gro_cells gro_cells; static struct net_device xfrm_napi_dev; +static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet); + int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) { int err = 0; @@ -207,7 +223,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) xfrm_address_t *daddr; struct xfrm_mode *inner_mode; u32 mark = skb->mark; - unsigned int family; + unsigned int family = AF_UNSPEC; int decaps = 0; int async = 0; bool xfrm_gro = false; @@ -216,6 +232,16 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) if (encap_type < 0) { x = xfrm_input_state(skb); + + if (unlikely(x->km.state != XFRM_STATE_VALID)) { + if (x->km.state == XFRM_STATE_ACQ) + XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); + else + XFRM_INC_STATS(net, + LINUX_MIB_XFRMINSTATEINVALID); + goto drop; + } + family = x->outer_mode->afinfo->family; /* An encap_type of -1 indicates async resumption. 
*/ @@ -466,9 +492,41 @@ int xfrm_input_resume(struct sk_buff *skb, int nexthdr) } EXPORT_SYMBOL(xfrm_input_resume); +static void xfrm_trans_reinject(unsigned long data) +{ + struct xfrm_trans_tasklet *trans = (void *)data; + struct sk_buff_head queue; + struct sk_buff *skb; + + __skb_queue_head_init(&queue); + skb_queue_splice_init(&trans->queue, &queue); + + while ((skb = __skb_dequeue(&queue))) + XFRM_TRANS_SKB_CB(skb)->finish(dev_net(skb->dev), NULL, skb); +} + +int xfrm_trans_queue(struct sk_buff *skb, + int (*finish)(struct net *, struct sock *, + struct sk_buff *)) +{ + struct xfrm_trans_tasklet *trans; + + trans = this_cpu_ptr(&xfrm_trans_tasklet); + + if (skb_queue_len(&trans->queue) >= netdev_max_backlog) + return -ENOBUFS; + + XFRM_TRANS_SKB_CB(skb)->finish = finish; + skb_queue_tail(&trans->queue, skb); + tasklet_schedule(&trans->tasklet); + return 0; +} +EXPORT_SYMBOL(xfrm_trans_queue); + void __init xfrm_input_init(void) { int err; + int i; init_dummy_netdev(&xfrm_napi_dev); err = gro_cells_init(&gro_cells, &xfrm_napi_dev); @@ -479,4 +537,13 @@ void __init xfrm_input_init(void) sizeof(struct sec_path), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + + for_each_possible_cpu(i) { + struct xfrm_trans_tasklet *trans; + + trans = &per_cpu(xfrm_trans_tasklet, i); + __skb_queue_head_init(&trans->queue); + tasklet_init(&trans->tasklet, xfrm_trans_reinject, + (unsigned long)trans); + } } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index e3a5aca9cdda..d8a8129b9232 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1168,9 +1168,15 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, again: pol = rcu_dereference(sk->sk_policy[dir]); if (pol != NULL) { - bool match = xfrm_selector_match(&pol->selector, fl, family); + bool match; int err = 0; + if (pol->family != family) { + pol = NULL; + goto out; + } + + match = xfrm_selector_match(&pol->selector, fl, family); if (match) { if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { pol = NULL; @@ -1835,6 +1841,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, sizeof(struct xfrm_policy *) * num_pols) == 0 && xfrm_xdst_can_reuse(xdst, xfrm, err)) { dst_hold(&xdst->u.dst); + xfrm_pols_put(pols, num_pols); while (err > 0) xfrm_state_put(xfrm[--err]); return xdst; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 1b7856be3eeb..cc4c519cad76 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1343,6 +1343,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, if (orig->aead) { x->aead = xfrm_algo_aead_clone(orig->aead); + x->geniv = orig->geniv; if (!x->aead) goto error; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 983b0233767b..bdb48e5dba04 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1419,11 +1419,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) { + u16 prev_family; int i; if (nr > XFRM_MAX_DEPTH) return -EINVAL; + prev_family = family; + for (i = 0; i < nr; i++) { /* We never validated the ut->family value, so many * applications simply leave it at zero. 
The check was @@ -1435,6 +1438,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) if (!ut[i].family) ut[i].family = family; + if ((ut[i].mode == XFRM_MODE_TRANSPORT) && + (ut[i].family != prev_family)) + return -EINVAL; + + prev_family = ut[i].family; + switch (ut[i].family) { case AF_INET: break; @@ -1445,6 +1454,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) default: return -EINVAL; } + + switch (ut[i].id.proto) { + case IPPROTO_AH: + case IPPROTO_ESP: + case IPPROTO_COMP: +#if IS_ENABLED(CONFIG_IPV6) + case IPPROTO_ROUTING: + case IPPROTO_DSTOPTS: +#endif + case IPSEC_PROTO_ANY: + break; + default: + return -EINVAL; + } + } return 0; @@ -2470,7 +2494,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_PROTO] = { .type = NLA_U8 }, [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, - [XFRMA_OUTPUT_MARK] = { .len = NLA_U32 }, + [XFRMA_OUTPUT_MARK] = { .type = NLA_U32 }, }; static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 4fb944a7ecf8..3ff7a05bea9a 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -41,6 +41,7 @@ hostprogs-y += xdp_redirect hostprogs-y += xdp_redirect_map hostprogs-y += xdp_redirect_cpu hostprogs-y += xdp_monitor +hostprogs-y += xdp_rxq_info hostprogs-y += syscall_tp # Libbpf dependencies @@ -90,6 +91,7 @@ xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o xdp_redirect_cpu-objs := bpf_load.o $(LIBBPF) xdp_redirect_cpu_user.o xdp_monitor-objs := bpf_load.o $(LIBBPF) xdp_monitor_user.o +xdp_rxq_info-objs := bpf_load.o $(LIBBPF) xdp_rxq_info_user.o syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o # Tell kbuild to always build the programs @@ -139,6 +141,7 @@ always += xdp_redirect_kern.o always += xdp_redirect_map_kern.o always += xdp_redirect_cpu_kern.o always += xdp_monitor_kern.o +always += xdp_rxq_info_kern.o always += syscall_tp_kern.o HOSTCFLAGS += -I$(objtree)/usr/include @@ -182,6 +185,7 @@ HOSTLOADLIBES_xdp_redirect += -lelf HOSTLOADLIBES_xdp_redirect_map += -lelf HOSTLOADLIBES_xdp_redirect_cpu += -lelf HOSTLOADLIBES_xdp_monitor += -lelf +HOSTLOADLIBES_xdp_rxq_info += -lelf HOSTLOADLIBES_syscall_tp += -lelf # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: diff --git a/samples/bpf/xdp_rxq_info_kern.c b/samples/bpf/xdp_rxq_info_kern.c new file mode 100644 index 000000000000..3fd209291653 --- /dev/null +++ b/samples/bpf/xdp_rxq_info_kern.c @@ -0,0 +1,96 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. 
+ * + * Example of how to extract XDP RX-queue info + */ +#include <uapi/linux/bpf.h> +#include "bpf_helpers.h" + +/* Config setup from userspace + * + * Userspace sets up the ifindex in config_map, to verify that + * ctx->ingress_ifindex is correct (against the configured ifindex) + */ +struct config { + __u32 action; + int ifindex; +}; +struct bpf_map_def SEC("maps") config_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(struct config), + .max_entries = 1, +}; + +/* Common stats data record (shared with userspace) */ +struct datarec { + __u64 processed; + __u64 issue; +}; + +struct bpf_map_def SEC("maps") stats_global_map = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(struct datarec), + .max_entries = 1, +}; + +#define MAX_RXQs 64 + +/* Stats per rx_queue_index (per CPU) */ +struct bpf_map_def SEC("maps") rx_queue_index_map = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(struct datarec), + .max_entries = MAX_RXQs + 1, +}; + +SEC("xdp_prog0") +int xdp_prognum0(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct datarec *rec, *rxq_rec; + int ingress_ifindex; + struct config *config; + u32 key = 0; + + /* Global stats record */ + rec = bpf_map_lookup_elem(&stats_global_map, &key); + if (!rec) + return XDP_ABORTED; + rec->processed++; + + /* Accessing ctx->ingress_ifindex causes BPF to rewrite the BPF + * instructions inside the kernel to access xdp_rxq->dev->ifindex + */ + ingress_ifindex = ctx->ingress_ifindex; + + config = bpf_map_lookup_elem(&config_map, &key); + if (!config) + return XDP_ABORTED; + + /* Simple test: check that the ctx-provided ifindex is as expected */ + if (ingress_ifindex != config->ifindex) { + /* count this error case */ + rec->issue++; + return XDP_ABORTED; + } + + /* Update stats per rx_queue_index. Handle the case where + * rx_queue_index is larger than what the stats map can hold. + */ + key = ctx->rx_queue_index; + if (key >= MAX_RXQs) + key = MAX_RXQs; + rxq_rec = bpf_map_lookup_elem(&rx_queue_index_map, &key); + if (!rxq_rec) + return XDP_ABORTED; + rxq_rec->processed++; + if (key == MAX_RXQs) + rxq_rec->issue++; + + return config->action; +} + +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c new file mode 100644 index 000000000000..32430e8b3a6a --- /dev/null +++ b/samples/bpf/xdp_rxq_info_user.c @@ -0,0 +1,531 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. 
+ */ +static const char *__doc__ = " XDP RX-queue info extract example\n\n" + "Monitor how many packets per sec (pps) are received\n" + "per NIC RX queue index and which CPU processed the packet\n" + ; + +#include <errno.h> +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> +#include <string.h> +#include <unistd.h> +#include <locale.h> +#include <sys/resource.h> +#include <getopt.h> +#include <net/if.h> +#include <time.h> + +#include <arpa/inet.h> +#include <linux/if_link.h> + +#include "libbpf.h" +#include "bpf_load.h" +#include "bpf_util.h" + +static int ifindex = -1; +static char ifname_buf[IF_NAMESIZE]; +static char *ifname; + +static __u32 xdp_flags; + +/* Exit return codes */ +#define EXIT_OK 0 +#define EXIT_FAIL 1 +#define EXIT_FAIL_OPTION 2 +#define EXIT_FAIL_XDP 3 +#define EXIT_FAIL_BPF 4 +#define EXIT_FAIL_MEM 5 + +static const struct option long_options[] = { + {"help", no_argument, NULL, 'h' }, + {"dev", required_argument, NULL, 'd' }, + {"skb-mode", no_argument, NULL, 'S' }, + {"sec", required_argument, NULL, 's' }, + {"no-separators", no_argument, NULL, 'z' }, + {"action", required_argument, NULL, 'a' }, + {0, 0, NULL, 0 } +}; + +static void int_exit(int sig) +{ + fprintf(stderr, + "Interrupted: Removing XDP program on ifindex:%d device:%s\n", + ifindex, ifname); + if (ifindex > -1) + set_link_xdp_fd(ifindex, -1, xdp_flags); + exit(EXIT_OK); +} + +struct config { + __u32 action; + int ifindex; +}; +#define XDP_ACTION_MAX (XDP_TX + 1) +#define XDP_ACTION_MAX_STRLEN 11 +static const char *xdp_action_names[XDP_ACTION_MAX] = { + [XDP_ABORTED] = "XDP_ABORTED", + [XDP_DROP] = "XDP_DROP", + [XDP_PASS] = "XDP_PASS", + [XDP_TX] = "XDP_TX", +}; + +static const char *action2str(int action) +{ + if (action < XDP_ACTION_MAX) + return xdp_action_names[action]; + return NULL; +} + +static int parse_xdp_action(char *action_str) +{ + size_t maxlen; + __u64 action = -1; + int i; + + for (i = 0; i < XDP_ACTION_MAX; i++) { + maxlen = XDP_ACTION_MAX_STRLEN; + if (strncmp(xdp_action_names[i], action_str, maxlen) == 0) { + action = i; + break; + } + } + return action; +} + +static void list_xdp_actions(void) +{ + int i; + + printf("Available XDP --action <options>\n"); + for (i = 0; i < XDP_ACTION_MAX; i++) + printf("\t%s\n", xdp_action_names[i]); + printf("\n"); +} + +static void usage(char *argv[]) +{ + int i; + + printf("\nDOCUMENTATION:\n%s\n", __doc__); + printf(" Usage: %s (options-see-below)\n", argv[0]); + printf(" Listing options:\n"); + for (i = 0; long_options[i].name != 0; i++) { + printf(" --%-12s", long_options[i].name); + if (long_options[i].flag != NULL) + printf(" flag (internal value:%d)", + *long_options[i].flag); + else + printf(" short-option: -%c", + long_options[i].val); + printf("\n"); + } + printf("\n"); + list_xdp_actions(); +} + +#define NANOSEC_PER_SEC 1000000000 /* 10^9 */ +static __u64 gettime(void) +{ + struct timespec t; + int res; + + res = clock_gettime(CLOCK_MONOTONIC, &t); + if (res < 0) { + fprintf(stderr, "Error with gettimeofday! 
(%i)\n", res); + exit(EXIT_FAIL); + } + return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec; +} + +/* Common stats data record shared with _kern.c */ +struct datarec { + __u64 processed; + __u64 issue; +}; +struct record { + __u64 timestamp; + struct datarec total; + struct datarec *cpu; +}; +struct stats_record { + struct record stats; + struct record *rxq; +}; + +static struct datarec *alloc_record_per_cpu(void) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + struct datarec *array; + size_t size; + + size = sizeof(struct datarec) * nr_cpus; + array = malloc(size); + if (!array) { + fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus); + exit(EXIT_FAIL_MEM); + } + memset(array, 0, size); + return array; +} + +static struct record *alloc_record_per_rxq(void) +{ + unsigned int nr_rxqs = map_data[2].def.max_entries; + struct record *array; + size_t size; + + size = sizeof(struct record) * nr_rxqs; + array = malloc(size); + if (!array) { + fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs); + exit(EXIT_FAIL_MEM); + } + memset(array, 0, size); + return array; +} + +static struct stats_record *alloc_stats_record(void) +{ + unsigned int nr_rxqs = map_data[2].def.max_entries; + struct stats_record *rec; + int i; + + rec = malloc(sizeof(*rec)); + if (!rec) { + fprintf(stderr, "Mem alloc error\n"); + exit(EXIT_FAIL_MEM); + } + memset(rec, 0, sizeof(*rec)); + rec->rxq = alloc_record_per_rxq(); + for (i = 0; i < nr_rxqs; i++) + rec->rxq[i].cpu = alloc_record_per_cpu(); + + rec->stats.cpu = alloc_record_per_cpu(); + return rec; +} + +static void free_stats_record(struct stats_record *r) +{ + unsigned int nr_rxqs = map_data[2].def.max_entries; + int i; + + for (i = 0; i < nr_rxqs; i++) + free(r->rxq[i].cpu); + + free(r->rxq); + free(r->stats.cpu); + free(r); +} + +static bool map_collect_percpu(int fd, __u32 key, struct record *rec) +{ + /* For percpu maps, userspace gets a value per possible CPU */ + unsigned int nr_cpus = bpf_num_possible_cpus(); + struct datarec values[nr_cpus]; + __u64 sum_processed = 0; + __u64 sum_issue = 0; + int i; + + if ((bpf_map_lookup_elem(fd, &key, values)) != 0) { + fprintf(stderr, + "ERR: bpf_map_lookup_elem failed key:0x%X\n", key); + return false; + } + /* Get time as close as possible to reading map contents */ + rec->timestamp = gettime(); + + /* Record and sum values from each CPU */ + for (i = 0; i < nr_cpus; i++) { + rec->cpu[i].processed = values[i].processed; + sum_processed += values[i].processed; + rec->cpu[i].issue = values[i].issue; + sum_issue += values[i].issue; + } + rec->total.processed = sum_processed; + rec->total.issue = sum_issue; + return true; +} + +static void stats_collect(struct stats_record *rec) +{ + int fd, i, max_rxqs; + + fd = map_data[1].fd; /* map: stats_global_map */ + map_collect_percpu(fd, 0, &rec->stats); + + fd = map_data[2].fd; /* map: rx_queue_index_map */ + max_rxqs = map_data[2].def.max_entries; + for (i = 0; i < max_rxqs; i++) + map_collect_percpu(fd, i, &rec->rxq[i]); +} + +static double calc_period(struct record *r, struct record *p) +{ + double period_ = 0; + __u64 period = 0; + + period = r->timestamp - p->timestamp; + if (period > 0) + period_ = ((double) period / NANOSEC_PER_SEC); + + return period_; +} + +static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_) +{ + __u64 packets = 0; + __u64 pps = 0; + + if (period_ > 0) { + packets = r->processed - p->processed; + pps = packets / period_; + } + return pps; +}
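+/* Editorial worked example: if processed grows from 1000000 to 3000000 over a 2.0 s period, calc_pps() returns (3000000 - 1000000) / 2.0 = 1000000 pps; calc_errs_pps() below applies the same delta/period computation to the issue counter. */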
+static __u64 calc_errs_pps(struct datarec *r, + struct datarec *p, double period_) +{ + __u64 packets = 0; + __u64 pps = 0; + + if (period_ > 0) { + packets = r->issue - p->issue; + pps = packets / period_; + } + return pps; +} + +static void stats_print(struct stats_record *stats_rec, + struct stats_record *stats_prev, + int action) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + unsigned int nr_rxqs = map_data[2].def.max_entries; + double pps = 0, err = 0; + struct record *rec, *prev; + double t; + int rxq; + int i; + + /* Header */ + printf("\nRunning XDP on dev:%s (ifindex:%d) action:%s\n", + ifname, ifindex, action2str(action)); + + /* stats_global_map */ + { + char *fmt_rx = "%-15s %-7d %'-11.0f %'-10.0f %s\n"; + char *fm2_rx = "%-15s %-7s %'-11.0f %'-10.0f\n"; + char *errstr = ""; + + printf("%-15s %-7s %-11s %-11s\n", + "XDP stats", "CPU", "pps", "issue-pps"); + + rec = &stats_rec->stats; + prev = &stats_prev->stats; + t = calc_period(rec, prev); + for (i = 0; i < nr_cpus; i++) { + struct datarec *r = &rec->cpu[i]; + struct datarec *p = &prev->cpu[i]; + + pps = calc_pps(r, p, t); + err = calc_errs_pps(r, p, t); + if (err > 0) + errstr = "invalid-ifindex"; + if (pps > 0) + printf(fmt_rx, "XDP-RX CPU", + i, pps, err, errstr); + } + pps = calc_pps(&rec->total, &prev->total, t); + err = calc_errs_pps(&rec->total, &prev->total, t); + printf(fm2_rx, "XDP-RX CPU", "total", pps, err); + } + + /* rx_queue_index_map */ + printf("\n%-15s %-7s %-11s %-11s\n", + "RXQ stats", "RXQ:CPU", "pps", "issue-pps"); + + for (rxq = 0; rxq < nr_rxqs; rxq++) { + char *fmt_rx = "%-15s %3d:%-3d %'-11.0f %'-10.0f %s\n"; + char *fm2_rx = "%-15s %3d:%-3s %'-11.0f %'-10.0f\n"; + char *errstr = ""; + int rxq_ = rxq; + + /* Last RXQ in map catches overflows */ + if (rxq_ == nr_rxqs - 1) + rxq_ = -1; + + rec = &stats_rec->rxq[rxq]; + prev = &stats_prev->rxq[rxq]; + t = calc_period(rec, prev); + for (i = 0; i < nr_cpus; i++) { + struct datarec *r = &rec->cpu[i]; + struct datarec *p = &prev->cpu[i]; + + pps = calc_pps(r, p, t); + err = calc_errs_pps(r, p, t); + if (err > 0) { + if (rxq_ == -1) + errstr = "map-overflow-RXQ"; + else + errstr = "err"; + } + if (pps > 0) + printf(fmt_rx, "rx_queue_index", + rxq_, i, pps, err, errstr); + } + pps = calc_pps(&rec->total, &prev->total, t); + err = calc_errs_pps(&rec->total, &prev->total, t); + if (pps || err) + printf(fm2_rx, "rx_queue_index", rxq_, "sum", pps, err); + } +} + + +/* Pointer swap trick */ +static inline void swap(struct stats_record **a, struct stats_record **b) +{ + struct stats_record *tmp; + + tmp = *a; + *a = *b; + *b = tmp; +} + +static void stats_poll(int interval, int action) +{ + struct stats_record *record, *prev; + + record = alloc_stats_record(); + prev = alloc_stats_record(); + stats_collect(record); + + while (1) { + swap(&prev, &record); + stats_collect(record); + stats_print(record, prev, action); + sleep(interval); + } + + free_stats_record(record); + free_stats_record(prev); +} + + +int main(int argc, char **argv) +{ + struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; + bool use_separators = true; + struct config cfg = { 0 }; + char filename[256]; + int longindex = 0; + int interval = 2; + __u32 key = 0; + int opt, err; + + char action_str_buf[XDP_ACTION_MAX_STRLEN + 1 /* for \0 */] = { 0 }; + int action = XDP_PASS; /* Default action */ + char *action_str = NULL; + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + if (setrlimit(RLIMIT_MEMLOCK, &r)) { + perror("setrlimit(RLIMIT_MEMLOCK)"); + return 1; + } + + if (load_bpf_file(filename)) { + fprintf(stderr, "ERR in load_bpf_file(): %s", bpf_log_buf); 
+ return EXIT_FAIL; + } + + if (!prog_fd[0]) { + fprintf(stderr, "ERR: load_bpf_file: %s\n", strerror(errno)); + return EXIT_FAIL; + } + + /* Parse command line args */ + while ((opt = getopt_long(argc, argv, "hSd:", + long_options, &longindex)) != -1) { + switch (opt) { + case 'd': + if (strlen(optarg) >= IF_NAMESIZE) { + fprintf(stderr, "ERR: --dev name too long\n"); + goto error; + } + ifname = (char *)&ifname_buf; + strncpy(ifname, optarg, IF_NAMESIZE); + ifindex = if_nametoindex(ifname); + if (ifindex == 0) { + fprintf(stderr, + "ERR: --dev name unknown err(%d):%s\n", + errno, strerror(errno)); + goto error; + } + break; + case 's': + interval = atoi(optarg); + break; + case 'S': + xdp_flags |= XDP_FLAGS_SKB_MODE; + break; + case 'z': + use_separators = false; + break; + case 'a': + action_str = (char *)&action_str_buf; + strncpy(action_str, optarg, XDP_ACTION_MAX_STRLEN); + break; + case 'h': + error: + default: + usage(argv); + return EXIT_FAIL_OPTION; + } + } + /* Required option */ + if (ifindex == -1) { + fprintf(stderr, "ERR: required option --dev missing\n"); + usage(argv); + return EXIT_FAIL_OPTION; + } + cfg.ifindex = ifindex; + + /* Parse action string */ + if (action_str) { + action = parse_xdp_action(action_str); + if (action < 0) { + fprintf(stderr, "ERR: Invalid XDP --action: %s\n", + action_str); + list_xdp_actions(); + return EXIT_FAIL_OPTION; + } + } + cfg.action = action; + + /* Trick to pretty-print with thousands separators: use %' */ + if (use_separators) + setlocale(LC_NUMERIC, "en_US"); + + /* User side: set up ifindex in config_map */ + err = bpf_map_update_elem(map_fd[0], &key, &cfg, 0); + if (err) { + fprintf(stderr, "Store config failed (err:%d)\n", err); + exit(EXIT_FAIL_BPF); + } + + /* Remove XDP program when program is interrupted */ + signal(SIGINT, int_exit); + + if (set_link_xdp_fd(ifindex, prog_fd[0], xdp_flags) < 0) { + fprintf(stderr, "link set xdp fd failed\n"); + return EXIT_FAIL_XDP; + } + + stats_poll(interval, action); + return EXIT_OK; +} diff --git a/security/Kconfig b/security/Kconfig index e8e449444e65..3d4debd0257e 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -54,6 +54,17 @@ config SECURITY_NETWORK implement socket and networking access controls. If you are unsure how to answer this question, answer N. +config PAGE_TABLE_ISOLATION + bool "Remove the kernel mapping in user mode" + default y + depends on X86_64 && !UML + help + This feature reduces the number of hardware side channels by + ensuring that the majority of kernel addresses are not mapped + into userspace. + + See Documentation/x86/pagetable-isolation.txt for more details.
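The PAGE_TABLE_ISOLATION help text is terse, so a concrete runtime check may help. Below is a minimal user-space sketch, assuming the wider PTI series also exposes a "pti" flag in /proc/cpuinfo; that flag name is an assumption taken from the related x86 feature-bit patches, not from this Kconfig hunk itself:

	#include <stdio.h>
	#include <string.h>

	/* Sketch only: the "pti" cpuinfo flag name is assumed, not guaranteed. */
	int main(void)
	{
		char line[4096], *tok;
		FILE *fp = fopen("/proc/cpuinfo", "r");

		if (!fp) {
			perror("fopen /proc/cpuinfo");
			return 2;
		}
		while (fgets(line, sizeof(line), fp)) {
			if (strncmp(line, "flags", 5))
				continue;	/* only the feature-flag line matters */
			/* Tokenize the space-separated flag list. */
			for (tok = strtok(line, " \t\n"); tok;
			     tok = strtok(NULL, " \t\n")) {
				if (!strcmp(tok, "pti")) {
					puts("page table isolation: active");
					fclose(fp);
					return 0;
				}
			}
			break;	/* one flags line is enough */
		}
		puts("page table isolation: not reported");
		fclose(fp);
		return 1;
	}

On kernels built without this option the flag simply never appears, so the check degrades gracefully.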
+ config SECURITY_INFINIBAND bool "Infiniband Security Hooks" depends on SECURITY && INFINIBAND diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c index ed9b4d0f9f7e..8c558cbce930 100644 --- a/security/apparmor/mount.c +++ b/security/apparmor/mount.c @@ -329,6 +329,9 @@ static int match_mnt_path_str(struct aa_profile *profile, AA_BUG(!mntpath); AA_BUG(!buffer); + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) + return 0; + error = aa_path_name(mntpath, path_flags(profile, mntpath), buffer, &mntpnt, &info, profile->disconnected); if (error) @@ -380,6 +383,9 @@ static int match_mnt(struct aa_profile *profile, const struct path *path, AA_BUG(!profile); AA_BUG(devpath && !devbuffer); + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) + return 0; + if (devpath) { error = aa_path_name(devpath, path_flags(profile, devpath), devbuffer, &devname, &info, @@ -558,6 +564,9 @@ static int profile_umount(struct aa_profile *profile, struct path *path, AA_BUG(!profile); AA_BUG(!path); + if (!PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) + return 0; + error = aa_path_name(path, path_flags(profile, path), buffer, &name, &info, profile->disconnected); if (error) @@ -613,7 +622,8 @@ static struct aa_label *build_pivotroot(struct aa_profile *profile, AA_BUG(!new_path); AA_BUG(!old_path); - if (profile_unconfined(profile)) + if (profile_unconfined(profile) || + !PROFILE_MEDIATES(profile, AA_CLASS_MOUNT)) return aa_get_newest_label(&profile->label); error = aa_path_name(old_path, path_flags(profile, old_path), diff --git a/security/commoncap.c b/security/commoncap.c index 4f8e09340956..48620c93d697 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -348,21 +348,18 @@ static __u32 sansflags(__u32 m) return m & ~VFS_CAP_FLAGS_EFFECTIVE; } -static bool is_v2header(size_t size, __le32 magic) +static bool is_v2header(size_t size, const struct vfs_cap_data *cap) { - __u32 m = le32_to_cpu(magic); if (size != XATTR_CAPS_SZ_2) return false; - return sansflags(m) == VFS_CAP_REVISION_2; + return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_2; } -static bool is_v3header(size_t size, __le32 magic) +static bool is_v3header(size_t size, const struct vfs_cap_data *cap) { - __u32 m = le32_to_cpu(magic); - if (size != XATTR_CAPS_SZ_3) return false; - return sansflags(m) == VFS_CAP_REVISION_3; + return sansflags(le32_to_cpu(cap->magic_etc)) == VFS_CAP_REVISION_3; } /* @@ -405,7 +402,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, fs_ns = inode->i_sb->s_user_ns; cap = (struct vfs_cap_data *) tmpbuf; - if (is_v2header((size_t) ret, cap->magic_etc)) { + if (is_v2header((size_t) ret, cap)) { /* If this is sizeof(vfs_cap_data) then we're ok with the * on-disk value, so return that. 
*/ if (alloc) @@ -413,7 +410,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, else kfree(tmpbuf); return ret; - } else if (!is_v3header((size_t) ret, cap->magic_etc)) { + } else if (!is_v3header((size_t) ret, cap)) { kfree(tmpbuf); return -EINVAL; } @@ -470,9 +467,9 @@ static kuid_t rootid_from_xattr(const void *value, size_t size, return make_kuid(task_ns, rootid); } -static bool validheader(size_t size, __le32 magic) +static bool validheader(size_t size, const struct vfs_cap_data *cap) { - return is_v2header(size, magic) || is_v3header(size, magic); + return is_v2header(size, cap) || is_v3header(size, cap); } /* @@ -495,7 +492,7 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size) if (!*ivalue) return -EINVAL; - if (!validheader(size, cap->magic_etc)) + if (!validheader(size, cap)) return -EINVAL; if (!capable_wrt_inode_uidgid(inode, CAP_SETFCAP)) return -EPERM; diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 038a180d3f81..cbe818eda336 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -325,7 +325,7 @@ static int hdac_component_master_match(struct device *dev, void *data) */ int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops) { - if (WARN_ON(!hdac_acomp)) + if (!hdac_acomp) return -ENODEV; hdac_acomp->audio_ops = aops; diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index a81aacf684b2..37e1cf8218ff 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -271,6 +271,8 @@ enum { CXT_FIXUP_HP_SPECTRE, CXT_FIXUP_HP_GATE_MIC, CXT_FIXUP_MUTE_LED_GPIO, + CXT_FIXUP_HEADSET_MIC, + CXT_FIXUP_HP_MIC_NO_PRESENCE, }; /* for hda_fixup_thinkpad_acpi() */ @@ -350,6 +352,18 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec, } } +static void cxt_fixup_headset_mic(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct conexant_spec *spec = codec->spec; + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + spec->parse_flags |= HDA_PINCFG_HEADSET_MIC; + break; + } +} + /* OLPC XO 1.5 fixup */ /* OLPC XO-1.5 supports DC input mode (e.g.
for use with analog sensors) @@ -880,6 +894,19 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_mute_led_gpio, }, + [CXT_FIXUP_HEADSET_MIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = cxt_fixup_headset_mic, + }, + [CXT_FIXUP_HP_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1a, 0x02a1113c }, + { } + }, + .chained = true, + .chain_id = CXT_FIXUP_HEADSET_MIC, + }, }; static const struct snd_pci_quirk cxt5045_fixups[] = { @@ -934,6 +961,8 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 6a4db00511ab..8fd2d9c62c96 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -324,8 +324,12 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0292: alc_update_coef_idx(codec, 0x4, 1<<15, 0); break; - case 0x10ec0215: case 0x10ec0225: + case 0x10ec0295: + case 0x10ec0299: + alc_update_coef_idx(codec, 0x67, 0xf000, 0x3000); + /* fallthrough */ + case 0x10ec0215: case 0x10ec0233: case 0x10ec0236: case 0x10ec0255: @@ -336,10 +340,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0286: case 0x10ec0288: case 0x10ec0285: - case 0x10ec0295: case 0x10ec0298: case 0x10ec0289: - case 0x10ec0299: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; case 0x10ec0275: @@ -6328,6 +6330,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), @@ -6586,6 +6589,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x1b, 0x01011020}, {0x21, 0x02211010}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x90a60130}, + {0x14, 0x90170110}, + {0x1b, 0x01011020}, + {0x21, 0x0221101f}), + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x21, 0x02211030}), diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index 9f521a55d610..b5e41df6bb3a 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c @@ -1051,6 +1051,11 @@ static int acp_audio_probe(struct platform_device *pdev) struct resource *res; const u32 *pdata = pdev->dev.platform_data; + if (!pdata) { + dev_err(&pdev->dev, "Missing platform data\n"); + return -ENODEV; + } + audio_drv_data = 
devm_kzalloc(&pdev->dev, sizeof(struct audio_drv_data), GFP_KERNEL); if (audio_drv_data == NULL) @@ -1058,6 +1063,8 @@ static int acp_audio_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); audio_drv_data->acp_mmio = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(audio_drv_data->acp_mmio)) + return PTR_ERR(audio_drv_data->acp_mmio); /* The following members get populated in device 'open' * function. Until then interrupts are disabled in 'acp_init' diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig index 4a56f3dfba51..dcee145dd179 100644 --- a/sound/soc/atmel/Kconfig +++ b/sound/soc/atmel/Kconfig @@ -64,7 +64,7 @@ config SND_AT91_SOC_SAM9X5_WM8731 config SND_ATMEL_SOC_CLASSD tristate "Atmel ASoC driver for boards using CLASSD" depends on ARCH_AT91 || COMPILE_TEST - select SND_ATMEL_SOC_DMA + select SND_SOC_GENERIC_DMAENGINE_PCM select REGMAP_MMIO help Say Y if you want to add support for Atmel ASoC driver for boards using diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c index b2d42ec1dcd9..56564ce90cb6 100644 --- a/sound/soc/codecs/da7218.c +++ b/sound/soc/codecs/da7218.c @@ -2520,7 +2520,7 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec) } if (da7218->dev_id == DA7218_DEV_ID) { - hpldet_np = of_find_node_by_name(np, "da7218_hpldet"); + hpldet_np = of_get_child_by_name(np, "da7218_hpldet"); if (!hpldet_np) return pdata; diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c index 5f3c42c4f74a..066ea2f4ce7b 100644 --- a/sound/soc/codecs/msm8916-wcd-analog.c +++ b/sound/soc/codecs/msm8916-wcd-analog.c @@ -267,7 +267,7 @@ #define MSM8916_WCD_ANALOG_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000) #define MSM8916_WCD_ANALOG_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S24_LE) + SNDRV_PCM_FMTBIT_S32_LE) static int btn_mask = SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_BTN_4; diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c index a10a724eb448..13354d6304a8 100644 --- a/sound/soc/codecs/msm8916-wcd-digital.c +++ b/sound/soc/codecs/msm8916-wcd-digital.c @@ -194,7 +194,7 @@ SNDRV_PCM_RATE_32000 | \ SNDRV_PCM_RATE_48000) #define MSM8916_WCD_DIGITAL_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ - SNDRV_PCM_FMTBIT_S24_LE) + SNDRV_PCM_FMTBIT_S32_LE) struct msm8916_wcd_digital_priv { struct clk *ahbclk, *mclk; @@ -645,7 +645,7 @@ static int msm8916_wcd_digital_hw_params(struct snd_pcm_substream *substream, RX_I2S_CTL_RX_I2S_MODE_MASK, RX_I2S_CTL_RX_I2S_MODE_16); break; - case SNDRV_PCM_FORMAT_S24_LE: + case SNDRV_PCM_FORMAT_S32_LE: snd_soc_update_bits(dai->codec, LPASS_CDC_CLK_TX_I2S_CTL, TX_I2S_CTL_TX_I2S_MODE_MASK, TX_I2S_CTL_TX_I2S_MODE_32); diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c index 714ce17da717..e853a6dfd33b 100644 --- a/sound/soc/codecs/nau8825.c +++ b/sound/soc/codecs/nau8825.c @@ -905,6 +905,7 @@ static int nau8825_adc_event(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_POST_PMU: + msleep(125); regmap_update_bits(nau8825->regmap, NAU8825_REG_ENA_CTRL, NAU8825_ENABLE_ADC, NAU8825_ENABLE_ADC); break; diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c index 2df91db765ac..64bf26cec20d 100644 --- a/sound/soc/codecs/rt5514-spi.c +++ b/sound/soc/codecs/rt5514-spi.c @@ -289,6 +289,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
dev_err(&rt5514_spi->dev, "%s Failed to request IRQ: %d\n", __func__, ret); + else + device_init_wakeup(rt5514_dsp->dev, true); } return 0; @@ -456,8 +458,6 @@ static int rt5514_spi_probe(struct spi_device *spi) return ret; } - device_init_wakeup(&spi->dev, true); - return 0; } @@ -482,10 +482,13 @@ static int __maybe_unused rt5514_resume(struct device *dev) if (device_may_wakeup(dev)) disable_irq_wake(irq); - if (rt5514_dsp->substream) { - rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf)); - if (buf[0] & RT5514_IRQ_STATUS_BIT) - rt5514_schedule_copy(rt5514_dsp); + if (rt5514_dsp) { + if (rt5514_dsp->substream) { + rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, + sizeof(buf)); + if (buf[0] & RT5514_IRQ_STATUS_BIT) + rt5514_schedule_copy(rt5514_dsp); + } } return 0; diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index 2a5b5d74e697..2dd6e9f990a4 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c @@ -496,7 +496,7 @@ static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = { SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0), - SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_SUPPLY_S("DMIC CLK", 1, SND_SOC_NOPM, 0, 0, rt5514_set_dmic_clk, SND_SOC_DAPM_PRE_PMU), SND_SOC_DAPM_SUPPLY("ADC CLK", RT5514_CLK_CTRL1, diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index f020d2d1eef4..edc152c8a1fe 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -3823,6 +3823,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c, regmap_read(regmap, RT5645_VENDOR_ID, &val); rt5645->v_id = val & 0xff; + regmap_write(rt5645->regmap, RT5645_AD_DA_MIXER, 0x8080); + ret = regmap_register_patch(rt5645->regmap, init_list, ARRAY_SIZE(init_list)); if (ret != 0) diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c index b036c9dc0c8c..d329bf719d80 100644 --- a/sound/soc/codecs/rt5663.c +++ b/sound/soc/codecs/rt5663.c @@ -1560,6 +1560,10 @@ static int rt5663_jack_detect(struct snd_soc_codec *codec, int jack_insert) RT5663_IRQ_POW_SAV_MASK, RT5663_IRQ_POW_SAV_EN); snd_soc_update_bits(codec, RT5663_IRQ_1, RT5663_EN_IRQ_JD1_MASK, RT5663_EN_IRQ_JD1_EN); + snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1, + RT5663_EM_JD_MASK, RT5663_EM_JD_RST); + snd_soc_update_bits(codec, RT5663_EM_JACK_TYPE_1, + RT5663_EM_JD_MASK, RT5663_EM_JD_NOR); while (true) { regmap_read(rt5663->regmap, RT5663_INT_ST_2, &val); diff --git a/sound/soc/codecs/rt5663.h b/sound/soc/codecs/rt5663.h index c5a9b69579ad..03adc8004ba9 100644 --- a/sound/soc/codecs/rt5663.h +++ b/sound/soc/codecs/rt5663.h @@ -1029,6 +1029,10 @@ #define RT5663_POL_EXT_JD_SHIFT 10 #define RT5663_POL_EXT_JD_EN (0x1 << 10) #define RT5663_POL_EXT_JD_DIS (0x0 << 10) +#define RT5663_EM_JD_MASK (0x1 << 7) +#define RT5663_EM_JD_SHIFT 7 +#define RT5663_EM_JD_NOR (0x1 << 7) +#define RT5663_EM_JD_RST (0x0 << 7) /* DACREF LDO Control (0x0112)*/ #define RT5663_PWR_LDO_DACREFL_MASK (0x1 << 9) diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h index 730fb2058869..1ff3edb7bbb6 100644 --- a/sound/soc/codecs/tlv320aic31xx.h +++ b/sound/soc/codecs/tlv320aic31xx.h @@ -116,7 +116,7 @@ struct aic31xx_pdata { /* INT2 interrupt control */ #define AIC31XX_INT2CTRL AIC31XX_REG(0, 49) /* GPIO1 control */ -#define AIC31XX_GPIO1 AIC31XX_REG(0, 50) +#define AIC31XX_GPIO1 AIC31XX_REG(0, 51) #define AIC31XX_DACPRB AIC31XX_REG(0, 60) /* ADC Instruction Set Register */ diff --git
a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c index c482b2e7a7d2..cfe72b9d4356 100644 --- a/sound/soc/codecs/twl4030.c +++ b/sound/soc/codecs/twl4030.c @@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec) struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); struct device_node *twl4030_codec_node = NULL; - twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node, + twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node, "codec"); if (!pdata && twl4030_codec_node) { @@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec) GFP_KERNEL); if (!pdata) { dev_err(codec->dev, "Can not allocate memory\n"); + of_node_put(twl4030_codec_node); return NULL; } twl4030_setup_pdata_of(pdata, twl4030_codec_node); + of_node_put(twl4030_codec_node); } return pdata; diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index 65c059b5ffd7..66e32f5d2917 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c @@ -1733,7 +1733,7 @@ static int wm_adsp_load(struct wm_adsp *dsp) le64_to_cpu(footer->timestamp)); while (pos < firmware->size && - pos - firmware->size > sizeof(*region)) { + sizeof(*region) < firmware->size - pos) { region = (void *)&(firmware->data[pos]); region_name = "Unknown"; reg = 0; @@ -1782,8 +1782,8 @@ static int wm_adsp_load(struct wm_adsp *dsp) regions, le32_to_cpu(region->len), offset, region_name); - if ((pos + le32_to_cpu(region->len) + sizeof(*region)) > - firmware->size) { + if (le32_to_cpu(region->len) > + firmware->size - pos - sizeof(*region)) { adsp_err(dsp, "%s.%d: %s region len %d bytes exceeds file length %zu\n", file, regions, region_name, @@ -2253,7 +2253,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) blocks = 0; while (pos < firmware->size && - pos - firmware->size > sizeof(*blk)) { + sizeof(*blk) < firmware->size - pos) { blk = (void *)(&firmware->data[pos]); type = le16_to_cpu(blk->type); @@ -2327,8 +2327,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) } if (reg) { - if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) > - firmware->size) { + if (le32_to_cpu(blk->len) > + firmware->size - pos - sizeof(*blk)) { adsp_err(dsp, "%s.%d: %s region len %d bytes exceeds file length %zu\n", file, blocks, region_name, diff --git a/sound/soc/fsl/fsl_asrc.h b/sound/soc/fsl/fsl_asrc.h index 0f163abe4ba3..52c27a358933 100644 --- a/sound/soc/fsl/fsl_asrc.h +++ b/sound/soc/fsl/fsl_asrc.h @@ -260,8 +260,8 @@ #define ASRFSTi_OUTPUT_FIFO_SHIFT 12 #define ASRFSTi_OUTPUT_FIFO_MASK (((1 << ASRFSTi_OUTPUT_FIFO_WIDTH) - 1) << ASRFSTi_OUTPUT_FIFO_SHIFT) #define ASRFSTi_IAEi_SHIFT 11 -#define ASRFSTi_IAEi_MASK (1 << ASRFSTi_OAFi_SHIFT) -#define ASRFSTi_IAEi (1 << ASRFSTi_OAFi_SHIFT) +#define ASRFSTi_IAEi_MASK (1 << ASRFSTi_IAEi_SHIFT) +#define ASRFSTi_IAEi (1 << ASRFSTi_IAEi_SHIFT) #define ASRFSTi_INPUT_FIFO_WIDTH 7 #define ASRFSTi_INPUT_FIFO_SHIFT 0 #define ASRFSTi_INPUT_FIFO_MASK ((1 << ASRFSTi_INPUT_FIFO_WIDTH) - 1) diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index f2f51e06e22c..424bafaf51ef 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -38,6 +38,7 @@ #include <linux/ctype.h> #include <linux/device.h> #include <linux/delay.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/of.h> @@ -265,6 +266,8 @@ struct fsl_ssi_private { u32 fifo_watermark; u32 dma_maxburst; + + struct mutex ac97_reg_lock; }; /* @@ -1260,11 +1263,13 @@ static 
void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg, if (reg > 0x7f) return; + mutex_lock(&fsl_ac97_data->ac97_reg_lock); + ret = clk_prepare_enable(fsl_ac97_data->clk); if (ret) { pr_err("ac97 write clk_prepare_enable failed: %d\n", ret); - return; + goto ret_unlock; } lreg = reg << 12; @@ -1278,6 +1283,9 @@ static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg, udelay(100); clk_disable_unprepare(fsl_ac97_data->clk); + +ret_unlock: + mutex_unlock(&fsl_ac97_data->ac97_reg_lock); } static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97, @@ -1285,16 +1293,18 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97, { struct regmap *regs = fsl_ac97_data->regs; - unsigned short val = -1; + unsigned short val = 0; u32 reg_val; unsigned int lreg; int ret; + mutex_lock(&fsl_ac97_data->ac97_reg_lock); + ret = clk_prepare_enable(fsl_ac97_data->clk); if (ret) { pr_err("ac97 read clk_prepare_enable failed: %d\n", ret); - return -1; + goto ret_unlock; } lreg = (reg & 0x7f) << 12; @@ -1309,6 +1319,8 @@ static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97, clk_disable_unprepare(fsl_ac97_data->clk); +ret_unlock: + mutex_unlock(&fsl_ac97_data->ac97_reg_lock); return val; } @@ -1458,12 +1470,6 @@ static int fsl_ssi_probe(struct platform_device *pdev) sizeof(fsl_ssi_ac97_dai)); fsl_ac97_data = ssi_private; - - ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); - if (ret) { - dev_err(&pdev->dev, "could not set AC'97 ops\n"); - return ret; - } } else { /* Initialize this copy of the CPU DAI driver structure */ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template, @@ -1574,6 +1580,15 @@ static int fsl_ssi_probe(struct platform_device *pdev) return ret; } + if (fsl_ssi_is_ac97(ssi_private)) { + mutex_init(&ssi_private->ac97_reg_lock); + ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev); + if (ret) { + dev_err(&pdev->dev, "could not set AC'97 ops\n"); + goto error_ac97_ops; + } + } + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component, &ssi_private->cpu_dai_drv, 1); if (ret) { @@ -1657,6 +1672,13 @@ error_sound_card: fsl_ssi_debugfs_remove(&ssi_private->dbg_stats); error_asoc_register: + if (fsl_ssi_is_ac97(ssi_private)) + snd_soc_set_ac97_ops(NULL); + +error_ac97_ops: + if (fsl_ssi_is_ac97(ssi_private)) + mutex_destroy(&ssi_private->ac97_reg_lock); + if (ssi_private->soc->imx) fsl_ssi_imx_clean(pdev, ssi_private); @@ -1675,8 +1697,10 @@ static int fsl_ssi_remove(struct platform_device *pdev) if (ssi_private->soc->imx) fsl_ssi_imx_clean(pdev, ssi_private); - if (fsl_ssi_is_ac97(ssi_private)) + if (fsl_ssi_is_ac97(ssi_private)) { snd_soc_set_ac97_ops(NULL); + mutex_destroy(&ssi_private->ac97_reg_lock); + } return 0; } diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c index 6f9a8bcf20f3..6dcad0a8a0d0 100644 --- a/sound/soc/intel/boards/kbl_rt5663_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c @@ -101,7 +101,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = { { "ssp0 Tx", NULL, "spk_out" }, { "AIF Playback", NULL, "ssp1 Tx" }, - { "ssp1 Tx", NULL, "hs_out" }, + { "ssp1 Tx", NULL, "codec1_out" }, { "hs_in", NULL, "ssp1 Rx" }, { "ssp1 Rx", NULL, "AIF Capture" }, diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c index 6072164f2d43..271ae3c2c535 100644 --- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c @@ 
-109,7 +109,7 @@ static const struct snd_soc_dapm_route kabylake_map[] = { { "ssp0 Tx", NULL, "spk_out" }, { "AIF Playback", NULL, "ssp1 Tx" }, - { "ssp1 Tx", NULL, "hs_out" }, + { "ssp1 Tx", NULL, "codec1_out" }, { "hs_in", NULL, "ssp1 Rx" }, { "ssp1 Rx", NULL, "AIF Capture" }, diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c index d14c50a60289..3eaac41090ca 100644 --- a/sound/soc/intel/skylake/skl-nhlt.c +++ b/sound/soc/intel/skylake/skl-nhlt.c @@ -119,11 +119,16 @@ static bool skl_check_ep_match(struct device *dev, struct nhlt_endpoint *epnt, if ((epnt->virtual_bus_id == instance_id) && (epnt->linktype == link_type) && - (epnt->direction == dirn) && - (epnt->device_type == dev_type)) - return true; - else - return false; + (epnt->direction == dirn)) { + /* do not check dev_type for DMIC link type */ + if (epnt->linktype == NHLT_LINK_DMIC) + return true; + + if (epnt->device_type == dev_type) + return true; + } + + return false; } struct nhlt_specific_cfg diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c index a072bcf209d2..81923da18ac2 100644 --- a/sound/soc/intel/skylake/skl-topology.c +++ b/sound/soc/intel/skylake/skl-topology.c @@ -2908,7 +2908,7 @@ static int skl_tplg_control_load(struct snd_soc_component *cmpnt, break; default: - dev_warn(bus->dev, "Control load not supported %d:%d:%d\n", + dev_dbg(bus->dev, "Control load not supported %d:%d:%d\n", hdr->ops.get, hdr->ops.put, hdr->ops.info); break; } diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c index ee5055d47d13..a89fe9b6463b 100644 --- a/sound/soc/rockchip/rockchip_spdif.c +++ b/sound/soc/rockchip/rockchip_spdif.c @@ -322,26 +322,30 @@ static int rk_spdif_probe(struct platform_device *pdev) spdif->mclk = devm_clk_get(&pdev->dev, "mclk"); if (IS_ERR(spdif->mclk)) { dev_err(&pdev->dev, "Can't retrieve rk_spdif master clock\n"); - return PTR_ERR(spdif->mclk); + ret = PTR_ERR(spdif->mclk); + goto err_disable_hclk; } ret = clk_prepare_enable(spdif->mclk); if (ret) { dev_err(spdif->dev, "clock enable failed %d\n", ret); - return ret; + goto err_disable_clocks; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(regs)) - return PTR_ERR(regs); + if (IS_ERR(regs)) { + ret = PTR_ERR(regs); + goto err_disable_clocks; + } spdif->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "hclk", regs, &rk_spdif_regmap_config); if (IS_ERR(spdif->regmap)) { dev_err(&pdev->dev, "Failed to initialise managed register map\n"); - return PTR_ERR(spdif->regmap); + ret = PTR_ERR(spdif->regmap); + goto err_disable_clocks; } spdif->playback_dma_data.addr = res->start + SPDIF_SMPDR; @@ -373,6 +377,10 @@ static int rk_spdif_probe(struct platform_device *pdev) err_pm_runtime: pm_runtime_disable(&pdev->dev); +err_disable_clocks: + clk_disable_unprepare(spdif->mclk); +err_disable_hclk: + clk_disable_unprepare(spdif->hclk); return ret; } diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c index 8ddb08714faa..4672688cac32 100644 --- a/sound/soc/sh/rcar/adg.c +++ b/sound/soc/sh/rcar/adg.c @@ -222,7 +222,7 @@ int rsnd_adg_set_cmd_timsel_gen2(struct rsnd_mod *cmd_mod, NULL, &val, NULL); val = val << shift; - mask = 0xffff << shift; + mask = 0x0f1f << shift; rsnd_mod_bset(adg_mod, CMDOUT_TIMSEL, mask, val); @@ -250,7 +250,7 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod, in = in << shift; out = out << shift; - mask = 0xffff << shift; + mask = 0x0f1f << shift; switch 
(id / 2) { case 0: @@ -380,7 +380,7 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *ssi_mod, unsigned int rate) ckr = 0x80000000; } - rsnd_mod_bset(adg_mod, BRGCKR, 0x80FF0000, adg->ckr | ckr); + rsnd_mod_bset(adg_mod, BRGCKR, 0x80770000, adg->ckr | ckr); rsnd_mod_write(adg_mod, BRRA, adg->rbga); rsnd_mod_write(adg_mod, BRRB, adg->rbgb); diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index c70eb2097816..f12a88a21dfa 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -1332,8 +1332,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd) return snd_pcm_lib_preallocate_pages_for_all( rtd->pcm, - SNDRV_DMA_TYPE_CONTINUOUS, - snd_dma_continuous_data(GFP_KERNEL), + SNDRV_DMA_TYPE_DEV, + rtd->card->snd_card->dev, PREALLOC_BUFFER, PREALLOC_BUFFER_MAX); } diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index fd557abfe390..4d750bdf8e24 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c @@ -26,10 +26,7 @@ struct rsnd_dmaen { struct dma_chan *chan; dma_cookie_t cookie; - dma_addr_t dma_buf; unsigned int dma_len; - unsigned int dma_period; - unsigned int dma_cnt; }; struct rsnd_dmapp { @@ -71,38 +68,10 @@ static struct rsnd_mod mem = { /* * Audio DMAC */ -#define rsnd_dmaen_sync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 1) -#define rsnd_dmaen_unsync(dmaen, io, i) __rsnd_dmaen_sync(dmaen, io, i, 0) -static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io, - int i, int sync) -{ - struct device *dev = dmaen->chan->device->dev; - enum dma_data_direction dir; - int is_play = rsnd_io_is_play(io); - dma_addr_t buf; - int len, max; - size_t period; - - len = dmaen->dma_len; - period = dmaen->dma_period; - max = len / period; - i = i % max; - buf = dmaen->dma_buf + (period * i); - - dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - - if (sync) - dma_sync_single_for_device(dev, buf, period, dir); - else - dma_sync_single_for_cpu(dev, buf, period, dir); -} - static void __rsnd_dmaen_complete(struct rsnd_mod *mod, struct rsnd_dai_stream *io) { struct rsnd_priv *priv = rsnd_mod_to_priv(mod); - struct rsnd_dma *dma = rsnd_mod_to_dma(mod); - struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma); bool elapsed = false; unsigned long flags; @@ -115,22 +84,9 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod, */ spin_lock_irqsave(&priv->lock, flags); - if (rsnd_io_is_working(io)) { - rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt); - - /* - * Next period is already started. - * Let's sync Next Next period - * see - * rsnd_dmaen_start() - */ - rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2); - + if (rsnd_io_is_working(io)) elapsed = true; - dmaen->dma_cnt++; - } - spin_unlock_irqrestore(&priv->lock, flags); if (elapsed) @@ -165,14 +121,8 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod, struct rsnd_dma *dma = rsnd_mod_to_dma(mod); struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma); - if (dmaen->chan) { - int is_play = rsnd_io_is_play(io); - + if (dmaen->chan) dmaengine_terminate_all(dmaen->chan); - dma_unmap_single(dmaen->chan->device->dev, - dmaen->dma_buf, dmaen->dma_len, - is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - } return 0; } @@ -237,11 +187,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod, struct device *dev = rsnd_priv_to_dev(priv); struct dma_async_tx_descriptor *desc; struct dma_slave_config cfg = {}; - dma_addr_t buf; - size_t len; - size_t period; int is_play = rsnd_io_is_play(io); - int i; int ret; cfg.direction = is_play ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; @@ -258,19 +204,10 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod, if (ret < 0) return ret; - len = snd_pcm_lib_buffer_bytes(substream); - period = snd_pcm_lib_period_bytes(substream); - buf = dma_map_single(dmaen->chan->device->dev, - substream->runtime->dma_area, - len, - is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (dma_mapping_error(dmaen->chan->device->dev, buf)) { - dev_err(dev, "dma map failed\n"); - return -EIO; - } - desc = dmaengine_prep_dma_cyclic(dmaen->chan, - buf, len, period, + substream->runtime->dma_addr, + snd_pcm_lib_buffer_bytes(substream), + snd_pcm_lib_period_bytes(substream), is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -282,18 +219,7 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod, desc->callback = rsnd_dmaen_complete; desc->callback_param = rsnd_mod_get(dma); - dmaen->dma_buf = buf; - dmaen->dma_len = len; - dmaen->dma_period = period; - dmaen->dma_cnt = 0; - - /* - * synchronize this and next period - * see - * __rsnd_dmaen_complete() - */ - for (i = 0; i < 2; i++) - rsnd_dmaen_sync(dmaen, io, i); + dmaen->dma_len = snd_pcm_lib_buffer_bytes(substream); dmaen->cookie = dmaengine_submit(desc); if (dmaen->cookie < 0) { diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index fece1e5f582f..cbf3bf312d23 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c @@ -446,25 +446,29 @@ static bool rsnd_ssi_pointer_update(struct rsnd_mod *mod, int byte) { struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); + bool ret = false; + int byte_pos; - ssi->byte_pos += byte; + byte_pos = ssi->byte_pos + byte; - if (ssi->byte_pos >= ssi->next_period_byte) { + if (byte_pos >= ssi->next_period_byte) { struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); ssi->period_pos++; ssi->next_period_byte += ssi->byte_per_period; if (ssi->period_pos >= runtime->periods) { - ssi->byte_pos = 0; + byte_pos = 0; ssi->period_pos = 0; ssi->next_period_byte = ssi->byte_per_period; } - return true; + ret = true; } - return false; + WRITE_ONCE(ssi->byte_pos, byte_pos); + + return ret; } /* @@ -838,7 +842,7 @@ static int rsnd_ssi_pointer(struct rsnd_mod *mod, struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod); struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); - *pointer = bytes_to_frames(runtime, ssi->byte_pos); + *pointer = bytes_to_frames(runtime, READ_ONCE(ssi->byte_pos)); return 0; } diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index 4d948757d300..6ff8a36c2c82 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c @@ -125,6 +125,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod, { int hdmi = rsnd_ssi_hdmi_port(io); int ret; + u32 mode = 0; ret = rsnd_ssiu_init(mod, io, priv); if (ret < 0) @@ -136,9 +137,11 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod, * see * rsnd_ssi_config_init() */ - rsnd_mod_write(mod, SSI_MODE, 0x1); + mode = 0x1; } + rsnd_mod_write(mod, SSI_MODE, mode); + if (rsnd_ssi_use_busif(io)) { rsnd_mod_write(mod, SSI_BUSIF_ADINR, rsnd_get_adinr_bit(mod, io) | diff --git a/tools/arch/s390/include/uapi/asm/perf_regs.h b/tools/arch/s390/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..d17dd9e5d516 --- /dev/null +++ b/tools/arch/s390/include/uapi/asm/perf_regs.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_S390_PERF_REGS_H +#define _ASM_S390_PERF_REGS_H + +enum perf_event_s390_regs { + PERF_REG_S390_R0, + PERF_REG_S390_R1, + PERF_REG_S390_R2, + PERF_REG_S390_R3, + 
PERF_REG_S390_R4, + PERF_REG_S390_R5, + PERF_REG_S390_R6, + PERF_REG_S390_R7, + PERF_REG_S390_R8, + PERF_REG_S390_R9, + PERF_REG_S390_R10, + PERF_REG_S390_R11, + PERF_REG_S390_R12, + PERF_REG_S390_R13, + PERF_REG_S390_R14, + PERF_REG_S390_R15, + PERF_REG_S390_FP0, + PERF_REG_S390_FP1, + PERF_REG_S390_FP2, + PERF_REG_S390_FP3, + PERF_REG_S390_FP4, + PERF_REG_S390_FP5, + PERF_REG_S390_FP6, + PERF_REG_S390_FP7, + PERF_REG_S390_FP8, + PERF_REG_S390_FP9, + PERF_REG_S390_FP10, + PERF_REG_S390_FP11, + PERF_REG_S390_FP12, + PERF_REG_S390_FP13, + PERF_REG_S390_FP14, + PERF_REG_S390_FP15, + PERF_REG_S390_MASK, + PERF_REG_S390_PC, + + PERF_REG_S390_MAX +}; + +#endif /* _ASM_S390_PERF_REGS_H */ diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 07a6697466ef..c8ec0ae16bf0 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -9,6 +9,35 @@ MAKE = make CFLAGS += -Wall -O2 CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(CURDIR))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +endif + +FEATURE_USER = .bpf +FEATURE_TESTS = libbfd disassembler-four-args +FEATURE_DISPLAY = libbfd disassembler-four-args + +check_feat := 1 +NON_CHECK_FEAT_TARGETS := clean bpftool_clean +ifdef MAKECMDGOALS +ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),) + check_feat := 0 +endif +endif + +ifeq ($(check_feat),1) +ifeq ($(FEATURES_DUMP),) +include $(srctree)/tools/build/Makefile.feature +else +include $(FEATURES_DUMP) +endif +endif + +ifeq ($(feature-disassembler-four-args), 1) +CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE +endif + %.yacc.c: %.y $(YACC) -o $@ -d $< diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c index 75bf526a0168..30044bc4f389 100644 --- a/tools/bpf/bpf_jit_disasm.c +++ b/tools/bpf/bpf_jit_disasm.c @@ -72,7 +72,14 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes) disassemble_init_for_target(&info); +#ifdef DISASM_FOUR_ARGS_SIGNATURE + disassemble = disassembler(info.arch, + bfd_big_endian(bfdf), + info.mach, + bfdf); +#else disassemble = disassembler(bfdf); +#endif assert(disassemble); do { diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index 45c71b1f682b..2fe2a1bdbe3e 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst @@ -15,12 +15,12 @@ SYNOPSIS *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } } *COMMANDS* := - { **list** | **attach** | **detach** | **help** } + { **show** | **list** | **attach** | **detach** | **help** } MAP COMMANDS ============= -| **bpftool** **cgroup list** *CGROUP* +| **bpftool** **cgroup { show | list }** *CGROUP* | **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*] | **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG* | **bpftool** **cgroup help** @@ -31,7 +31,7 @@ MAP COMMANDS DESCRIPTION =========== - **bpftool cgroup list** *CGROUP* + **bpftool cgroup { show | list }** *CGROUP* List all programs attached to the cgroup *CGROUP*. 
Output will start with program ID followed by attach type, attach flags and program name. diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 421cabc417e6..0ab32b312aec 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -15,13 +15,13 @@ SYNOPSIS *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } } *COMMANDS* := - { **show** | **dump** | **update** | **lookup** | **getnext** | **delete** + { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete** | **pin** | **help** } MAP COMMANDS ============= -| **bpftool** **map show** [*MAP*] +| **bpftool** **map { show | list }** [*MAP*] | **bpftool** **map dump** *MAP* | **bpftool** **map update** *MAP* **key** *BYTES* **value** *VALUE* [*UPDATE_FLAGS*] | **bpftool** **map lookup** *MAP* **key** *BYTES* @@ -36,7 +36,7 @@ MAP COMMANDS DESCRIPTION =========== - **bpftool map show** [*MAP*] + **bpftool map { show | list }** [*MAP*] Show information about loaded maps. If *MAP* is specified show information only about given map, otherwise list all maps currently loaded on the system. diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 81c97c0e9b67..e4ceee7f2dff 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -15,12 +15,12 @@ SYNOPSIS *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } } *COMMANDS* := - { **show** | **dump xlated** | **dump jited** | **pin** | **load** | **help** } + { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | **help** } MAP COMMANDS ============= -| **bpftool** **prog show** [*PROG*] +| **bpftool** **prog { show | list }** [*PROG*] | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}] | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}] | **bpftool** **prog pin** *PROG* *FILE* @@ -31,7 +31,7 @@ MAP COMMANDS DESCRIPTION =========== - **bpftool prog show** [*PROG*] + **bpftool prog { show | list }** [*PROG*] Show information about loaded programs. If *PROG* is specified show information only about given program, otherwise list all programs currently loaded on the system.
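The { show | list } description above maps directly onto a small loop over the kernel's program-ID space. A minimal sketch of that loop, assuming the in-tree libbpf helpers bpf_prog_get_next_id(), bpf_prog_get_fd_by_id() and bpf_obj_get_info_by_fd() (the include path below is an assumption; adjust for your build setup):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/bpf.h>
	#include "bpf/bpf.h"	/* tools/lib/bpf helpers */

	int main(void)
	{
		__u32 id = 0;

		/* Walk all loaded program IDs, as "prog show"/"prog list" do. */
		while (!bpf_prog_get_next_id(id, &id)) {
			struct bpf_prog_info info = {};
			__u32 len = sizeof(info);
			int fd = bpf_prog_get_fd_by_id(id);

			if (fd < 0) {
				if (errno == ENOENT)
					continue;	/* program unloaded between calls */
				break;
			}
			if (!bpf_obj_get_info_by_fd(fd, &info, &len))
				printf("%u: type %u name %s\n",
				       info.id, info.type, info.name);
			close(fd);
		}
		return 0;
	}

The ENOENT continue mirrors the race handling added to do_show() later in this series: an ID returned by GET_NEXT_ID may already be gone by the time it is opened.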
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst index 6732a5a617e4..20689a321ffe 100644 --- a/tools/bpf/bpftool/Documentation/bpftool.rst +++ b/tools/bpf/bpftool/Documentation/bpftool.rst @@ -22,13 +22,13 @@ SYNOPSIS | { **-j** | **--json** } [{ **-p** | **--pretty** }] } *MAP-COMMANDS* := - { **show** | **dump** | **update** | **lookup** | **getnext** | **delete** + { **show** | **list** | **dump** | **update** | **lookup** | **getnext** | **delete** | **pin** | **help** } - *PROG-COMMANDS* := { **show** | **dump jited** | **dump xlated** | **pin** + *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** | **load** | **help** } - *CGROUP-COMMANDS* := { **list** | **attach** | **detach** | **help** } + *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** } DESCRIPTION =========== diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 3f17ad317512..2237bc43f71c 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -23,6 +23,8 @@ endif LIBBPF = $(BPF_PATH)libbpf.a +BPFTOOL_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion) + $(LIBBPF): FORCE $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) @@ -38,11 +40,36 @@ CC = gcc CFLAGS += -O2 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi -I$(srctree)/tools/include -I$(srctree)/tools/lib/bpf -I$(srctree)/kernel/bpf/ +CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"' LIBS = -lelf -lbfd -lopcodes $(LIBBPF) INSTALL ?= install RM ?= rm -f +FEATURE_USER = .bpftool +FEATURE_TESTS = libbfd disassembler-four-args +FEATURE_DISPLAY = libbfd disassembler-four-args + +check_feat := 1 +NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall +ifdef MAKECMDGOALS +ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),) + check_feat := 0 +endif +endif + +ifeq ($(check_feat),1) +ifeq ($(FEATURES_DUMP),) +include $(srctree)/tools/build/Makefile.feature +else +include $(FEATURES_DUMP) +endif +endif + +ifeq ($(feature-disassembler-four-args), 1) +CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE +endif + include $(wildcard *.d) all: $(OUTPUT)bpftool diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 7febee05c8e7..0137866bb8f6 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -197,7 +197,7 @@ _bpftool() local PROG_TYPE='id pinned tag' case $command in - show) + show|list) [[ $prev != "$command" ]] && return 0 COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) ) return 0 @@ -232,7 +232,7 @@ _bpftool() ;; *) [[ $prev == $object ]] && \ - COMPREPLY=( $( compgen -W 'dump help pin show' -- \ + COMPREPLY=( $( compgen -W 'dump help pin show list' -- \ "$cur" ) ) ;; esac @@ -240,7 +240,7 @@ _bpftool() map) local MAP_TYPE='id pinned' case $command in - show|dump) + show|list|dump) case $prev in $command) COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) ) @@ -343,7 +343,7 @@ _bpftool() *) [[ $prev == $object ]] && \ COMPREPLY=( $( compgen -W 'delete dump getnext help \ - lookup pin show update' -- "$cur" ) ) + lookup pin show list update' -- "$cur" ) ) ;; esac ;; diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index 34ca303d72bc..cae32a61cb18 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -41,7 +41,7 @@ 
static enum bpf_attach_type parse_attach_type(const char *str) return __MAX_BPF_ATTACH_TYPE; } -static int list_bpf_prog(int id, const char *attach_type_str, +static int show_bpf_prog(int id, const char *attach_type_str, const char *attach_flags_str) { struct bpf_prog_info info = {}; @@ -77,7 +77,7 @@ static int list_bpf_prog(int id, const char *attach_type_str, return 0; } -static int list_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) +static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) { __u32 prog_ids[1024] = {0}; char *attach_flags_str; @@ -111,29 +111,29 @@ static int list_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) } for (iter = 0; iter < prog_cnt; iter++) - list_bpf_prog(prog_ids[iter], attach_type_strings[type], + show_bpf_prog(prog_ids[iter], attach_type_strings[type], attach_flags_str); return 0; } -static int do_list(int argc, char **argv) +static int do_show(int argc, char **argv) { enum bpf_attach_type type; int cgroup_fd; int ret = -1; if (argc < 1) { - p_err("too few parameters for cgroup list\n"); + p_err("too few parameters for cgroup show"); goto exit; } else if (argc > 1) { - p_err("too many parameters for cgroup list\n"); + p_err("too many parameters for cgroup show"); goto exit; } cgroup_fd = open(argv[0], O_RDONLY); if (cgroup_fd < 0) { - p_err("can't open cgroup %s\n", argv[1]); + p_err("can't open cgroup %s", argv[0]); goto exit; } @@ -147,10 +147,10 @@ static int do_list(int argc, char **argv) /* * Not all attach types may be supported, so it's expected, * that some requests will fail. - * If we were able to get the list for at least one + * If we were able to show at least one * attach type, let's return 0. */ - if (list_attached_bpf_progs(cgroup_fd, type) == 0) + if (show_attached_bpf_progs(cgroup_fd, type) == 0) ret = 0; } @@ -171,19 +171,19 @@ static int do_attach(int argc, char **argv) int i; if (argc < 4) { - p_err("too few parameters for cgroup attach\n"); + p_err("too few parameters for cgroup attach"); goto exit; } cgroup_fd = open(argv[0], O_RDONLY); if (cgroup_fd < 0) { - p_err("can't open cgroup %s\n", argv[1]); + p_err("can't open cgroup %s", argv[0]); goto exit; } attach_type = parse_attach_type(argv[1]); if (attach_type == __MAX_BPF_ATTACH_TYPE) { - p_err("invalid attach type\n"); + p_err("invalid attach type"); goto exit_cgroup; } @@ -199,7 +199,7 @@ static int do_attach(int argc, char **argv) } else if (is_prefix(argv[i], "override")) { attach_flags |= BPF_F_ALLOW_OVERRIDE; } else { - p_err("unknown option: %s\n", argv[i]); + p_err("unknown option: %s", argv[i]); goto exit_cgroup; } } @@ -229,13 +229,13 @@ static int do_detach(int argc, char **argv) int ret = -1; if (argc < 4) { - p_err("too few parameters for cgroup detach\n"); + p_err("too few parameters for cgroup detach"); goto exit; } cgroup_fd = open(argv[0], O_RDONLY); if (cgroup_fd < 0) { - p_err("can't open cgroup %s\n", argv[1]); + p_err("can't open cgroup %s", argv[0]); goto exit; } @@ -277,7 +277,7 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s list CGROUP\n" + "Usage: %s %s { show | list } CGROUP\n" " %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n" " %s %s detach CGROUP ATTACH_TYPE PROG\n" " %s %s help\n" @@ -294,7 +294,8 @@ static int do_help(int argc, char **argv) } static const struct cmd cmds[] = { - { "list", do_list }, + { "show", do_show }, + { "list", do_show }, { "attach", do_attach }, { "detach", do_detach }, { "help", do_help }, diff --git a/tools/bpf/bpftool/common.c
b/tools/bpf/bpftool/common.c index b62c94e3997a..6601c95a9258 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -44,7 +44,9 @@ #include <unistd.h> #include <linux/limits.h> #include <linux/magic.h> +#include <net/if.h> #include <sys/mount.h> +#include <sys/stat.h> #include <sys/types.h> #include <sys/vfs.h> @@ -412,3 +414,53 @@ void delete_pinned_obj_table(struct pinned_obj_table *tab) free(obj); } } + +static char * +ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf) +{ + struct stat st; + int err; + + err = stat("/proc/self/ns/net", &st); + if (err) { + p_err("Can't stat /proc/self: %s", strerror(errno)); + return NULL; + } + + if (st.st_dev != ns_dev || st.st_ino != ns_ino) + return NULL; + + return if_indextoname(ifindex, buf); +} + +void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode) +{ + char name[IF_NAMESIZE]; + + if (!ifindex) + return; + + printf(" dev "); + if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name)) + printf("%s", name); + else + printf("ifindex %u ns_dev %llu ns_ino %llu", + ifindex, ns_dev, ns_inode); +} + +void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode) +{ + char name[IF_NAMESIZE]; + + if (!ifindex) + return; + + jsonw_name(json_wtr, "dev"); + jsonw_start_object(json_wtr); + jsonw_uint_field(json_wtr, "ifindex", ifindex); + jsonw_uint_field(json_wtr, "ns_dev", ns_dev); + jsonw_uint_field(json_wtr, "ns_inode", ns_inode); + if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name)) + jsonw_string_field(json_wtr, "ifname", name); + jsonw_end_object(json_wtr); +} diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c index 1551d3918d4c..57d32e8a1391 100644 --- a/tools/bpf/bpftool/jit_disasm.c +++ b/tools/bpf/bpftool/jit_disasm.c @@ -107,7 +107,14 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes) disassemble_init_for_target(&info); +#ifdef DISASM_FOUR_ARGS_SIGNATURE + disassemble = disassembler(info.arch, + bfd_big_endian(bfdf), + info.mach, + bfdf); +#else disassemble = disassembler(bfdf); +#endif assert(disassemble); if (json_output) diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index ecd53ccf1239..3a0396d87c42 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -38,7 +38,6 @@ #include <errno.h> #include <getopt.h> #include <linux/bpf.h> -#include <linux/version.h> #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -95,21 +94,13 @@ static int do_help(int argc, char **argv) static int do_version(int argc, char **argv) { - unsigned int version[3]; - - version[0] = LINUX_VERSION_CODE >> 16; - version[1] = LINUX_VERSION_CODE >> 8 & 0xf; - version[2] = LINUX_VERSION_CODE & 0xf; - if (json_output) { jsonw_start_object(json_wtr); jsonw_name(json_wtr, "version"); - jsonw_printf(json_wtr, "\"%u.%u.%u\"", - version[0], version[1], version[2]); + jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION); jsonw_end_object(json_wtr); } else { - printf("%s v%u.%u.%u\n", bin_name, - version[0], version[1], version[2]); + printf("%s v%s\n", bin_name, BPFTOOL_VERSION); } return 0; } diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 8f6d3cac0347..65b526fe6e7e 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -96,6 +96,8 @@ struct pinned_obj { int build_pinned_obj_table(struct pinned_obj_table *table, enum bpf_obj_type type); void delete_pinned_obj_table(struct pinned_obj_table *tab); +void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); +void print_dev_json(__u32 
ifindex, __u64 ns_dev, __u64 ns_inode); struct cmd { const char *cmd; diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index e2450c8e88e6..8d7db9d6b9cd 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -523,21 +523,23 @@ static int do_show(int argc, char **argv) break; p_err("can't get next map: %s%s", strerror(errno), errno == EINVAL ? " -- kernel too old?" : ""); - return -1; + break; } fd = bpf_map_get_fd_by_id(id); if (fd < 0) { + if (errno == ENOENT) + continue; p_err("can't get map by id (%u): %s", id, strerror(errno)); - return -1; + break; } err = bpf_obj_get_info_by_fd(fd, &info, &len); if (err) { p_err("can't get map info: %s", strerror(errno)); close(fd); - return -1; + break; } if (json_output) @@ -859,7 +861,7 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s show [MAP]\n" + "Usage: %s %s { show | list } [MAP]\n" " %s %s dump MAP\n" " %s %s update MAP key BYTES value VALUE [UPDATE_FLAGS]\n" " %s %s lookup MAP key BYTES\n" @@ -883,6 +885,7 @@ static int do_help(int argc, char **argv) static const struct cmd cmds[] = { { "show", do_show }, + { "list", do_show }, { "help", do_help }, { "dump", do_dump }, { "update", do_update }, diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 037484ceaeaf..c6a28be4665c 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -230,6 +230,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) info->tag[0], info->tag[1], info->tag[2], info->tag[3], info->tag[4], info->tag[5], info->tag[6], info->tag[7]); + print_dev_json(info->ifindex, info->netns_dev, info->netns_ino); + if (info->load_time) { char buf[32]; @@ -287,6 +289,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) printf("tag "); fprint_hex(stdout, info->tag, BPF_TAG_SIZE, ""); + print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino); printf("\n"); if (info->load_time) { @@ -383,6 +386,8 @@ static int do_show(int argc, char **argv) fd = bpf_prog_get_fd_by_id(id); if (fd < 0) { + if (errno == ENOENT) + continue; p_err("can't get prog by id (%u): %s", id, strerror(errno)); err = -1; @@ -401,6 +406,88 @@ static int do_show(int argc, char **argv) return err; } +#define SYM_MAX_NAME 256 + +struct kernel_sym { + unsigned long address; + char name[SYM_MAX_NAME]; +}; + +struct dump_data { + unsigned long address_call_base; + struct kernel_sym *sym_mapping; + __u32 sym_count; + char scratch_buff[SYM_MAX_NAME]; +}; + +static int kernel_syms_cmp(const void *sym_a, const void *sym_b) +{ + return ((struct kernel_sym *)sym_a)->address - + ((struct kernel_sym *)sym_b)->address; +} + +static void kernel_syms_load(struct dump_data *dd) +{ + struct kernel_sym *sym; + char buff[256]; + void *tmp, *address; + FILE *fp; + + fp = fopen("/proc/kallsyms", "r"); + if (!fp) + return; + + while (!feof(fp)) { + if (!fgets(buff, sizeof(buff), fp)) + break; + tmp = realloc(dd->sym_mapping, + (dd->sym_count + 1) * + sizeof(*dd->sym_mapping)); + if (!tmp) { +out: + free(dd->sym_mapping); + dd->sym_mapping = NULL; + fclose(fp); + return; + } + dd->sym_mapping = tmp; + sym = &dd->sym_mapping[dd->sym_count]; + if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2) + continue; + sym->address = (unsigned long)address; + if (!strcmp(sym->name, "__bpf_call_base")) { + dd->address_call_base = sym->address; + /* sysctl kernel.kptr_restrict was set */ + if (!sym->address) + goto out; + } + if (sym->address) + dd->sym_count++; + } + + fclose(fp); + + qsort(dd->sym_mapping, dd->sym_count, + 
sizeof(*dd->sym_mapping), kernel_syms_cmp); +} + +static void kernel_syms_destroy(struct dump_data *dd) +{ + free(dd->sym_mapping); +} + +static struct kernel_sym *kernel_syms_search(struct dump_data *dd, + unsigned long key) +{ + struct kernel_sym sym = { + .address = key, + }; + + return dd->sym_mapping ? + bsearch(&sym, dd->sym_mapping, dd->sym_count, + sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL; +} + static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...) { va_list args; @@ -410,8 +497,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...) va_end(args); } -static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes) +static const char *print_call_pcrel(struct dump_data *dd, + struct kernel_sym *sym, + unsigned long address, + const struct bpf_insn *insn) +{ + if (sym) + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "%+d#%s", insn->off, sym->name); + else + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "%+d#0x%lx", insn->off, address); + return dd->scratch_buff; +} + +static const char *print_call_helper(struct dump_data *dd, + struct kernel_sym *sym, + unsigned long address) +{ + if (sym) + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "%s", sym->name); + else + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "0x%lx", address); + return dd->scratch_buff; +} + +static const char *print_call(void *private_data, + const struct bpf_insn *insn) +{ + struct dump_data *dd = private_data; + unsigned long address = dd->address_call_base + insn->imm; + struct kernel_sym *sym; + + sym = kernel_syms_search(dd, address); + if (insn->src_reg == BPF_PSEUDO_CALL) + return print_call_pcrel(dd, sym, address, insn); + else + return print_call_helper(dd, sym, address); +} + +static const char *print_imm(void *private_data, + const struct bpf_insn *insn, + __u64 full_imm) { + struct dump_data *dd = private_data; + + if (insn->src_reg == BPF_PSEUDO_MAP_FD) + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "map[id:%u]", insn->imm); + else + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), + "0x%llx", (unsigned long long)full_imm); + return dd->scratch_buff; +} + +static void dump_xlated_plain(struct dump_data *dd, void *buf, + unsigned int len, bool opcodes) +{ + const struct bpf_insn_cbs cbs = { + .cb_print = print_insn, + .cb_call = print_call, + .cb_imm = print_imm, + .private_data = dd, + }; struct bpf_insn *insn = buf; bool double_insn = false; unsigned int i; @@ -425,7 +575,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes) double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); printf("% 4d: ", i); - print_bpf_insn(print_insn, NULL, insn + i, true); + print_bpf_insn(&cbs, NULL, insn + i, true); if (opcodes) { printf(" "); @@ -454,8 +604,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...) 
va_end(args); } -static void dump_xlated_json(void *buf, unsigned int len, bool opcodes) +static void dump_xlated_json(struct dump_data *dd, void *buf, + unsigned int len, bool opcodes) { + const struct bpf_insn_cbs cbs = { + .cb_print = print_insn_json, + .cb_call = print_call, + .cb_imm = print_imm, + .private_data = dd, + }; struct bpf_insn *insn = buf; bool double_insn = false; unsigned int i; @@ -470,7 +627,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes) jsonw_start_object(json_wtr); jsonw_name(json_wtr, "disasm"); - print_bpf_insn(print_insn_json, NULL, insn + i, true); + print_bpf_insn(&cbs, NULL, insn + i, true); if (opcodes) { jsonw_name(json_wtr, "opcodes"); @@ -505,6 +662,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes) static int do_dump(int argc, char **argv) { struct bpf_prog_info info = {}; + struct dump_data dd = {}; __u32 len = sizeof(info); unsigned int buf_size; char *filepath = NULL; @@ -592,6 +750,14 @@ static int do_dump(int argc, char **argv) goto err_free; } + if ((member_len == &info.jited_prog_len && + info.jited_prog_insns == 0) || + (member_len == &info.xlated_prog_len && + info.xlated_prog_insns == 0)) { + p_err("error retrieving insn dump: kernel.kptr_restrict set?"); + goto err_free; + } + if (filepath) { fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600); if (fd < 0) { @@ -608,17 +774,19 @@ static int do_dump(int argc, char **argv) goto err_free; } } else { - if (member_len == &info.jited_prog_len) + if (member_len == &info.jited_prog_len) { disasm_print_insn(buf, *member_len, opcodes); - else + } else { + kernel_syms_load(&dd); if (json_output) - dump_xlated_json(buf, *member_len, opcodes); + dump_xlated_json(&dd, buf, *member_len, opcodes); else - dump_xlated_plain(buf, *member_len, opcodes); + dump_xlated_plain(&dd, buf, *member_len, opcodes); + kernel_syms_destroy(&dd); + } } free(buf); - return 0; err_free: @@ -645,12 +813,12 @@ static int do_load(int argc, char **argv) usage(); if (bpf_prog_load(argv[0], BPF_PROG_TYPE_UNSPEC, &obj, &prog_fd)) { - p_err("failed to load program\n"); + p_err("failed to load program"); return -1; } if (do_pin_fd(prog_fd, argv[1])) { - p_err("failed to pin program\n"); + p_err("failed to pin program"); return -1; } @@ -668,7 +836,7 @@ static int do_help(int argc, char **argv) } fprintf(stderr, - "Usage: %s %s show [PROG]\n" + "Usage: %s %s { show | list } [PROG]\n" " %s %s dump xlated PROG [{ file FILE | opcodes }]\n" " %s %s dump jited PROG [{ file FILE | opcodes }]\n" " %s %s pin PROG FILE\n" @@ -686,6 +854,7 @@ static int do_help(int argc, char **argv) static const struct cmd cmds[] = { { "show", do_show }, + { "list", do_show }, { "help", do_help }, { "dump", do_dump }, { "pin", do_pin }, diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 96982640fbf8..17f2c73fff8b 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -13,6 +13,7 @@ FILES= \ test-hello.bin \ test-libaudit.bin \ test-libbfd.bin \ + test-disassembler-four-args.bin \ test-liberty.bin \ test-liberty-z.bin \ test-cplus-demangle.bin \ @@ -188,6 +189,9 @@ $(OUTPUT)test-libpython-version.bin: $(OUTPUT)test-libbfd.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl +$(OUTPUT)test-disassembler-four-args.bin: + $(BUILD) -lbfd -lopcodes + $(OUTPUT)test-liberty.bin: $(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty diff --git a/tools/build/feature/test-disassembler-four-args.c 
b/tools/build/feature/test-disassembler-four-args.c new file mode 100644 index 000000000000..45ce65cfddf0 --- /dev/null +++ b/tools/build/feature/test-disassembler-four-args.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <bfd.h> +#include <dis-asm.h> + +int main(void) +{ + bfd *abfd = bfd_openr(NULL, NULL); + + disassembler(bfd_get_arch(abfd), + bfd_big_endian(abfd), + bfd_get_mach(abfd), + abfd); + + return 0; +} diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index db1b0923a308..4e8c60acfa32 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -921,6 +921,9 @@ struct bpf_prog_info { __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; + __u32 ifindex; + __u64 netns_dev; + __u64 netns_ino; } __attribute__((aligned(8))); struct bpf_map_info { diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5b83875b3594..e9c4b7cabcf2 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -910,8 +910,9 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, GELF_R_SYM(rel.r_info)); return -LIBBPF_ERRNO__FORMAT; } - pr_debug("relo for %ld value %ld name %d\n", - rel.r_info >> 32, sym.st_value, sym.st_name); + pr_debug("relo for %lld value %lld name %d\n", + (long long) (rel.r_info >> 32), + (long long) sym.st_value, sym.st_name); if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) { pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n", diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 8acfc47af70e..540a209b78ab 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -138,7 +138,7 @@ int arch_decode_instruction(struct elf *elf, struct section *sec, *type = INSN_STACK; op->src.type = OP_SRC_ADD; op->src.reg = op_to_cfi_reg[modrm_reg][rex_r]; - op->dest.type = OP_SRC_REG; + op->dest.type = OP_DEST_REG; op->dest.reg = CFI_SP; } break; diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c index 4c6b5c9ef073..91e8e19ff5e0 100644 --- a/tools/objtool/builtin-orc.c +++ b/tools/objtool/builtin-orc.c @@ -44,6 +44,9 @@ int cmd_orc(int argc, const char **argv) const char *objname; argc--; argv++; + if (argc <= 0) + usage_with_options(orc_usage, check_options); + if (!strncmp(argv[0], "gen", 3)) { argc = parse_options(argc, argv, check_options, orc_usage, 0); if (argc != 1) @@ -52,7 +55,6 @@ int cmd_orc(int argc, const char **argv) objname = argv[0]; return check(objname, no_fp, no_unreachable, true); - } if (!strcmp(argv[0], "dump")) { diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c index e5ca31429c9b..e61fe703197b 100644 --- a/tools/objtool/orc_gen.c +++ b/tools/objtool/orc_gen.c @@ -165,6 +165,8 @@ int create_orc_sections(struct objtool_file *file) /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */ sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx); + if (!sec) + return -1; ip_relasec = elf_create_rela_section(file->elf, sec); if (!ip_relasec) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index ed65e82f034e..0294bfb6c5f8 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -188,9 +188,7 @@ ifdef PYTHON_CONFIG PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS)) PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null) - ifeq ($(CC_NO_CLANG), 1) - PYTHON_EMBED_CCOPTS 
:= $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) - endif + PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS)) FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS) endif @@ -576,14 +574,15 @@ ifndef NO_GTK2 endif endif - ifdef NO_LIBPERL CFLAGS += -DNO_LIBPERL else PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null) PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS)) PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) - PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null` + PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null) + PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS)) + PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS)) FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) ifneq ($(feature-libperl), 1) diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h index d2df54a6bc5a..bcfbaed78cc2 100644 --- a/tools/perf/arch/s390/include/perf_regs.h +++ b/tools/perf/arch/s390/include/perf_regs.h @@ -3,7 +3,7 @@ #include <stdlib.h> #include <linux/types.h> -#include <../../../../arch/s390/include/uapi/asm/perf_regs.h> +#include <asm/perf_regs.h> void perf_regs_load(u64 *regs); diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index 6db9d809fe97..3e64f10b6d66 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -21,6 +21,7 @@ arch/x86/include/asm/cpufeatures.h arch/arm/include/uapi/asm/perf_regs.h arch/arm64/include/uapi/asm/perf_regs.h arch/powerpc/include/uapi/asm/perf_regs.h +arch/s390/include/uapi/asm/perf_regs.h arch/x86/include/uapi/asm/perf_regs.h arch/x86/include/uapi/asm/kvm.h arch/x86/include/uapi/asm/kvm_perf.h diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c index cf36de7ea255..0c6d1002b524 100644 --- a/tools/perf/jvmti/jvmti_agent.c +++ b/tools/perf/jvmti/jvmti_agent.c @@ -384,13 +384,13 @@ jvmti_write_code(void *agent, char const *sym, } int -jvmti_write_debug_info(void *agent, uint64_t code, const char *file, - jvmti_line_info_t *li, int nr_lines) +jvmti_write_debug_info(void *agent, uint64_t code, + int nr_lines, jvmti_line_info_t *li, + const char * const * file_names) { struct jr_code_debug_info rec; - size_t sret, len, size, flen; + size_t sret, len, size, flen = 0; uint64_t addr; - const char *fn = file; FILE *fp = agent; int i; @@ -405,7 +405,9 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file, return -1; } - flen = strlen(file) + 1; + for (i = 0; i < nr_lines; ++i) { + flen += strlen(file_names[i]) + 1; + } rec.p.id = JIT_CODE_DEBUG_INFO; size = sizeof(rec); @@ -421,7 +423,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file, * file[] : source file name */ size += nr_lines * sizeof(struct debug_entry); - size += flen * nr_lines; + size += flen; rec.p.total_size = size; /* @@ -452,7 +454,7 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file, if (sret != 1) goto error; - sret = fwrite_unlocked(fn, flen, 1, fp); + sret = fwrite_unlocked(file_names[i], strlen(file_names[i]) + 1, 1, fp); if (sret != 1) goto error; } diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h index fe32d8344a82..6ed82f6c06dd 100644 --- a/tools/perf/jvmti/jvmti_agent.h +++ b/tools/perf/jvmti/jvmti_agent.h @@ -14,6 +14,7 @@ typedef struct { unsigned long pc; int line_number; int discrim; /* discriminator -- 0 for now */ + jmethodID methodID; } jvmti_line_info_t; void 
*jvmti_open(void); @@ -22,11 +23,9 @@ int jvmti_write_code(void *agent, char const *symbol_name, uint64_t vma, void const *code, const unsigned int code_size); -int jvmti_write_debug_info(void *agent, - uint64_t code, - const char *file, +int jvmti_write_debug_info(void *agent, uint64_t code, int nr_lines, jvmti_line_info_t *li, - int nr_lines); + const char * const * file_names); #if defined(__cplusplus) } diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c index c62c9fc9a525..6add3e982614 100644 --- a/tools/perf/jvmti/libjvmti.c +++ b/tools/perf/jvmti/libjvmti.c @@ -47,6 +47,7 @@ do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci, tab[lines].pc = (unsigned long)pc; tab[lines].line_number = loc_tab[i].line_number; tab[lines].discrim = 0; /* not yet used */ + tab[lines].methodID = m; lines++; } else { break; @@ -125,6 +126,99 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t ** return JVMTI_ERROR_NONE; } +static void +copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length) +{ + /* + * Assume path name is class hierarchy, this is a common practice with Java programs + */ + if (*class_sign == 'L') { + int j, i = 0; + char *p = strrchr(class_sign, '/'); + if (p) { + /* drop the 'L' prefix and copy up to the final '/' */ + for (i = 0; i < (p - class_sign); i++) + result[i] = class_sign[i+1]; + } + /* + * append file name, we use loops and not string ops to avoid modifying + * class_sign which is used later for the symbol name + */ + for (j = 0; i < (max_length - 1) && file_name && j < strlen(file_name); j++, i++) + result[i] = file_name[j]; + + result[i] = '\0'; + } else { + /* fallback case */ + size_t file_name_len = strlen(file_name); + strncpy(result, file_name, file_name_len < max_length ? 
file_name_len : max_length); + } +} + +static jvmtiError +get_source_filename(jvmtiEnv *jvmti, jmethodID methodID, char ** buffer) +{ + jvmtiError ret; + jclass decl_class; + char *file_name = NULL; + char *class_sign = NULL; + char fn[PATH_MAX]; + size_t len; + + ret = (*jvmti)->GetMethodDeclaringClass(jvmti, methodID, &decl_class); + if (ret != JVMTI_ERROR_NONE) { + print_error(jvmti, "GetMethodDeclaringClass", ret); + return ret; + } + + ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name); + if (ret != JVMTI_ERROR_NONE) { + print_error(jvmti, "GetSourceFileName", ret); + return ret; + } + + ret = (*jvmti)->GetClassSignature(jvmti, decl_class, &class_sign, NULL); + if (ret != JVMTI_ERROR_NONE) { + print_error(jvmti, "GetClassSignature", ret); + goto free_file_name_error; + } + + copy_class_filename(class_sign, file_name, fn, PATH_MAX); + len = strlen(fn); + *buffer = malloc((len + 1) * sizeof(char)); + if (!*buffer) { + print_error(jvmti, "GetClassSignature", ret); + ret = JVMTI_ERROR_OUT_OF_MEMORY; + goto free_class_sign_error; + } + strcpy(*buffer, fn); + ret = JVMTI_ERROR_NONE; + +free_class_sign_error: + (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign); +free_file_name_error: + (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name); + + return ret; +} + +static jvmtiError +fill_source_filenames(jvmtiEnv *jvmti, int nr_lines, + const jvmti_line_info_t * line_tab, + char ** file_names) +{ + int index; + jvmtiError ret; + + for (index = 0; index < nr_lines; ++index) { + ret = get_source_filename(jvmti, line_tab[index].methodID, &(file_names[index])); + if (ret != JVMTI_ERROR_NONE) + return ret; + } + + return JVMTI_ERROR_NONE; +} + static void JNICALL compiled_method_load_cb(jvmtiEnv *jvmti, jmethodID method, @@ -135,16 +229,18 @@ compiled_method_load_cb(jvmtiEnv *jvmti, const void *compile_info) { jvmti_line_info_t *line_tab = NULL; + char ** line_file_names = NULL; jclass decl_class; char *class_sign = NULL; char *func_name = NULL; char *func_sign = NULL; - char *file_name= NULL; + char *file_name = NULL; char fn[PATH_MAX]; uint64_t addr = (uint64_t)(uintptr_t)code_addr; jvmtiError ret; int nr_lines = 0; /* in line_tab[] */ size_t len; + int output_debug_info = 0; ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method, &decl_class); @@ -158,6 +254,19 @@ compiled_method_load_cb(jvmtiEnv *jvmti, if (ret != JVMTI_ERROR_NONE) { warnx("jvmti: cannot get line table for method"); nr_lines = 0; + } else if (nr_lines > 0) { + line_file_names = malloc(sizeof(char*) * nr_lines); + if (!line_file_names) { + warnx("jvmti: cannot allocate space for line table method names"); + } else { + memset(line_file_names, 0, sizeof(char*) * nr_lines); + ret = fill_source_filenames(jvmti, nr_lines, line_tab, line_file_names); + if (ret != JVMTI_ERROR_NONE) { + warnx("jvmti: fill_source_filenames failed"); + } else { + output_debug_info = 1; + } + } } } @@ -181,33 +290,14 @@ compiled_method_load_cb(jvmtiEnv *jvmti, goto error; } - /* - * Assume path name is class hierarchy, this is a common practice with Java programs - */ - if (*class_sign == 'L') { - int j, i = 0; - char *p = strrchr(class_sign, '/'); - if (p) { - /* drop the 'L' prefix and copy up to the final '/' */ - for (i = 0; i < (p - class_sign); i++) - fn[i] = class_sign[i+1]; - } - /* - * append file name, we use loops and not string ops to avoid modifying - * class_sign which is used later for the symbol name - */ - for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++) - fn[i] = file_name[j]; - fn[i] = 
'\0'; - } else { - /* fallback case */ - strcpy(fn, file_name); - } + copy_class_filename(class_sign, file_name, fn, PATH_MAX); + /* * write source line info record if we have it */ - if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines)) - warnx("jvmti: write_debug_info() failed"); + if (output_debug_info) + if (jvmti_write_debug_info(jvmti_agent, addr, nr_lines, line_tab, (const char * const *) line_file_names)) + warnx("jvmti: write_debug_info() failed"); len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2; { @@ -223,6 +313,13 @@ error: (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign); (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name); free(line_tab); + while (line_file_names && (nr_lines > 0)) { + if (line_file_names[nr_lines - 1]) { + free(line_file_names[nr_lines - 1]); + } + nr_lines -= 1; + } + free(line_file_names); } static void JNICALL diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index f1fdb36269f2..a8aa7e251c8e 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -19,7 +19,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \ - test_l4lb_noinline.o test_xdp_noinline.o + test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \ test_offload.py @@ -41,7 +41,7 @@ $(BPFOBJ): force CLANG ?= clang LLC ?= llc -PROBE := $(shell llc -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) +PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1) # Let newer LLVM versions transparently probe the kernel for availability # of full BPF instruction set. 
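For reference, the copy_class_filename() helper factored out in the libjvmti.c hunk above maps a JVM class signature plus the bare source file name returned by GetSourceFileName() onto a source path; e.g. "Ljava/util/ArrayList;" and "ArrayList.java" become "java/util/ArrayList.java". A minimal standalone sketch of that mapping, simplified from the patch (the kernel helper copies byte by byte so class_sign stays untouched; this version is not the kernel code verbatim):

    /* Sketch of the class-signature -> source-path mapping.
     * Assumption: class_sign is a JVM type signature ("L<package>/<Class>;")
     * and file_name is the bare file name from GetSourceFileName().
     */
    #include <stdio.h>
    #include <string.h>

    static void class_to_path(const char *class_sign, const char *file_name,
                              char *result, size_t max_length)
    {
            const char *p = strrchr(class_sign, '/');

            if (class_sign[0] == 'L' && p) {
                    /* drop the 'L' prefix; keep the package directories,
                     * including the final '/' */
                    int dirs = (int)(p - class_sign);

                    snprintf(result, max_length, "%.*s%s",
                             dirs, class_sign + 1, file_name);
            } else {
                    /* fallback case: no package hierarchy */
                    snprintf(result, max_length, "%s", file_name);
            }
    }

    int main(void)
    {
            char path[256];

            class_to_path("Ljava/util/ArrayList;", "ArrayList.java",
                          path, sizeof(path));
            printf("%s\n", path);   /* java/util/ArrayList.java */
            return 0;
    }

Unlike the strncpy() in the patch's fallback path, snprintf() always NUL-terminates the result, which is why the sketch uses it throughout.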
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 9d4897317c77..983dd25d49f4 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -4,3 +4,4 @@ CONFIG_NET_CLS_BPF=m CONFIG_BPF_EVENTS=y CONFIG_TEST_BPF=m CONFIG_CGROUP_BPF=y +CONFIG_NETDEVSIM=m diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c index 02c85d6c89b0..c1535b34f14f 100644 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ b/tools/testing/selftests/bpf/test_dev_cgroup.c @@ -10,6 +10,8 @@ #include <string.h> #include <errno.h> #include <assert.h> +#include <sys/time.h> +#include <sys/resource.h> #include <linux/bpf.h> #include <bpf/bpf.h> @@ -23,15 +25,19 @@ int main(int argc, char **argv) { + struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; struct bpf_object *obj; int error = EXIT_FAILURE; int prog_fd, cgroup_fd; __u32 prog_cnt; + if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) + perror("Unable to lift memlock rlimit"); + if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE, &obj, &prog_fd)) { printf("Failed to load DEV_CGROUP program\n"); - goto err; + goto out; } if (setup_cgroup_environment()) { @@ -89,5 +95,6 @@ int main(int argc, char **argv) err: cleanup_cgroup_environment(); +out: return error; } diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index c940505c2978..e3c750f17cb8 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -18,6 +18,8 @@ import argparse import json import os import pprint +import random +import string import subprocess import time @@ -27,6 +29,7 @@ bpf_test_dir = os.path.dirname(os.path.realpath(__file__)) pp = pprint.PrettyPrinter() devs = [] # devices we created for clean up files = [] # files to be removed +netns = [] # net namespaces to be removed def log_get_sec(level=0): return "*" * (log_level + level) @@ -128,22 +131,25 @@ def rm(f): if f in files: files.remove(f) -def tool(name, args, flags, JSON=True, fail=True): +def tool(name, args, flags, JSON=True, ns="", fail=True): params = "" if JSON: params += "%s " % (flags["json"]) - ret, out = cmd(name + " " + params + args, fail=fail) + if ns != "": + ns = "ip netns exec %s " % (ns) + + ret, out = cmd(ns + name + " " + params + args, fail=fail) if JSON and len(out.strip()) != 0: return ret, json.loads(out) else: return ret, out -def bpftool(args, JSON=True, fail=True): - return tool("bpftool", args, {"json":"-p"}, JSON=JSON, fail=fail) +def bpftool(args, JSON=True, ns="", fail=True): + return tool("bpftool", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail) -def bpftool_prog_list(expected=None): - _, progs = bpftool("prog show", JSON=True, fail=True) +def bpftool_prog_list(expected=None, ns=""): + _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True) if expected is not None: if len(progs) != expected: fail(True, "%d BPF programs loaded, expected %d" % @@ -158,13 +164,13 @@ def bpftool_prog_list_wait(expected=0, n_retry=20): time.sleep(0.05) raise Exception("Time out waiting for program counts to stabilize want %d, have %d" % (expected, nprogs)) -def ip(args, force=False, JSON=True, fail=True): +def ip(args, force=False, JSON=True, ns="", fail=True): if force: args = "-force " + args - return tool("ip", args, {"json":"-j"}, JSON=JSON, fail=fail) + return tool("ip", args, {"json":"-j"}, JSON=JSON, ns=ns, fail=fail) -def tc(args, JSON=True, fail=True): - return tool("tc", args, 
{"json":"-p"}, JSON=JSON, fail=fail) +def tc(args, JSON=True, ns="", fail=True): + return tool("tc", args, {"json":"-p"}, JSON=JSON, ns=ns, fail=fail) def ethtool(dev, opt, args, fail=True): return cmd("ethtool %s %s %s" % (opt, dev["ifname"], args), fail=fail) @@ -178,6 +184,15 @@ def bpf_pinned(name): def bpf_bytecode(bytecode): return "bytecode \"%s\"" % (bytecode) +def mknetns(n_retry=10): + for i in range(n_retry): + name = ''.join([random.choice(string.ascii_letters) for i in range(8)]) + ret, _ = ip("netns add %s" % (name), fail=False) + if ret == 0: + netns.append(name) + return name + return None + class DebugfsDir: """ Class for accessing DebugFS directories as a dictionary. @@ -237,6 +252,8 @@ class NetdevSim: self.dev = self._netdevsim_create() devs.append(self) + self.ns = "" + self.dfs_dir = '/sys/kernel/debug/netdevsim/%s' % (self.dev['ifname']) self.dfs_refresh() @@ -257,7 +274,7 @@ class NetdevSim: def remove(self): devs.remove(self) - ip("link del dev %s" % (self.dev["ifname"])) + ip("link del dev %s" % (self.dev["ifname"]), ns=self.ns) def dfs_refresh(self): self.dfs = DebugfsDir(self.dfs_dir) @@ -285,6 +302,11 @@ class NetdevSim: time.sleep(0.05) raise Exception("Time out waiting for program counts to stabilize want %d/%d, have %d bound, %d loaded" % (bound, total, nbound, nprogs)) + def set_ns(self, ns): + name = "1" if ns == "" else ns + ip("link set dev %s netns %s" % (self.dev["ifname"], name), ns=self.ns) + self.ns = ns + def set_mtu(self, mtu, fail=True): return ip("link set dev %s mtu %d" % (self.dev["ifname"], mtu), fail=fail) @@ -372,6 +394,8 @@ def clean_up(): dev.remove() for f in files: cmd("rm -f %s" % (f)) + for ns in netns: + cmd("ip netns delete %s" % (ns)) def pin_prog(file_name, idx=0): progs = bpftool_prog_list(expected=(idx + 1)) @@ -381,6 +405,35 @@ def pin_prog(file_name, idx=0): return file_name, bpf_pinned(file_name) +def check_dev_info(other_ns, ns, pin_file=None, removed=False): + if removed: + bpftool_prog_list(expected=0) + ret, err = bpftool("prog show pin %s" % (pin_file), fail=False) + fail(ret == 0, "Showing prog with removed device did not fail") + fail(err["error"].find("No such device") == -1, + "Showing prog with removed device expected ENODEV, error is %s" % + (err["error"])) + return + progs = bpftool_prog_list(expected=int(not removed), ns=ns) + prog = progs[0] + + fail("dev" not in prog.keys(), "Device parameters not reported") + dev = prog["dev"] + fail("ifindex" not in dev.keys(), "Device parameters not reported") + fail("ns_dev" not in dev.keys(), "Device parameters not reported") + fail("ns_inode" not in dev.keys(), "Device parameters not reported") + + if not removed and not other_ns: + fail("ifname" not in dev.keys(), "Ifname not reported") + fail(dev["ifname"] != sim["ifname"], + "Ifname incorrect %s vs %s" % (dev["ifname"], sim["ifname"])) + else: + fail("ifname" in dev.keys(), "Ifname is reported for other ns") + if removed: + fail(dev["ifindex"] != 0, "Device perameters not zero on removed") + fail(dev["ns_dev"] != 0, "Device perameters not zero on removed") + fail(dev["ns_inode"] != 0, "Device perameters not zero on removed") + # Parse command line parser = argparse.ArgumentParser() parser.add_argument("--log", help="output verbose log to given file") @@ -417,6 +470,12 @@ for s in samples: skip(ret != 0, "sample %s/%s not found, please compile it" % (bpf_test_dir, s)) +# Check if net namespaces seem to work +ns = mknetns() +skip(ns is None, "Could not create a net namespace") +cmd("ip netns delete %s" % (ns)) +netns = 
[] + try: obj = bpf_obj("sample_ret0.o") bytecode = bpf_bytecode("1,6 0 0 4294967295,") @@ -549,6 +608,8 @@ try: progs = bpftool_prog_list(expected=1) fail(ipl["xdp"]["prog"]["id"] != progs[0]["id"], "Loaded program has wrong ID") + fail("dev" in progs[0].keys(), + "Device parameters reported for non-offloaded program") start_test("Test XDP prog replace with bad flags...") ret, _ = sim.set_xdp(obj, "offload", force=True, fail=False) @@ -673,6 +734,35 @@ try: fail(time_diff < delay_sec, "Removal process took %s, expected %s" % (time_diff, delay_sec)) + # Remove all pinned files and reinstantiate the netdev + clean_up() + bpftool_prog_list_wait(expected=0) + + sim = NetdevSim() + sim.set_ethtool_tc_offloads(True) + sim.set_xdp(obj, "offload") + + start_test("Test bpftool bound info reporting (own ns)...") + check_dev_info(False, "") + + start_test("Test bpftool bound info reporting (other ns)...") + ns = mknetns() + sim.set_ns(ns) + check_dev_info(True, "") + + start_test("Test bpftool bound info reporting (remote ns)...") + check_dev_info(False, ns) + + start_test("Test bpftool bound info reporting (back to own ns)...") + sim.set_ns("") + check_dev_info(False, "") + + pin_file, _ = pin_prog("/sys/fs/bpf/tmp") + sim.remove() + + start_test("Test bpftool bound info reporting (removed dev)...") + check_dev_info(True, "", pin_file=pin_file, removed=True) + print("%s: OK" % (os.path.basename(__file__))) finally: diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 09087ab12293..b549308abd19 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -837,6 +837,132 @@ static void test_tp_attach_query(void) free(query); } +static int compare_map_keys(int map1_fd, int map2_fd) +{ + __u32 key, next_key; + char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)]; + int err; + + err = bpf_map_get_next_key(map1_fd, NULL, &key); + if (err) + return err; + err = bpf_map_lookup_elem(map2_fd, &key, val_buf); + if (err) + return err; + + while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) { + err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf); + if (err) + return err; + + key = next_key; + } + if (errno != ENOENT) + return -1; + + return 0; +} + +static void test_stacktrace_map() +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd; + const char *file = "./test_stacktrace_map.o"; + int bytes, efd, err, pmu_fd, prog_fd; + struct perf_event_attr attr = {}; + __u32 key, val, duration = 0; + struct bpf_object *obj; + char buf[256]; + + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + goto out; + + /* Get the ID for the sched/sched_switch tracepoint */ + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/sched/sched_switch/id"); + efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + bytes = read(efd, buf, sizeof(buf)); + close(efd); + if (CHECK(bytes <= 0 || bytes >= sizeof(buf), + "read", "bytes %d errno %d\n", bytes, errno)) + goto close_prog; + + /* Open the perf event and attach bpf program */ + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN; + attr.sample_period = 1; + attr.wakeup_events = 1; + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno
%d\n", + pmu_fd, errno)) + goto close_prog; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", + err, errno)) + goto close_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", + err, errno)) + goto disable_pmu; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (CHECK(control_map_fd < 0, "bpf_find_map control_map", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n", + err, errno)) + goto disable_pmu; + + /* give some time for bpf program run */ + sleep(1); + + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", + "err %d errno %d\n", err, errno)) + ; /* fall through */ + +disable_pmu: + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + +close_pmu: + close(pmu_fd); + +close_prog: + bpf_object__close(obj); + +out: + return; +} + int main(void) { struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; @@ -852,6 +978,7 @@ int main(void) test_pkt_md_access(); test_obj_name(); test_tp_attach_query(); + test_stacktrace_map(); printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); return error_cnt ? 
EXIT_FAILURE : EXIT_SUCCESS; diff --git a/tools/testing/selftests/bpf/test_stacktrace_map.c b/tools/testing/selftests/bpf/test_stacktrace_map.c new file mode 100644 index 000000000000..76d85c5d08bd --- /dev/null +++ b/tools/testing/selftests/bpf/test_stacktrace_map.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <linux/bpf.h> +#include "bpf_helpers.h" + +#ifndef PERF_MAX_STACK_DEPTH +#define PERF_MAX_STACK_DEPTH 127 +#endif + +struct bpf_map_def SEC("maps") control_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") stackid_hmap = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 10000, +}; + +struct bpf_map_def SEC("maps") stackmap = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .key_size = sizeof(__u32), + .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH, + .max_entries = 10000, +}; + +/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */ +struct sched_switch_args { + unsigned long long pad; + char prev_comm[16]; + int prev_pid; + int prev_prio; + long long prev_state; + char next_comm[16]; + int next_pid; + int next_prio; +}; + +SEC("tracepoint/sched/sched_switch") +int oncpu(struct sched_switch_args *ctx) +{ + __u32 key = 0, val = 0, *value_p; + + value_p = bpf_map_lookup_elem(&control_map, &key); + if (value_p && *value_p) + return 0; /* skip if non-zero *value_p */ + + /* The size of stackmap and stackid_hmap should be the same */ + key = bpf_get_stackid(ctx, &stackmap, 0); + if ((int)key >= 0) + bpf_map_update_elem(&stackid_hmap, &key, &val, 0); + + return 0; +} + +char _license[] SEC("license") = "GPL"; +__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index d38334abb990..543847957fdd 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -9273,6 +9273,196 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { + "calls: stack overflow using two frames (pre-call access)", + .insns = { + /* prog 1 */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), + BPF_EXIT_INSN(), + + /* prog 2 */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "combined stack size", + .result = REJECT, + }, + { + "calls: stack overflow using two frames (post-call access)", + .insns = { + /* prog 1 */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_EXIT_INSN(), + + /* prog 2 */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "combined stack size", + .result = REJECT, + }, + { + "calls: stack depth check using three frames. 
test1", + .insns = { + /* main */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), + BPF_EXIT_INSN(), + /* B */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + /* stack_main=32, stack_A=256, stack_B=64 + * and max(main+A, main+A+B) < 512 + */ + .result = ACCEPT, + }, + { + "calls: stack depth check using three frames. test2", + .insns = { + /* main */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), + BPF_EXIT_INSN(), + /* B */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + /* stack_main=32, stack_A=64, stack_B=256 + * and max(main+A, main+A+B) < 512 + */ + .result = ACCEPT, + }, + { + "calls: stack depth check using three frames. test3", + .insns = { + /* main */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */ + BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1), + BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1), + BPF_EXIT_INSN(), + BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -3), + /* B */ + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */ + BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + /* stack_main=64, stack_A=224, stack_B=256 + * and max(main+A, main+A+B) > 512 + */ + .errstr = "combined stack", + .result = REJECT, + }, + { + "calls: stack depth check using three frames. test4", + /* void main(void) { + * func1(0); + * func1(1); + * func2(1); + * } + * void func1(int alloc_or_recurse) { + * if (alloc_or_recurse) { + * frame_pointer[-300] = 1; + * } else { + * func2(alloc_or_recurse); + * } + * } + * void func2(int alloc_or_recurse) { + * if (alloc_or_recurse) { + * frame_pointer[-300] = 1; + * } + * } + */ + .insns = { + /* main */ + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */ + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */ + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + /* A */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2), + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_EXIT_INSN(), + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ + BPF_EXIT_INSN(), + /* B */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), + BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = REJECT, + .errstr = "combined stack", + }, + { + "calls: stack depth check using three frames. 
test5", + .insns = { + /* main */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */ + BPF_EXIT_INSN(), + /* A */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */ + BPF_EXIT_INSN(), + /* B */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */ + BPF_EXIT_INSN(), + /* C */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */ + BPF_EXIT_INSN(), + /* D */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */ + BPF_EXIT_INSN(), + /* E */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */ + BPF_EXIT_INSN(), + /* F */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */ + BPF_EXIT_INSN(), + /* G */ + BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */ + BPF_EXIT_INSN(), + /* H */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_XDP, + .errstr = "call stack", + .result = REJECT, + }, + { "calls: spill into caller stack frame", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), @@ -10258,6 +10448,57 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_XDP, }, + { + "search pruning: all branches should be verified (nop operation)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_A(1), + BPF_MOV64_IMM(BPF_REG_4, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2), + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "R6 invalid mem access 'inv'", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "search pruning: all branches should be verified (invalid stack access)", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16), + BPF_JMP_A(1), + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24), + BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns), + BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .errstr = "invalid read from stack off -16+0 size 8", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, }; static int probe_filter_length(const struct bpf_insn *fp) diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 500c74db746c..d7c30d366935 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -5,6 +5,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g CFLAGS += -I../../../../usr/include/ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh +TEST_PROGS += fib_tests.sh TEST_GEN_FILES = socket TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa diff --git a/tools/testing/selftests/net/fib_tests.sh 
b/tools/testing/selftests/net/fib_tests.sh new file mode 100755 index 000000000000..a9154eefb2e2 --- /dev/null +++ b/tools/testing/selftests/net/fib_tests.sh @@ -0,0 +1,429 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test is for checking IPv4 and IPv6 FIB behavior in response to +# different events. + +ret=0 + +check_err() +{ + if [ $ret -eq 0 ]; then + ret=$1 + fi +} + +check_fail() +{ + if [ $1 -eq 0 ]; then + ret=1 + fi +} + +netns_create() +{ + local testns=$1 + + ip netns add $testns + ip netns exec $testns ip link set dev lo up +} + +fib_unreg_unicast_test() +{ + ret=0 + + netns_create "testns" + + ip netns exec testns ip link add dummy0 type dummy + ip netns exec testns ip link set dev dummy0 up + + ip netns exec testns ip address add 198.51.100.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0 + + ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null + check_err $? + + ip netns exec testns ip link del dev dummy0 + check_err $? + + ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null + check_fail $? + + ip netns del testns + + if [ $ret -ne 0 ]; then + echo "FAIL: unicast route test" + return 1 + fi + echo "PASS: unicast route test" +} + +fib_unreg_multipath_test() +{ + ret=0 + + netns_create "testns" + + ip netns exec testns ip link add dummy0 type dummy + ip netns exec testns ip link set dev dummy0 up + + ip netns exec testns ip link add dummy1 type dummy + ip netns exec testns ip link set dev dummy1 up + + ip netns exec testns ip address add 198.51.100.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0 + + ip netns exec testns ip address add 192.0.2.1/24 dev dummy1 + ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1 + + ip netns exec testns ip route add 203.0.113.0/24 \ + nexthop via 198.51.100.2 dev dummy0 \ + nexthop via 192.0.2.2 dev dummy1 + ip netns exec testns ip -6 route add 2001:db8:3::/64 \ + nexthop via 2001:db8:1::2 dev dummy0 \ + nexthop via 2001:db8:2::2 dev dummy1 + + ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null + check_err $? + + ip netns exec testns ip link del dev dummy0 + check_err $? + + ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null + # In IPv6 we do not flush the entire multipath route. + check_err $? + + ip netns exec testns ip link del dev dummy1 + + ip netns del testns + + if [ $ret -ne 0 ]; then + echo "FAIL: multipath route test" + return 1 + fi + echo "PASS: multipath route test" +} + +fib_unreg_test() +{ + echo "Running netdev unregister tests" + + fib_unreg_unicast_test + fib_unreg_multipath_test +} + +fib_down_unicast_test() +{ + ret=0 + + netns_create "testns" + + ip netns exec testns ip link add dummy0 type dummy + ip netns exec testns ip link set dev dummy0 up + + ip netns exec testns ip address add 198.51.100.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0 + + ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null + check_err $? 
+ + ip netns exec testns ip link set dev dummy0 down + check_err $? + + ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null + check_fail $? + + ip netns exec testns ip link del dev dummy0 + + ip netns del testns + + if [ $ret -ne 0 ]; then + echo "FAIL: unicast route test" + return 1 + fi + echo "PASS: unicast route test" +} + +fib_down_multipath_test_do() +{ + local down_dev=$1 + local up_dev=$2 + + ip netns exec testns ip route get fibmatch 203.0.113.1 \ + oif $down_dev &> /dev/null + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \ + oif $down_dev &> /dev/null + check_fail $? + + ip netns exec testns ip route get fibmatch 203.0.113.1 \ + oif $up_dev &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 \ + oif $up_dev &> /dev/null + check_err $? + + ip netns exec testns ip route get fibmatch 203.0.113.1 | \ + grep $down_dev | grep -q "dead linkdown" + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \ + grep $down_dev | grep -q "dead linkdown" + check_err $? + + ip netns exec testns ip route get fibmatch 203.0.113.1 | \ + grep $up_dev | grep -q "dead linkdown" + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 | \ + grep $up_dev | grep -q "dead linkdown" + check_fail $? +} + +fib_down_multipath_test() +{ + ret=0 + + netns_create "testns" + + ip netns exec testns ip link add dummy0 type dummy + ip netns exec testns ip link set dev dummy0 up + + ip netns exec testns ip link add dummy1 type dummy + ip netns exec testns ip link set dev dummy1 up + + ip netns exec testns ip address add 198.51.100.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0 + + ip netns exec testns ip address add 192.0.2.1/24 dev dummy1 + ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy1 + + ip netns exec testns ip route add 203.0.113.0/24 \ + nexthop via 198.51.100.2 dev dummy0 \ + nexthop via 192.0.2.2 dev dummy1 + ip netns exec testns ip -6 route add 2001:db8:3::/64 \ + nexthop via 2001:db8:1::2 dev dummy0 \ + nexthop via 2001:db8:2::2 dev dummy1 + + ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null + check_err $? + + ip netns exec testns ip link set dev dummy0 down + check_err $? + + fib_down_multipath_test_do "dummy0" "dummy1" + + ip netns exec testns ip link set dev dummy0 up + check_err $? + ip netns exec testns ip link set dev dummy1 down + check_err $? + + fib_down_multipath_test_do "dummy1" "dummy0" + + ip netns exec testns ip link set dev dummy0 down + check_err $? + + ip netns exec testns ip route get fibmatch 203.0.113.1 &> /dev/null + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:3::1 &> /dev/null + check_fail $? + + ip netns exec testns ip link del dev dummy1 + ip netns exec testns ip link del dev dummy0 + + ip netns del testns + + if [ $ret -ne 0 ]; then + echo "FAIL: multipath route test" + return 1 + fi + echo "PASS: multipath route test" +} + +fib_down_test() +{ + echo "Running netdev down tests" + + fib_down_unicast_test + fib_down_multipath_test +} + +fib_carrier_local_test() +{ + ret=0 + + # Local routes should not be affected when carrier changes. 
+ netns_create "testns" + + ip netns exec testns ip link add dummy0 type dummy + ip netns exec testns ip link set dev dummy0 up + + ip netns exec testns ip link set dev dummy0 carrier on + + ip netns exec testns ip address add 198.51.100.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0 + + ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null + check_err $? + + ip netns exec testns ip route get fibmatch 198.51.100.1 | \ + grep -q "linkdown" + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \ + grep -q "linkdown" + check_fail $? + + ip netns exec testns ip link set dev dummy0 carrier off + + ip netns exec testns ip route get fibmatch 198.51.100.1 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 &> /dev/null + check_err $? + + ip netns exec testns ip route get fibmatch 198.51.100.1 | \ + grep -q "linkdown" + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::1 | \ + grep -q "linkdown" + check_fail $? + + ip netns exec testns ip address add 192.0.2.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0 + + ip netns exec testns ip route get fibmatch 192.0.2.1 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 &> /dev/null + check_err $? + + ip netns exec testns ip route get fibmatch 192.0.2.1 | \ + grep -q "linkdown" + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:2::1 | \ + grep -q "linkdown" + check_fail $? + + ip netns exec testns ip link del dev dummy0 + + ip netns del testns + + if [ $ret -ne 0 ]; then + echo "FAIL: local route carrier test" + return 1 + fi + echo "PASS: local route carrier test" +} + +fib_carrier_unicast_test() +{ + ret=0 + + netns_create "testns" + + ip netns exec testns ip link add dummy0 type dummy + ip netns exec testns ip link set dev dummy0 up + + ip netns exec testns ip link set dev dummy0 carrier on + + ip netns exec testns ip address add 198.51.100.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:1::1/64 dev dummy0 + + ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null + check_err $? + + ip netns exec testns ip route get fibmatch 198.51.100.2 | \ + grep -q "linkdown" + check_fail $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \ + grep -q "linkdown" + check_fail $? + + ip netns exec testns ip link set dev dummy0 carrier off + + ip netns exec testns ip route get fibmatch 198.51.100.2 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 &> /dev/null + check_err $? + + ip netns exec testns ip route get fibmatch 198.51.100.2 | \ + grep -q "linkdown" + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:1::2 | \ + grep -q "linkdown" + check_err $? + + ip netns exec testns ip address add 192.0.2.1/24 dev dummy0 + ip netns exec testns ip -6 address add 2001:db8:2::1/64 dev dummy0 + + ip netns exec testns ip route get fibmatch 192.0.2.2 &> /dev/null + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 &> /dev/null + check_err $? + + ip netns exec testns ip route get fibmatch 192.0.2.2 | \ + grep -q "linkdown" + check_err $? + ip netns exec testns ip -6 route get fibmatch 2001:db8:2::2 | \ + grep -q "linkdown" + check_err $? 
+ + ip netns exec testns ip link del dev dummy0 + + ip netns del testns + + if [ $ret -ne 0 ]; then + echo "FAIL: unicast route carrier test" + return 1 + fi + echo "PASS: unicast route carrier test" +} + +fib_carrier_test() +{ + echo "Running netdev carrier change tests" + + fib_carrier_local_test + fib_carrier_unicast_test +} + +fib_test() +{ + fib_unreg_test + fib_down_test + fib_carrier_test +} + +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit 0 +fi + +if [ ! -x "$(command -v ip)" ]; then + echo "SKIP: Could not run test without ip tool" + exit 0 +fi + +ip route help 2>&1 | grep -q fibmatch +if [ $? -ne 0 ]; then + echo "SKIP: iproute2 too old, missing fibmatch" + exit 0 +fi + +fib_test + +exit $ret diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c index 3ab6ec403905..e11fe84de0fd 100644 --- a/tools/testing/selftests/net/msg_zerocopy.c +++ b/tools/testing/selftests/net/msg_zerocopy.c @@ -259,22 +259,28 @@ static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len) return sizeof(*ip6h); } -static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr) + +static void setup_sockaddr(int domain, const char *str_addr, + struct sockaddr_storage *sockaddr) { struct sockaddr_in6 *addr6 = (void *) sockaddr; struct sockaddr_in *addr4 = (void *) sockaddr; switch (domain) { case PF_INET: + memset(addr4, 0, sizeof(*addr4)); addr4->sin_family = AF_INET; addr4->sin_port = htons(cfg_port); - if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1) + if (str_addr && + inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1) error(1, 0, "ipv4 parse error: %s", str_addr); break; case PF_INET6: + memset(addr6, 0, sizeof(*addr6)); addr6->sin6_family = AF_INET6; addr6->sin6_port = htons(cfg_port); - if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1) + if (str_addr && + inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1) error(1, 0, "ipv6 parse error: %s", str_addr); break; default: @@ -603,6 +609,7 @@ static void parse_opts(int argc, char **argv) sizeof(struct tcphdr) - 40 /* max tcp options */; int c; + char *daddr = NULL, *saddr = NULL; cfg_payload_len = max_payload_len; @@ -627,7 +634,7 @@ static void parse_opts(int argc, char **argv) cfg_cpu = strtol(optarg, NULL, 0); break; case 'D': - setup_sockaddr(cfg_family, optarg, &cfg_dst_addr); + daddr = optarg; break; case 'i': cfg_ifindex = if_nametoindex(optarg); @@ -638,7 +645,7 @@ static void parse_opts(int argc, char **argv) cfg_cork_mixed = true; break; case 'p': - cfg_port = htons(strtoul(optarg, NULL, 0)); + cfg_port = strtoul(optarg, NULL, 0); break; case 'r': cfg_rx = true; @@ -647,7 +654,7 @@ static void parse_opts(int argc, char **argv) cfg_payload_len = strtoul(optarg, NULL, 0); break; case 'S': - setup_sockaddr(cfg_family, optarg, &cfg_src_addr); + saddr = optarg; break; case 't': cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000; @@ -660,6 +667,8 @@ static void parse_opts(int argc, char **argv) break; } } + setup_sockaddr(cfg_family, daddr, &cfg_dst_addr); + setup_sockaddr(cfg_family, saddr, &cfg_src_addr); if (cfg_payload_len > max_payload_len) error(1, 0, "-s: payload exceeds max (%d)", max_payload_len); diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index dada4ab69142..a622eeecc3a6 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -598,6 +598,135 @@ kci_test_ip6gretap() ip netns del "$testns" } +kci_test_erspan() +{ + 
testns="testns" + DEV_NS=erspan00 + ret=0 + + ip link help erspan 2>&1 | grep -q "^Usage:" + if [ $? -ne 0 ];then + echo "SKIP: erspan: iproute2 too old" + return 1 + fi + + ip netns add "$testns" + if [ $? -ne 0 ]; then + echo "SKIP erspan tests: cannot add net namespace $testns" + return 1 + fi + + # test native tunnel erspan v1 + ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \ + key 102 local 172.16.1.100 remote 172.16.1.200 \ + erspan_ver 1 erspan 488 + check_err $? + + ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24 + check_err $? + + ip netns exec "$testns" ip link set dev $DEV_NS up + check_err $? + + ip netns exec "$testns" ip link del "$DEV_NS" + check_err $? + + # test native tunnel erspan v2 + ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan seq \ + key 102 local 172.16.1.100 remote 172.16.1.200 \ + erspan_ver 2 erspan_dir ingress erspan_hwid 7 + check_err $? + + ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24 + check_err $? + + ip netns exec "$testns" ip link set dev $DEV_NS up + check_err $? + + ip netns exec "$testns" ip link del "$DEV_NS" + check_err $? + + # test external mode + ip netns exec "$testns" ip link add dev "$DEV_NS" type erspan external + check_err $? + + ip netns exec "$testns" ip link del "$DEV_NS" + check_err $? + + if [ $ret -ne 0 ]; then + echo "FAIL: erspan" + return 1 + fi + echo "PASS: erspan" + + ip netns del "$testns" +} + +kci_test_ip6erspan() +{ + testns="testns" + DEV_NS=ip6erspan00 + ret=0 + + ip link help ip6erspan 2>&1 | grep -q "^Usage:" + if [ $? -ne 0 ];then + echo "SKIP: ip6erspan: iproute2 too old" + return 1 + fi + + ip netns add "$testns" + if [ $? -ne 0 ]; then + echo "SKIP ip6erspan tests: cannot add net namespace $testns" + return 1 + fi + + # test native tunnel ip6erspan v1 + ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \ + key 102 local fc00:100::1 remote fc00:100::2 \ + erspan_ver 1 erspan 488 + check_err $? + + ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24 + check_err $? + + ip netns exec "$testns" ip link set dev $DEV_NS up + check_err $? + + ip netns exec "$testns" ip link del "$DEV_NS" + check_err $? + + # test native tunnel ip6erspan v2 + ip netns exec "$testns" ip link add dev "$DEV_NS" type ip6erspan seq \ + key 102 local fc00:100::1 remote fc00:100::2 \ + erspan_ver 2 erspan_dir ingress erspan_hwid 7 + check_err $? + + ip netns exec "$testns" ip addr add dev "$DEV_NS" 10.1.1.100/24 + check_err $? + + ip netns exec "$testns" ip link set dev $DEV_NS up + check_err $? + + ip netns exec "$testns" ip link del "$DEV_NS" + check_err $? + + # test external mode + ip netns exec "$testns" ip link add dev "$DEV_NS" \ + type ip6erspan external + check_err $? + + ip netns exec "$testns" ip link del "$DEV_NS" + check_err $? 
+ + if [ $ret -ne 0 ]; then + echo "FAIL: ip6erspan" + return 1 + fi + echo "PASS: ip6erspan" + + ip netns del "$testns" +} + kci_test_rtnl() { kci_add_dummy @@ -612,6 +741,8 @@ kci_test_rtnl() kci_test_gre kci_test_gretap kci_test_ip6gretap + kci_test_erspan + kci_test_ip6erspan kci_test_bridge kci_test_addrlabel kci_test_ifalias diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c index 66e5ce5b91f0..1aef72df20a1 100644 --- a/tools/testing/selftests/x86/ldt_gdt.c +++ b/tools/testing/selftests/x86/ldt_gdt.c @@ -122,8 +122,7 @@ static void check_valid_segment(uint16_t index, int ldt, * NB: Different Linux versions do different things with the * accessed bit in set_thread_area(). */ - if (ar != expected_ar && - (ldt || ar != (expected_ar | AR_ACCESSED))) { + if (ar != expected_ar && ar != (expected_ar | AR_ACCESSED)) { printf("[FAIL]\t%s entry %hu has AR 0x%08X but expected 0x%08X\n", (ldt ? "LDT" : "GDT"), index, ar, expected_ar); nerrs++; @@ -627,13 +626,10 @@ static void do_multicpu_tests(void) static int finish_exec_test(void) { /* - * In a sensible world, this would be check_invalid_segment(0, 1); - * For better or for worse, though, the LDT is inherited across exec. - * We can probably change this safely, but for now we test it. + * Older kernel versions did inherit the LDT on exec() which is + * wrong because exec() starts from a clean state. */ - check_valid_segment(0, 1, - AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB, - 42, true); + check_invalid_segment(0, 1); return nerrs ? 1 : 0; } diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c index 2b3d6d235015..3d7b42e77299 100644 --- a/tools/usb/usbip/src/utils.c +++ b/tools/usb/usbip/src/utils.c @@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add) char command[SYSFS_BUS_ID_SIZE + 4]; char match_busid_attr_path[SYSFS_PATH_MAX]; int rc; + int cmd_size; snprintf(match_busid_attr_path, sizeof(match_busid_attr_path), "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME, @@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add) attr_name); if (add) - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid); + cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", + busid); else - snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid); + cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", + busid); rc = write_sysfs_attribute(match_busid_attr_path, command, - sizeof(command)); + cmd_size); if (rc < 0) { dbg("failed to write match_busid: %s", strerror(errno)); return -1;
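One detail in the usbip hunk above is easy to miss: write_sysfs_attribute() used to be handed sizeof(command), so every write to the match_busid attribute carried the full fixed-size buffer, uninitialized tail included, rather than just the formatted command. The fix captures the length that snprintf() reports. A minimal sketch of the pattern, with a hypothetical 32-byte buffer standing in for command[SYSFS_BUS_ID_SIZE + 4]:

    /* snprintf() returns the number of characters it produced (not counting
     * the terminating NUL, and assuming no truncation), which is the right
     * length to hand to a write; sizeof(buffer) would also send whatever
     * bytes happen to follow the NUL.
     */
    #include <stdio.h>

    int main(void)
    {
            char command[32];       /* hypothetical size for illustration */
            int cmd_size;

            cmd_size = snprintf(command, sizeof(command), "add %s", "1-2.3");

            printf("buffer size %zu, formatted length %d: \"%s\"\n",
                   sizeof(command), cmd_size, command);
            return 0;
    }

For a five-character bus ID the write shrinks from 32 bytes to 9 ("add 1-2.3").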