Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/arm64/gtdt.c | 29
-rw-r--r--  drivers/base/devtmpfs.c | 22
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 1
-rw-r--r--  drivers/block/rnull.rs | 2
-rw-r--r--  drivers/block/zram/zram_drv.c | 8
-rw-r--r--  drivers/bluetooth/Kconfig | 6
-rw-r--r--  drivers/bluetooth/hci_uart.h | 8
-rw-r--r--  drivers/cdx/controller/bitfield.h | 90
-rw-r--r--  drivers/cdx/controller/cdx_controller.c | 2
-rw-r--r--  drivers/cdx/controller/cdx_rpmsg.c | 2
-rw-r--r--  drivers/cdx/controller/mcdi.c | 43
-rw-r--r--  drivers/cdx/controller/mcdi.h | 242
-rw-r--r--  drivers/cdx/controller/mcdi_functions.c | 1
-rw-r--r--  drivers/cdx/controller/mcdi_functions.h | 3
-rw-r--r--  drivers/cdx/controller/mcdid.h | 63
-rw-r--r--  drivers/clk/renesas/clk-mstp.c | 20
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.c | 2
-rw-r--r--  drivers/clocksource/Kconfig | 13
-rw-r--r--  drivers/clocksource/Makefile | 3
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 686
-rw-r--r--  drivers/clocksource/arm_arch_timer_mmio.c | 440
-rw-r--r--  drivers/clocksource/arm_global_timer.c | 44
-rw-r--r--  drivers/clocksource/clps711x-timer.c | 23
-rw-r--r--  drivers/clocksource/ingenic-sysost.c | 27
-rw-r--r--  drivers/clocksource/scx200_hrt.c | 1
-rw-r--r--  drivers/clocksource/sh_cmt.c | 84
-rw-r--r--  drivers/clocksource/timer-cs5535.c | 1
-rw-r--r--  drivers/clocksource/timer-econet-en751221.c | 2
-rw-r--r--  drivers/clocksource/timer-nxp-pit.c | 382
-rw-r--r--  drivers/clocksource/timer-nxp-stm.c | 2
-rw-r--r--  drivers/clocksource/timer-rtl-otto.c | 42
-rw-r--r--  drivers/clocksource/timer-stm32-lp.c | 1
-rw-r--r--  drivers/clocksource/timer-sun5i.c | 2
-rw-r--r--  drivers/clocksource/timer-tegra186.c | 38
-rw-r--r--  drivers/clocksource/timer-ti-dm.c | 119
-rw-r--r--  drivers/clocksource/timer-vf-pit.c | 194
-rw-r--r--  drivers/cpufreq/cpufreq.c | 20
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r--  drivers/crypto/hisilicon/Kconfig | 1
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 403
-rw-r--r--  drivers/crypto/img-hash.c | 2
-rw-r--r--  drivers/dax/super.c | 2
-rw-r--r--  drivers/dpll/dpll_netlink.c | 4
-rw-r--r--  drivers/edac/Kconfig | 16
-rw-r--r--  drivers/edac/Makefile | 2
-rw-r--r--  drivers/edac/a72_edac.c | 225
-rw-r--r--  drivers/edac/altera_edac.c | 4
-rw-r--r--  drivers/edac/amd64_edac.c | 20
-rw-r--r--  drivers/edac/amd64_edac.h | 2
-rw-r--r-- [-rwxr-xr-x]  drivers/edac/ecs.c | 0
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 24
-rw-r--r--  drivers/edac/i10nm_base.c | 27
-rw-r--r--  drivers/edac/ie31200_edac.c | 4
-rw-r--r-- [-rwxr-xr-x]  drivers/edac/mem_repair.c | 0
-rw-r--r-- [-rwxr-xr-x]  drivers/edac/scrub.c | 0
-rw-r--r--  drivers/edac/skx_base.c | 33
-rw-r--r--  drivers/edac/skx_common.c | 54
-rw-r--r--  drivers/edac/skx_common.h | 28
-rw-r--r--  drivers/edac/versalnet_edac.c | 960
-rw-r--r--  drivers/firewire/core-cdev.c | 2
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra186.c | 5
-rw-r--r--  drivers/gpio/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-mpc5200.c | 78
-rw-r--r--  drivers/gpio/gpio-regmap.c | 2
-rw-r--r--  drivers/gpio/gpiolib-acpi-core.c | 11
-rw-r--r--  drivers/gpio/gpiolib-acpi-quirks.c | 12
-rw-r--r--  drivers/gpio/gpiolib.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 51
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 121
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 2
-rw-r--r--  drivers/gpu/drm/drm_panic_qr.rs | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 7
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c | 8
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_abi.h | 1
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_bo_evict.c | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_configfs.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_device_sysfs.c | 10
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 118
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.c | 35
-rw-r--r--  drivers/gpu/drm/xe/xe_nvm.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_tile_sysfs.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 4
-rw-r--r--  drivers/gpu/nova-core/fb.rs | 6
-rw-r--r--  drivers/gpu/nova-core/gpu.rs | 3
-rw-r--r--  drivers/gpu/nova-core/regs/macros.rs | 6
-rw-r--r--  drivers/gpu/nova-core/vbios.rs | 4
-rw-r--r--  drivers/hid/Kconfig | 2
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_client.c | 12
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_common.h | 3
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 4
-rw-r--r--  drivers/hid/hid-asus.c | 3
-rw-r--r--  drivers/hid/hid-cp2112.c | 10
-rw-r--r--  drivers/hid/hid-lenovo.c | 4
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c | 2
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h | 2
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c | 2
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-trbe.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-riic.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rtl9300.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 1
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 1
-rw-r--r--  drivers/iommu/amd/init.c | 9
-rw-r--r--  drivers/iommu/amd/io_pgtable.c | 25
-rw-r--r--  drivers/iommu/intel/iommu.c | 7
-rw-r--r--  drivers/iommu/iommufd/device.c | 3
-rw-r--r--  drivers/iommu/iommufd/eventq.c | 9
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h | 3
-rw-r--r--  drivers/iommu/iommufd/main.c | 59
-rw-r--r--  drivers/iommu/s390-iommu.c | 29
-rw-r--r--  drivers/irqchip/irq-aspeed-scu-ic.c | 256
-rw-r--r--  drivers/irqchip/irq-gic-v2m.c | 13
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 3
-rw-r--r--  drivers/irqchip/irq-gic-v5-irs.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v5-its.c | 26
-rw-r--r--  drivers/irqchip/irq-loongson-eiointc.c | 105
-rw-r--r--  drivers/irqchip/irq-loongson-pch-lpc.c | 9
-rw-r--r--  drivers/irqchip/irq-msi-lib.c | 14
-rw-r--r--  drivers/irqchip/irq-nvic.c | 3
-rw-r--r--  drivers/irqchip/irq-renesas-rza1.c | 3
-rw-r--r--  drivers/irqchip/irq-renesas-rzg2l.c | 2
-rw-r--r--  drivers/irqchip/irq-sg2042-msi.c | 26
-rw-r--r--  drivers/irqchip/irq-sifive-plic.c | 10
-rw-r--r--  drivers/md/dm-integrity.c | 2
-rw-r--r--  drivers/md/dm-raid.c | 6
-rw-r--r--  drivers/md/dm-stripe.c | 10
-rw-r--r--  drivers/md/md-cluster.c | 4
-rw-r--r--  drivers/md/md-linear.c | 1
-rw-r--r--  drivers/md/raid0.c | 1
-rw-r--r--  drivers/md/raid1.c | 1
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/media/rc/pwm-ir-tx.c | 5
-rw-r--r--  drivers/misc/ibmasm/ibmasmfs.c | 2
-rw-r--r--  drivers/misc/lkdtm/cfi.c | 2
-rw-r--r--  drivers/misc/lkdtm/fortify.c | 6
-rw-r--r--  drivers/mmc/host/mvsdio.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 68
-rw-r--r--  drivers/mmc/host/sdhci-uhs2.c | 3
-rw-r--r--  drivers/mmc/host/sdhci.c | 34
-rw-r--r--  drivers/net/Kconfig | 15
-rw-r--r--  drivers/net/bonding/bond_main.c | 2
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 7
-rw-r--r--  drivers/net/can/spi/hi311x.c | 34
-rw-r--r--  drivers/net/can/sun4i_can.c | 1
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_core.c | 3
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 1
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 2
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 26
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 110
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 80
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 22
-rw-r--r--  drivers/net/ethernet/intel/libie/adminq.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h | 3
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 7
-rw-r--r--  drivers/net/phy/bcm-phy-ptp.c | 6
-rw-r--r--  drivers/net/tun.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c | 2
-rw-r--r--  drivers/of/irq.c | 1
-rw-r--r--  drivers/pci/msi/irqdomain.c | 53
-rw-r--r--  drivers/pci/pci.c | 6
-rw-r--r--  drivers/perf/Kconfig | 9
-rw-r--r--  drivers/perf/Makefile | 1
-rw-r--r--  drivers/perf/arm-ccn.c | 2
-rw-r--r--  drivers/perf/arm-cmn.c | 9
-rw-r--r--  drivers/perf/arm_pmuv3.c | 29
-rw-r--r--  drivers/perf/arm_spe_pmu.c | 114
-rw-r--r--  drivers/perf/dwc_pcie_pmu.c | 161
-rw-r--r--  drivers/perf/fsl_imx9_ddr_perf.c | 6
-rw-r--r--  drivers/perf/fujitsu_uncore_pmu.c | 613
-rw-r--r--  drivers/perf/hisilicon/Makefile | 3
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 528
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_mn_pmu.c | 411
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_noc_pmu.c | 443
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.c | 5
-rw-r--r--  drivers/perf/hisilicon/hisi_uncore_pmu.h | 6
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 8
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-airoha.c | 31
-rw-r--r--  drivers/platform/x86/amd/pmc/pmc-quirks.c | 15
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c | 1
-rw-r--r--  drivers/platform/x86/asus-nb-wmi.c | 2
-rw-r--r--  drivers/platform/x86/dell/dell-lis3lv02d.c | 1
-rw-r--r--  drivers/platform/x86/dell/dell-pc.c | 9
-rw-r--r--  drivers/platform/x86/intel/speed_select_if/isst_if_common.c | 2
-rw-r--r--  drivers/platform/x86/intel/tpmi_power_domains.c | 2
-rw-r--r--  drivers/platform/x86/lg-laptop.c | 34
-rw-r--r--  drivers/platform/x86/oxpec.c | 14
-rw-r--r--  drivers/pmdomain/core.c | 20
-rw-r--r--  drivers/pmdomain/mediatek/mt8195-pm-domains.h | 1
-rw-r--r--  drivers/pmdomain/renesas/rcar-gen4-sysc.c | 1
-rw-r--r--  drivers/pmdomain/renesas/rcar-sysc.c | 3
-rw-r--r--  drivers/pmdomain/renesas/rmobile-sysc.c | 3
-rw-r--r--  drivers/pmdomain/rockchip/pm-domains.c | 2
-rw-r--r--  drivers/power/supply/bq27xxx_battery.c | 4
-rw-r--r--  drivers/ps3/ps3stor_lib.c | 3
-rw-r--r--  drivers/ras/ras.c | 1
-rw-r--r--  drivers/reset/reset-eyeq.c | 11
-rw-r--r--  drivers/s390/block/Kconfig | 12
-rw-r--r--  drivers/s390/block/dcssblk.c | 35
-rw-r--r--  drivers/s390/char/Makefile | 1
-rw-r--r--  drivers/s390/char/hmcdrv_dev.c | 19
-rw-r--r--  drivers/s390/char/sclp_cmd.c | 478
-rw-r--r--  drivers/s390/char/sclp_mem.c | 399
-rw-r--r--  drivers/s390/char/tape_3590.c | 2
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c | 4
-rw-r--r--  drivers/spi/spi-cadence-quadspi.c | 53
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 1
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 2
-rw-r--r--  drivers/vhost/net.c | 40
-rw-r--r--  drivers/vhost/scsi.c | 2
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 13
-rw-r--r--  drivers/virt/coco/efi_secret/Kconfig | 2
-rw-r--r--  drivers/xen/balloon.c | 4
-rw-r--r--  drivers/xen/events/events_base.c | 37
-rw-r--r--  drivers/xen/gntdev-dmabuf.c | 7
-rw-r--r--  drivers/xen/gntdev-dmabuf.h | 2
-rw-r--r--  drivers/xen/gntdev.c | 33
-rw-r--r--  drivers/xen/grant-table.c | 6
-rw-r--r--  drivers/xen/manage.c | 14
-rw-r--r--  drivers/xen/privcmd.c | 14
-rw-r--r--  drivers/xen/unpopulated-alloc.c | 4
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 2
-rw-r--r--  drivers/zorro/names.c | 12
285 files changed, 7233 insertions, 3154 deletions
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 70f8290b659d..fd995a1d3d24 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -388,11 +388,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
return 0;
}
-static int __init gtdt_sbsa_gwdt_init(void)
+static int __init gtdt_platform_timer_init(void)
{
void *platform_timer;
struct acpi_table_header *table;
- int ret, timer_count, gwdt_count = 0;
+ int ret, timer_count, gwdt_count = 0, mmio_timer_count = 0;
if (acpi_disabled)
return 0;
@@ -414,20 +414,41 @@ static int __init gtdt_sbsa_gwdt_init(void)
goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
+ ret = 0;
+
if (is_non_secure_watchdog(platform_timer)) {
ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count);
if (ret)
- break;
+ continue;
gwdt_count++;
+ } else if (is_timer_block(platform_timer)) {
+ struct arch_timer_mem atm = {};
+ struct platform_device *pdev;
+
+ ret = gtdt_parse_timer_block(platform_timer, &atm);
+ if (ret)
+ continue;
+
+ pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
+ gwdt_count, &atm,
+ sizeof(atm));
+ if (IS_ERR(pdev)) {
+ pr_err("Can't register timer %d\n", gwdt_count);
+ continue;
+ }
+
+ mmio_timer_count++;
}
}
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+ if (mmio_timer_count)
+ pr_info("found %d Generic MMIO timer(s).\n", mmio_timer_count);
out_put_gtdt:
acpi_put_table(table);
return ret;
}
-device_initcall(gtdt_sbsa_gwdt_init);
+device_initcall(gtdt_platform_timer_init);
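
The hunk above hands the parsed timer-frame description to a separate driver as platform data. A minimal sketch of the consuming side, assuming (per the diffstat's new arm_arch_timer_mmio.c) a platform driver binds to the "gtdt-arm-mmio-timer" name; the probe function below is illustrative, not the actual driver:

#include <linux/platform_device.h>
#include <clocksource/arm_arch_timer.h>

static int example_gtdt_mmio_probe(struct platform_device *pdev)
{
	/* platform_device_register_data() above copied the struct in */
	struct arch_timer_mem *atm = dev_get_platdata(&pdev->dev);

	if (!atm)
		return -ENODEV;
	/* ... map the frames described by *atm and register clockevents ... */
	return 0;
}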
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 31bfb3194b4c..9d4e46ad8352 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -176,7 +176,7 @@ static int dev_mkdir(const char *name, umode_t mode)
struct dentry *dentry;
struct path path;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -184,7 +184,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return PTR_ERR_OR_ZERO(dentry);
}
@@ -222,10 +222,10 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -246,7 +246,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
}
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return err;
}
@@ -256,7 +256,7 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- dentry = kern_path_locked(name, &parent);
+ dentry = start_removing_path(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (d_inode(dentry)->i_private == &thread)
@@ -265,9 +265,7 @@ static int dev_rmdir(const char *name)
else
err = -EPERM;
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
- path_put(&parent);
+ end_removing_path(&parent, dentry);
return err;
}
@@ -325,7 +323,7 @@ static int handle_remove(const char *nodename, struct device *dev)
int deleted = 0;
int err = 0;
- dentry = kern_path_locked(nodename, &parent);
+ dentry = start_removing_path(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -349,10 +347,8 @@ static int handle_remove(const char *nodename, struct device *dev)
if (!err || err == -ENOENT)
deleted = 1;
}
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
+ end_removing_path(&parent, dentry);
- path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
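
The helper renames above pair kern_path_create()/done_path_create() with start_creating_path()/end_creating_path(), and kern_path_locked() with start_removing_path()/end_removing_path(). The behaviour of end_removing_path() can be inferred from the open-coded teardown this patch deletes; a sketch of that assumed equivalence, not the actual VFS implementation:

#include <linux/fs.h>
#include <linux/namei.h>

/* Assumed to match the dput/unlock/put triplet removed from
 * dev_rmdir() and handle_remove() above. */
static inline void sketch_end_removing_path(struct path *parent,
					    struct dentry *dentry)
{
	dput(dentry);
	inode_unlock(d_inode(parent->dentry));
	path_put(parent);
}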
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e09930c2b226..91f3b8afb63c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
index d07e76ae2c13..6366da12c5a5 100644
--- a/drivers/block/rnull.rs
+++ b/drivers/block/rnull.rs
@@ -51,7 +51,7 @@ impl kernel::InPlaceModule for NullBlkModule {
.logical_block_size(4096)?
.physical_block_size(4096)?
.rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)
+ .build(fmt!("rnullb{}", 0), tagset)
})();
try_pin_init!(Self {
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8acad3cc6e6e..f31652085adc 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1795,6 +1795,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
u32 index)
{
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_SAME);
zram_set_handle(zram, index, fill);
zram_slot_unlock(zram, index);
@@ -1832,6 +1833,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
kunmap_local(src);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_HUGE);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1855,11 +1857,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
unsigned long element;
bool same_filled;
- /* First, free memory allocated to this slot (if any) */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_slot_unlock(zram, index);
-
mem = kmap_local_page(page);
same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
@@ -1901,6 +1898,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
zcomp_stream_put(zstrm);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
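
Taken together, these hunks replace the single free-up-front step with one consistent pattern: each writer frees whatever the slot previously held and publishes its new state under the same hold of the slot lock, closing the window between the old unlock and relock. Consolidated as a sketch, using only identifiers from the hunks above:

static void example_zram_publish(struct zram *zram, u32 index,
				 unsigned long handle, unsigned int comp_len)
{
	zram_slot_lock(zram, index);
	zram_free_page(zram, index);	/* drop the slot's previous contents */
	zram_set_handle(zram, index, handle);
	zram_set_obj_size(zram, index, comp_len);
	zram_slot_unlock(zram, index);
}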
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..7df69ccb6600 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -312,7 +312,9 @@ config BT_HCIBCM4377
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
+ depends on BT_HCIUART
depends on USB
+ select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -437,8 +439,10 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
depends on USB || !BT_HCIBTUSB_MTK
+ select BT_HCIUART_H4
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
@@ -483,7 +487,9 @@ config BT_VIRTIO
config BT_NXPUART
tristate "NXP protocol support"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
+ select BT_HCIUART_H4
select CRC32
select CRC8
help
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 5ea5dd80e297..cbbe79b241ce 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -121,10 +121,6 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
-#ifdef CONFIG_BT_HCIUART_H4
-int h4_init(void);
-int h4_deinit(void);
-
struct h4_recv_pkt {
u8 type; /* Packet type */
u8 hlen; /* Header length */
@@ -162,6 +158,10 @@ struct h4_recv_pkt {
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE \
+#ifdef CONFIG_BT_HCIUART_H4
+int h4_init(void);
+int h4_deinit(void);
+
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
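
Moving struct h4_recv_pkt and the H4_RECV_* initializers out of the #ifdef lets any transport driver define a packet table while only the h4_init()/h4_recv_buf() helpers stay conditional on BT_HCIUART_H4 (which the Kconfig hunk above now selects). A hypothetical receive path built on the declaration above; the table and wrapper are illustrative, though H4_RECV_ACL/SCO/EVENT and hci_recv_frame() are existing kernel symbols:

static const struct h4_recv_pkt example_pkts[] = {
	{ H4_RECV_ACL,   .recv = hci_recv_frame },
	{ H4_RECV_SCO,   .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = hci_recv_frame },
};

static struct sk_buff *example_rx_skb;

static int example_recv(struct hci_dev *hdev, const unsigned char *data,
			int count)
{
	example_rx_skb = h4_recv_buf(hdev, example_rx_skb, data, count,
				     example_pkts, ARRAY_SIZE(example_pkts));
	if (IS_ERR(example_rx_skb)) {
		int err = PTR_ERR(example_rx_skb);

		example_rx_skb = NULL;
		return err;
	}
	return count;
}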
diff --git a/drivers/cdx/controller/bitfield.h b/drivers/cdx/controller/bitfield.h
deleted file mode 100644
index 567f8ec47582..000000000000
--- a/drivers/cdx/controller/bitfield.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_BITFIELD_H
-#define CDX_BITFIELD_H
-
-#include <linux/bitfield.h>
-
-/* Lowest bit numbers and widths */
-#define CDX_DWORD_LBN 0
-#define CDX_DWORD_WIDTH 32
-
-/* Specified attribute (e.g. LBN) of the specified field */
-#define CDX_VAL(field, attribute) field ## _ ## attribute
-/* Low bit number of the specified field */
-#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
-/* Bit width of the specified field */
-#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
-/* High bit number of the specified field */
-#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
-
-/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
-struct cdx_dword {
- __le32 cdx_u32;
-};
-
-/* Value expanders for printk */
-#define CDX_DWORD_VAL(dword) \
- ((unsigned int)le32_to_cpu((dword).cdx_u32))
-
-/*
- * Extract bit field portion [low,high) from the 32-bit little-endian
- * element which contains bits [min,max)
- */
-#define CDX_DWORD_FIELD(dword, field) \
- (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
- le32_to_cpu((dword).cdx_u32)))
-
-/*
- * Creates the portion of the named bit field that lies within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELD(field, value) \
- (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
- CDX_LOW_BIT(field)), value))
-
-/*
- * Creates the portion of the named bit fields that lie within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELDS(field1, value1, \
- field2, value2, \
- field3, value3, \
- field4, value4, \
- field5, value5, \
- field6, value6, \
- field7, value7) \
- (CDX_INSERT_FIELD(field1, (value1)) | \
- CDX_INSERT_FIELD(field2, (value2)) | \
- CDX_INSERT_FIELD(field3, (value3)) | \
- CDX_INSERT_FIELD(field4, (value4)) | \
- CDX_INSERT_FIELD(field5, (value5)) | \
- CDX_INSERT_FIELD(field6, (value6)) | \
- CDX_INSERT_FIELD(field7, (value7)))
-
-#define CDX_POPULATE_DWORD(dword, ...) \
- (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
-
-/* Populate a dword field with various numbers of arguments */
-#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
-#define CDX_POPULATE_DWORD_6(dword, ...) \
- CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_5(dword, ...) \
- CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_4(dword, ...) \
- CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_3(dword, ...) \
- CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_2(dword, ...) \
- CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_1(dword, ...) \
- CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_SET_DWORD(dword) \
- CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
-
-#endif /* CDX_BITFIELD_H */
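
This header is not dropped outright: per the mcdi.c hunk below, the same helpers now come from <linux/cdx/bitfield.h>, so the macros are unchanged and only their location moves. A small round-trip sketch; EXAMPLE_FIELD_LBN/_WIDTH are made-up field definitions, not real CDX ones:

#include <linux/cdx/bitfield.h>

#define EXAMPLE_FIELD_LBN	8	/* hypothetical field at bits [11:8] */
#define EXAMPLE_FIELD_WIDTH	4

static u32 example_bitfield_roundtrip(void)
{
	struct cdx_dword d;

	CDX_POPULATE_DWORD_1(d, EXAMPLE_FIELD, 0xA);	/* bits [11:8] = 0xA */
	return CDX_DWORD_FIELD(d, EXAMPLE_FIELD);	/* returns 0xA */
}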
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index fca83141e3e6..3f8b9041babf 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -14,7 +14,7 @@
#include "cdx_controller.h"
#include "../cdx.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
{
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index 61f1a290ff08..59aabd99fa8f 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -15,7 +15,7 @@
#include "../cdx.h"
#include "cdx_controller.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
{ .name = "mcdi_ipc" },
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index e760f8d347cc..2e82ffc18d89 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -23,9 +23,10 @@
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/wait.h>
+#include <linux/cdx/bitfield.h>
-#include "bitfield.h"
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
@@ -99,6 +100,19 @@ static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
}
+/**
+ * cdx_mcdi_init - Initialize MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function allocates and initializes internal MCDI structures and resources
+ * for the CDX device, including the workqueue, locking primitives, and command
+ * tracking mechanisms. It sets the initial operating mode and prepares the device
+ * for MCDI operations.
+ *
+ * Return:
+ * * 0 - on success
+ * * -ENOMEM - if memory allocation or workqueue creation fails
+ */
int cdx_mcdi_init(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -128,7 +142,16 @@ fail2:
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_init);
+/**
+ * cdx_mcdi_finish - Cleanup MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function is responsible for cleaning up the MCDI (Management Controller Driver Interface)
+ * resources associated with a cdx_mcdi structure. Also destroys the mcdi workqueue.
+ *
+ */
void cdx_mcdi_finish(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -143,6 +166,7 @@ void cdx_mcdi_finish(struct cdx_mcdi *cdx)
kfree(cdx->mcdi);
cdx->mcdi = NULL;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_finish);
static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
{
@@ -553,6 +577,19 @@ static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
}
+/**
+ * cdx_mcdi_process_cmd - Process an incoming MCDI response
+ * @cdx: Handle to the CDX MCDI structure
+ * @outbuf: Pointer to the response buffer received from the management controller
+ * @len: Length of the response buffer in bytes
+ *
+ * This function handles a response from the management controller. It locates the
+ * corresponding command using the sequence number embedded in the header,
+ * completes the command if it is still pending, and initiates any necessary cleanup.
+ *
+ * The function assumes that the response buffer is well-formed and at least one
+ * dword in size.
+ */
void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
{
struct cdx_mcdi_iface *mcdi;
@@ -590,6 +627,7 @@ void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_process_cmd);
static void cdx_mcdi_cmd_work(struct work_struct *context)
{
@@ -757,6 +795,7 @@ int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
outlen_actual, false);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_rpc);
/**
* cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
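
With cdx_mcdi_init(), cdx_mcdi_finish(), cdx_mcdi_process_cmd() and cdx_mcdi_rpc() now exported, a separate module can drive the interface. A hedged lifecycle sketch, assuming MCDI_DECLARE_BUF moved to the new public header along with these symbols; MC_CMD_EXAMPLE is a placeholder command number, not a real pcol definition:

#include <linux/cdx/mcdi.h>

#define MC_CMD_EXAMPLE	0x1	/* placeholder, not a real command */

static int example_mcdi_user(struct cdx_mcdi *cdx)
{
	MCDI_DECLARE_BUF(outbuf, 8);	/* room for two dwords */
	size_t outlen = 0;
	int ret;

	ret = cdx_mcdi_init(cdx);	/* -ENOMEM on allocation failure */
	if (ret)
		return ret;

	ret = cdx_mcdi_rpc(cdx, MC_CMD_EXAMPLE, NULL, 0,
			   outbuf, sizeof(outbuf), &outlen);

	cdx_mcdi_finish(cdx);
	return ret;
}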
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
deleted file mode 100644
index 54a65e9760ae..000000000000
--- a/drivers/cdx/controller/mcdi.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2008-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_MCDI_H
-#define CDX_MCDI_H
-
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include <linux/rpmsg.h>
-
-#include "bitfield.h"
-#include "mc_cdx_pcol.h"
-
-#ifdef DEBUG
-#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
-#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
-#else
-#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
-#define CDX_WARN_ON_PARANOID(x) do {} while (0)
-#endif
-
-/**
- * enum cdx_mcdi_mode - MCDI transaction mode
- * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
- * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
- */
-enum cdx_mcdi_mode {
- MCDI_MODE_EVENTS,
- MCDI_MODE_FAIL,
-};
-
-#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_RPC_LONG_TIMEOU (60 * HZ)
-#define MCDI_RPC_POST_RST_TIME (10 * HZ)
-
-#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
-
-/**
- * enum cdx_mcdi_cmd_state - State for an individual MCDI command
- * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
- * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
- * as MC have too many outstanding commands. Command will be retried once
- * another command returns.
- * @MCDI_STATE_RUNNING: Command was accepted and is running.
- * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
- * the command.
- * @MCDI_STATE_FINISHED: Processing of this command has completed.
- */
-
-enum cdx_mcdi_cmd_state {
- MCDI_STATE_QUEUED,
- MCDI_STATE_RETRY,
- MCDI_STATE_RUNNING,
- MCDI_STATE_RUNNING_CANCELLED,
- MCDI_STATE_FINISHED,
-};
-
-/**
- * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
- * with CDX controller.
- * @mcdi: MCDI interface
- * @mcdi_ops: MCDI operations
- * @r5_rproc : R5 Remoteproc device handle
- * @rpdev: RPMsg device
- * @ept: RPMsg endpoint
- * @work: Post probe work
- */
-struct cdx_mcdi {
- /* MCDI interface */
- struct cdx_mcdi_data *mcdi;
- const struct cdx_mcdi_ops *mcdi_ops;
-
- struct rproc *r5_rproc;
- struct rpmsg_device *rpdev;
- struct rpmsg_endpoint *ept;
- struct work_struct work;
-};
-
-struct cdx_mcdi_ops {
- void (*mcdi_request)(struct cdx_mcdi *cdx,
- const struct cdx_dword *hdr, size_t hdr_len,
- const struct cdx_dword *sdu, size_t sdu_len);
- unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
-};
-
-typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
- unsigned long cookie, int rc,
- struct cdx_dword *outbuf,
- size_t outlen_actual);
-
-/**
- * struct cdx_mcdi_cmd - An outstanding MCDI command
- * @ref: Reference count. There will be one reference if the command is
- * in the mcdi_iface cmd_list, another if it's on a cleanup list,
- * and a third if it's queued in the work queue.
- * @list: The data for this entry in mcdi->cmd_list
- * @cleanup_list: The data for this entry in a cleanup list
- * @work: The work item for this command, queued in mcdi->workqueue
- * @mcdi: The mcdi_iface for this command
- * @state: The state of this command
- * @inlen: inbuf length
- * @inbuf: Input buffer
- * @quiet: Whether to silence errors
- * @reboot_seen: Whether a reboot has been seen during this command,
- * to prevent duplicates
- * @seq: Sequence number
- * @started: Jiffies this command was started at
- * @cookie: Context for completion function
- * @completer: Completion function
- * @handle: Command handle
- * @cmd: Command number
- * @rc: Return code
- * @outlen: Length of output buffer
- * @outbuf: Output buffer
- */
-struct cdx_mcdi_cmd {
- struct kref ref;
- struct list_head list;
- struct list_head cleanup_list;
- struct work_struct work;
- struct cdx_mcdi_iface *mcdi;
- enum cdx_mcdi_cmd_state state;
- size_t inlen;
- const struct cdx_dword *inbuf;
- bool quiet;
- bool reboot_seen;
- u8 seq;
- unsigned long started;
- unsigned long cookie;
- cdx_mcdi_async_completer *completer;
- unsigned int handle;
- unsigned int cmd;
- int rc;
- size_t outlen;
- struct cdx_dword *outbuf;
- /* followed by inbuf data if necessary */
-};
-
-/**
- * struct cdx_mcdi_iface - MCDI protocol context
- * @cdx: The associated NIC
- * @iface_lock: Serialise access to this structure
- * @outstanding_cleanups: Count of cleanups
- * @cmd_list: List of outstanding and running commands
- * @workqueue: Workqueue used for delayed processing
- * @cmd_complete_wq: Waitqueue for command completion
- * @db_held_by: Command the MC doorbell is in use by
- * @seq_held_by: Command each sequence number is in use by
- * @prev_handle: The last used command handle
- * @mode: Poll for mcdi completion, or wait for an mcdi_event
- * @prev_seq: The last used sequence number
- * @new_epoch: Indicates start of day or start of MC reboot recovery
- */
-struct cdx_mcdi_iface {
- struct cdx_mcdi *cdx;
- /* Serialise access */
- struct mutex iface_lock;
- unsigned int outstanding_cleanups;
- struct list_head cmd_list;
- struct workqueue_struct *workqueue;
- wait_queue_head_t cmd_complete_wq;
- struct cdx_mcdi_cmd *db_held_by;
- struct cdx_mcdi_cmd *seq_held_by[16];
- unsigned int prev_handle;
- enum cdx_mcdi_mode mode;
- u8 prev_seq;
- bool new_epoch;
-};
-
-/**
- * struct cdx_mcdi_data - extra state for NICs that implement MCDI
- * @iface: Interface/protocol state
- * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
- */
-struct cdx_mcdi_data {
- struct cdx_mcdi_iface iface;
- u32 fn_flags;
-};
-
-static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
-{
- return cdx->mcdi ? &cdx->mcdi->iface : NULL;
-}
-
-int cdx_mcdi_init(struct cdx_mcdi *cdx);
-void cdx_mcdi_finish(struct cdx_mcdi *cdx);
-
-void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
-int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
-int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- cdx_mcdi_async_completer *complete,
- unsigned long cookie);
-int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
- unsigned int timeout_jiffies);
-
-/*
- * We expect that 16- and 32-bit fields in MCDI requests and responses
- * are appropriately aligned, but 64-bit fields are only
- * 32-bit-aligned.
- */
-#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
-#define _MCDI_PTR(_buf, _offset) \
- ((u8 *)(_buf) + (_offset))
-#define MCDI_PTR(_buf, _field) \
- _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
-#define _MCDI_CHECK_ALIGN(_ofst, _align) \
- ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
- (_ofst))
-#define _MCDI_DWORD(_buf, _field) \
- ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
-
-#define MCDI_BYTE(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
- *MCDI_PTR(_buf, _field))
-#define MCDI_WORD(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
- le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
-#define MCDI_SET_DWORD(_buf, _field, _value) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
-#define MCDI_DWORD(_buf, _field) \
- CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
-#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
- MC_CMD_ ## _name1, _value1)
-#define MCDI_SET_QWORD(_buf, _field, _value) \
- do { \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
- CDX_DWORD, (u32)(_value)); \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
- CDX_DWORD, (u64)(_value) >> 32); \
- } while (0)
-#define MCDI_QWORD(_buf, _field) \
- (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
- (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
-
-#endif /* CDX_MCDI_H */
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index 885c69e6ebe5..8ae2d99be81e 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
-#include "mcdi.h"
#include "mcdi_functions.h"
int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index b9942affdc6b..57fd1bae706b 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -8,7 +8,8 @@
#ifndef CDX_MCDI_FUNCTIONS_H
#define CDX_MCDI_FUNCTIONS_H
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
#include "../cdx.h"
/**
diff --git a/drivers/cdx/controller/mcdid.h b/drivers/cdx/controller/mcdid.h
new file mode 100644
index 000000000000..7fc29f099265
--- /dev/null
+++ b/drivers/cdx/controller/mcdid.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDID_H
+#define CDX_MCDID_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
+
+#endif /* CDX_MCDID_H */
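
A worked sketch of the 64-bit accessors above, assuming the MCDI_DECLARE_BUF/_MCDI_DWORD helpers now come from <linux/cdx/mcdi.h>; MC_CMD_EXAMPLE_OFST is a hypothetical, 4-byte-aligned field offset rather than a real pcol entry:

#define MC_CMD_EXAMPLE_OFST	0	/* hypothetical */

static u64 example_qword_roundtrip(void)
{
	MCDI_DECLARE_BUF(buf, 8);

	MCDI_SET_QWORD(buf, EXAMPLE, 0x1122334455667788ULL);
	/* dword 0 now holds 0x55667788, dword 1 holds 0x11223344 */
	return MCDI_QWORD(buf, EXAMPLE);	/* == 0x1122334455667788 */
}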
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5bc473c2adb3..2f65fe2c6bdf 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static struct device_node *cpg_mstp_pd_np __initdata = NULL;
+static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata = NULL;
+
void __init cpg_mstp_add_clk_domain(struct device_node *np)
{
struct generic_pm_domain *pd;
@@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
- of_genpd_add_provider_simple(np, pd);
+ cpg_mstp_pd_np = of_node_get(np);
+ cpg_mstp_pd_genpd = pd;
+}
+
+static int __init cpg_mstp_pd_init_provider(void)
+{
+ int error;
+
+ if (!cpg_mstp_pd_np)
+ return -ENODEV;
+
+ error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
+
+ of_node_put(cpg_mstp_pd_np);
+ return error;
}
+postcore_initcall(cpg_mstp_pd_init_provider);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 354c981943b6..4221b1888b38 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p &= (1 << cmp->p.width) - 1;
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
- rate = (parent_rate / p) / m;
+ rate = (parent_rate / (p + cmp->p.offset)) / m;
else
rate = (parent_rate >> p) / m;
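
A worked instance of the corrected dual-divider path, with illustrative numbers and assuming an offset of 1 (i.e. the P register field encodes divider minus one):

static unsigned long example_mp_rate(unsigned long parent_rate,
				     unsigned int p, unsigned int offset,
				     unsigned int m)
{
	return (parent_rate / (p + offset)) / m;
}

/* example_mp_rate(24000000, 0, 1, 2) == 12000000; dividing by the raw
 * field value, as before the fix, would have divided by zero here. */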
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 645f517a1ac2..ffcd23668763 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -395,8 +395,7 @@ config ARM_GLOBAL_TIMER
config ARM_GT_INITIAL_PRESCALER_VAL
int "ARM global timer initial prescaler value"
- default 2 if ARCH_ZYNQ
- default 1
+ default 0
depends on ARM_GLOBAL_TIMER
help
When the ARM global timer initializes, its current rate is declared
@@ -406,6 +405,7 @@ config ARM_GT_INITIAL_PRESCALER_VAL
bounds about how much the parent clock is allowed to decrease or
increase wrt the initial clock value.
This affects CPU_FREQ max delta from the initial frequency.
+ Use 0 to use auto-detection in the driver.
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module"
@@ -474,11 +474,14 @@ config FSL_FTM_TIMER
help
Support for Freescale FlexTimer Module (FTM) timer.
-config VF_PIT_TIMER
- bool
+config NXP_PIT_TIMER
+ bool "NXP Periodic Interrupt Timer" if COMPILE_TEST
select CLKSRC_MMIO
help
- Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale / NXP
+ SoCs. This periodic timer is found on the Vybrid Family and
+ the Automotive S32G2/3 platforms. It contains 4 channels
where two can be coupled to form a 64-bit channel.
config SYS_SUPPORTS_SH_CMT
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 205bf3b0a8f3..ec4452ee958f 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
-obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_NXP_PIT_TIMER) += timer-nxp-pit.o
obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
@@ -64,6 +64,7 @@ obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer_mmio.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 80ba6a54248c..90aeff44a276 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -34,42 +34,12 @@
#include <clocksource/arm_arch_timer.h>
-#define CNTTIDR 0x08
-#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
-
-#define CNTACR(n) (0x40 + ((n) * 4))
-#define CNTACR_RPCT BIT(0)
-#define CNTACR_RVCT BIT(1)
-#define CNTACR_RFRQ BIT(2)
-#define CNTACR_RVOFF BIT(3)
-#define CNTACR_RWVT BIT(4)
-#define CNTACR_RWPT BIT(5)
-
-#define CNTPCT_LO 0x00
-#define CNTVCT_LO 0x08
-#define CNTFRQ 0x10
-#define CNTP_CVAL_LO 0x20
-#define CNTP_CTL 0x2c
-#define CNTV_CVAL_LO 0x30
-#define CNTV_CTL 0x3c
-
/*
* The minimum amount of time a generic counter is guaranteed to not roll over
* (40 years)
*/
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static unsigned arch_timers_present __initdata;
-
-struct arch_timer {
- void __iomem *base;
- struct clock_event_device evt;
-};
-
-static struct arch_timer *arch_timer_mem __ro_after_init;
-
-#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
-
static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
@@ -85,7 +55,6 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
-static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
@@ -121,76 +90,6 @@ static int arch_counter_get_width(void)
/*
* Architected system timer support.
*/
-
-static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
- struct clock_event_device *clk)
-{
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTP_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /*
- * Not guaranteed to be atomic, so the timer
- * must be disabled at this point.
- */
- writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTV_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /* Same restriction as above */
- writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- arch_timer_reg_write_cp15(access, reg, val);
- }
-}
-
-static __always_inline
-u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
- struct clock_event_device *clk)
-{
- u32 val;
-
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTP_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTV_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- val = arch_timer_reg_read_cp15(access, reg);
- }
-
- return val;
-}
-
static noinstr u64 raw_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
@@ -424,7 +323,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
unsigned long ctrl;
u64 cval;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -436,7 +335,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
write_sysreg(cval, cntv_cval_el0);
}
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
@@ -667,10 +566,10 @@ static __always_inline irqreturn_t timer_handler(const int access,
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -692,28 +591,14 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
-static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
-}
-
static __always_inline int arch_timer_shutdown(const int access,
struct clock_event_device *clk)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
return 0;
}
@@ -728,23 +613,13 @@ static int arch_timer_shutdown_phys(struct clock_event_device *clk)
return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}
-static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
-}
-
-static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
-}
-
static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cnt;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -753,8 +628,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
else
cnt = __arch_counter_get_cntvct();
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
@@ -771,60 +646,6 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
-static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
-{
- u32 cnt_lo, cnt_hi, tmp_hi;
-
- do {
- cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
- tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- } while (cnt_hi != tmp_hi);
-
- return ((u64) cnt_hi << 32) | cnt_lo;
-}
-
-static __always_inline void set_next_event_mem(const int access, unsigned long evt,
- struct clock_event_device *clk)
-{
- struct arch_timer *timer = to_arch_timer(clk);
- unsigned long ctrl;
- u64 cnt;
-
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
-
- /* Timer must be disabled before programming CVAL */
- if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
- }
-
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-
- if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
- cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
- else
- cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
-
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
-}
-
-static int arch_timer_set_next_event_virt_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
- return 0;
-}
-
-static int arch_timer_set_next_event_phys_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
- return 0;
-}
-
static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
@@ -850,63 +671,41 @@ static u64 __arch_timer_check_delta(void)
return CLOCKSOURCE_MASK(arch_counter_get_width());
}
-static void __arch_timer_setup(unsigned type,
- struct clock_event_device *clk)
+static void __arch_timer_setup(struct clock_event_device *clk)
{
+ typeof(clk->set_next_event) sne;
u64 max_delta;
clk->features = CLOCK_EVT_FEAT_ONESHOT;
- if (type == ARCH_TIMER_TYPE_CP15) {
- typeof(clk->set_next_event) sne;
-
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
-
- if (arch_timer_c3stop)
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 450;
- clk->cpumask = cpumask_of(smp_processor_id());
- clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
- switch (arch_timer_uses_ppi) {
- case ARCH_TIMER_VIRT_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_virt;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- sne = erratum_handler(set_next_event_virt);
- break;
- case ARCH_TIMER_PHYS_SECURE_PPI:
- case ARCH_TIMER_PHYS_NONSECURE_PPI:
- case ARCH_TIMER_HYP_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_phys;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- sne = erratum_handler(set_next_event_phys);
- break;
- default:
- BUG();
- }
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
- clk->set_next_event = sne;
- max_delta = __arch_timer_check_delta();
- } else {
- clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
- clk->name = "arch_mem_timer";
- clk->rating = 400;
- clk->cpumask = cpu_possible_mask;
- if (arch_timer_mem_use_virtual) {
- clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
- clk->set_next_event =
- arch_timer_set_next_event_virt_mem;
- } else {
- clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
- clk->set_next_event =
- arch_timer_set_next_event_phys_mem;
- }
-
- max_delta = CLOCKSOURCE_MASK(56);
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ clk->cpumask = cpumask_of(smp_processor_id());
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case ARCH_TIMER_VIRT_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_virt;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
+ sne = erratum_handler(set_next_event_virt);
+ break;
+ case ARCH_TIMER_PHYS_SECURE_PPI:
+ case ARCH_TIMER_PHYS_NONSECURE_PPI:
+ case ARCH_TIMER_HYP_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_phys;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
+ sne = erratum_handler(set_next_event_phys);
+ break;
+ default:
+ BUG();
}
+ clk->set_next_event = sne;
+ max_delta = __arch_timer_check_delta();
+
clk->set_state_shutdown(clk);
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
@@ -1029,7 +828,7 @@ static int arch_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
u32 flags;
- __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
+ __arch_timer_setup(clk);
flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
@@ -1075,22 +874,12 @@ static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np
pr_warn("frequency not available\n");
}
-static void __init arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(void)
{
- pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
- type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
- " and " : "",
- type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
+ pr_info("cp15 timer running at %lu.%02luMHz (%s).\n",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
- type & ARCH_TIMER_TYPE_CP15 ?
- (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
- "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
- type & ARCH_TIMER_TYPE_MEM ?
- arch_timer_mem_use_virtual ? "virt" : "phys" :
- "");
+ (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys");
}
u32 arch_timer_get_rate(void)
@@ -1108,11 +897,6 @@ bool arch_timer_evtstrm_available(void)
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
-static noinstr u64 arch_counter_get_cntvct_mem(void)
-{
- return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
-}
-
static struct arch_timer_kvm_info arch_timer_kvm_info;
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
@@ -1120,42 +904,35 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
return &arch_timer_kvm_info;
}
-static void __init arch_counter_register(unsigned type)
+static void __init arch_counter_register(void)
{
u64 (*scr)(void);
+ u64 (*rd)(void);
u64 start_count;
int width;
- /* Register the CP15 based counter if we have one */
- if (type & ARCH_TIMER_TYPE_CP15) {
- u64 (*rd)(void);
-
- if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntvct_stable;
- scr = raw_counter_get_cntvct_stable;
- } else {
- rd = arch_counter_get_cntvct;
- scr = arch_counter_get_cntvct;
- }
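+	/*
+	 * Use the virtual counter when not booted at EL2 (on arm64) or when
+	 * the virtual PPI is in use, and the physical counter otherwise.
+	 * The "stable" accessors are selected when a counter erratum
+	 * workaround is required.
+	 */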
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntvct_stable;
+ scr = raw_counter_get_cntvct_stable;
} else {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntpct_stable;
- scr = raw_counter_get_cntpct_stable;
- } else {
- rd = arch_counter_get_cntpct;
- scr = arch_counter_get_cntpct;
- }
+ rd = arch_counter_get_cntvct;
+ scr = arch_counter_get_cntvct;
}
-
- arch_timer_read_counter = rd;
- clocksource_counter.vdso_clock_mode = vdso_default;
} else {
- arch_timer_read_counter = arch_counter_get_cntvct_mem;
- scr = arch_counter_get_cntvct_mem;
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntpct_stable;
+ scr = raw_counter_get_cntpct_stable;
+ } else {
+ rd = arch_counter_get_cntpct;
+ scr = arch_counter_get_cntpct;
+ }
}
+ arch_timer_read_counter = rd;
+ clocksource_counter.vdso_clock_mode = vdso_default;
+
width = arch_counter_get_width();
clocksource_counter.mask = CLOCKSOURCE_MASK(width);
cyclecounter.mask = CLOCKSOURCE_MASK(width);
@@ -1303,76 +1080,10 @@ out:
return err;
}
-static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
-{
- int ret;
- irq_handler_t func;
-
- arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
- if (!arch_timer_mem)
- return -ENOMEM;
-
- arch_timer_mem->base = base;
- arch_timer_mem->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
-
- if (arch_timer_mem_use_virtual)
- func = arch_timer_handler_virt_mem;
- else
- func = arch_timer_handler_phys_mem;
-
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
- if (ret) {
- pr_err("Failed to request mem timer irq\n");
- kfree(arch_timer_mem);
- arch_timer_mem = NULL;
- }
-
- return ret;
-}
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer", },
- { .compatible = "arm,armv8-timer", },
- {},
-};
-
-static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer-mem", },
- {},
-};
-
-static bool __init arch_timer_needs_of_probing(void)
-{
- struct device_node *dn;
- bool needs_probing = false;
- unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
-
- /* We have two timers, and both device-tree nodes are probed. */
- if ((arch_timers_present & mask) == mask)
- return false;
-
- /*
- * Only one type of timer is probed,
- * check if we have another type of timer node in device-tree.
- */
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
- dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
- else
- dn = of_find_matching_node(NULL, arch_timer_of_match);
-
- if (dn && of_device_is_available(dn))
- needs_probing = true;
-
- of_node_put(dn);
-
- return needs_probing;
-}
-
static int __init arch_timer_common_init(void)
{
- arch_timer_banner(arch_timers_present);
- arch_counter_register(arch_timers_present);
+ arch_timer_banner();
+ arch_counter_register();
return arch_timer_arch_init();
}
@@ -1421,13 +1132,11 @@ static int __init arch_timer_of_init(struct device_node *np)
u32 rate;
bool has_names;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("multiple nodes in dt, skipping\n");
return 0;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
has_names = of_property_present(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
@@ -1472,283 +1181,22 @@ static int __init arch_timer_of_init(struct device_node *np)
if (ret)
return ret;
- if (arch_timer_needs_of_probing())
- return 0;
-
return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static u32 __init
-arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- u32 rate;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
- return 0;
- }
-
- rate = readl_relaxed(base + CNTFRQ);
-
- iounmap(base);
-
- return rate;
-}
-
-static struct arch_timer_mem_frame * __init
-arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- void __iomem *cntctlbase;
- u32 cnttidr;
- int i;
-
- cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
- if (!cntctlbase) {
- pr_err("Can't map CNTCTLBase @ %pa\n",
- &timer_mem->cntctlbase);
- return NULL;
- }
-
- cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
-
- /*
- * Try to find a virtual capable frame. Otherwise fall back to a
- * physical capable frame.
- */
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
- CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
-
- frame = &timer_mem->frame[i];
- if (!frame->valid)
- continue;
-
- /* Try enabling everything, and see what sticks */
- writel_relaxed(cntacr, cntctlbase + CNTACR(i));
- cntacr = readl_relaxed(cntctlbase + CNTACR(i));
-
- if ((cnttidr & CNTTIDR_VIRT(i)) &&
- !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
- best_frame = frame;
- arch_timer_mem_use_virtual = true;
- break;
- }
-
- if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
- continue;
-
- best_frame = frame;
- }
-
- iounmap(cntctlbase);
-
- return best_frame;
-}
-
-static int __init
-arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- int ret, irq;
-
- if (arch_timer_mem_use_virtual)
- irq = frame->virt_irq;
- else
- irq = frame->phys_irq;
-
- if (!irq) {
- pr_err("Frame missing %s irq.\n",
- arch_timer_mem_use_virtual ? "virt" : "phys");
- return -EINVAL;
- }
-
- if (!request_mem_region(frame->cntbase, frame->size,
- "arch_mem_timer"))
- return -EBUSY;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Can't map frame's registers\n");
- return -ENXIO;
- }
-
- ret = arch_timer_mem_register(base, irq);
- if (ret) {
- iounmap(base);
- return ret;
- }
-
- arch_timers_present |= ARCH_TIMER_TYPE_MEM;
-
- return 0;
-}
-
-static int __init arch_timer_mem_of_init(struct device_node *np)
-{
- struct arch_timer_mem *timer_mem;
- struct arch_timer_mem_frame *frame;
- struct resource res;
- int ret = -EINVAL;
- u32 rate;
-
- timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
- if (!timer_mem)
- return -ENOMEM;
-
- if (of_address_to_resource(np, 0, &res))
- goto out;
- timer_mem->cntctlbase = res.start;
- timer_mem->size = resource_size(&res);
-
- for_each_available_child_of_node_scoped(np, frame_node) {
- u32 n;
- struct arch_timer_mem_frame *frame;
-
- if (of_property_read_u32(frame_node, "frame-number", &n)) {
- pr_err(FW_BUG "Missing frame-number.\n");
- goto out;
- }
- if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
- pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
- ARCH_TIMER_MEM_MAX_FRAMES - 1);
- goto out;
- }
- frame = &timer_mem->frame[n];
-
- if (frame->valid) {
- pr_err(FW_BUG "Duplicated frame-number.\n");
- goto out;
- }
-
- if (of_address_to_resource(frame_node, 0, &res))
- goto out;
-
- frame->cntbase = res.start;
- frame->size = resource_size(&res);
-
- frame->virt_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_VIRT_SPI);
- frame->phys_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_PHYS_SPI);
-
- frame->valid = true;
- }
-
- frame = arch_timer_mem_find_best_frame(timer_mem);
- if (!frame) {
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer_mem->cntctlbase);
- ret = -EINVAL;
- goto out;
- }
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- arch_timer_of_configure_rate(rate, np);
-
- ret = arch_timer_mem_frame_register(frame);
- if (!ret && !arch_timer_needs_of_probing())
- ret = arch_timer_common_init();
-out:
- kfree(timer_mem);
- return ret;
-}
-TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
- arch_timer_mem_of_init);
-
#ifdef CONFIG_ACPI_GTDT
-static int __init
-arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame;
- u32 rate;
- int i;
-
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- frame = &timer_mem->frame[i];
-
- if (!frame->valid)
- continue;
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- if (rate == arch_timer_rate)
- continue;
-
- pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
- &frame->cntbase,
- (unsigned long)rate, (unsigned long)arch_timer_rate);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init arch_timer_mem_acpi_init(int platform_timer_count)
-{
- struct arch_timer_mem *timers, *timer;
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- int timer_count, i, ret = 0;
-
- timers = kcalloc(platform_timer_count, sizeof(*timers),
- GFP_KERNEL);
- if (!timers)
- return -ENOMEM;
-
- ret = acpi_arch_timer_mem_init(timers, &timer_count);
- if (ret || !timer_count)
- goto out;
-
- /*
- * While unlikely, it's theoretically possible that none of the frames
- * in a timer expose the combination of feature we want.
- */
- for (i = 0; i < timer_count; i++) {
- timer = &timers[i];
-
- frame = arch_timer_mem_find_best_frame(timer);
- if (!best_frame)
- best_frame = frame;
-
- ret = arch_timer_mem_verify_cntfrq(timer);
- if (ret) {
- pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
- goto out;
- }
-
- if (!best_frame) /* implies !frame */
- /*
- * Only complain about missing suitable frames if we
- * haven't already found one in a previous iteration.
- */
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer->cntctlbase);
- }
-
- if (best_frame)
- ret = arch_timer_mem_frame_register(best_frame);
-out:
- kfree(timers);
- return ret;
-}
-
-/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
- int ret, platform_timer_count;
+ int ret;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("already initialized, skipping\n");
return -EINVAL;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
- ret = acpi_gtdt_init(table, &platform_timer_count);
+ ret = acpi_gtdt_init(table, NULL);
if (ret)
return ret;
@@ -1790,10 +1238,6 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
if (ret)
return ret;
- if (platform_timer_count &&
- arch_timer_mem_acpi_init(platform_timer_count))
- pr_err("Failed to initialize memory-mapped timer.\n");
-
return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
diff --git a/drivers/clocksource/arm_arch_timer_mmio.c b/drivers/clocksource/arm_arch_timer_mmio.c
new file mode 100644
index 000000000000..ebe1987d651e
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer_mmio.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Generic Memory Mapped Timer support
+ *
+ * Split from drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#define pr_fmt(fmt) "arch_timer_mmio: " fmt
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#define CNTTIDR 0x08
+#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+
+#define CNTACR(n) (0x40 + ((n) * 4))
+#define CNTACR_RPCT BIT(0)
+#define CNTACR_RVCT BIT(1)
+#define CNTACR_RFRQ BIT(2)
+#define CNTACR_RVOFF BIT(3)
+#define CNTACR_RWVT BIT(4)
+#define CNTACR_RWPT BIT(5)
+
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
+#define CNTFRQ 0x10
+#define CNTP_CVAL_LO 0x20
+#define CNTP_CTL 0x2c
+#define CNTV_CVAL_LO 0x30
+#define CNTV_CTL 0x3c
+
+enum arch_timer_access {
+ PHYS_ACCESS,
+ VIRT_ACCESS,
+};
+
+struct arch_timer {
+ struct clock_event_device evt;
+ struct clocksource cs;
+ struct arch_timer_mem *gt_block;
+ void __iomem *base;
+ enum arch_timer_access access;
+ u32 rate;
+};
+
+#define evt_to_arch_timer(e) container_of(e, struct arch_timer, evt)
+#define cs_to_arch_timer(c) container_of(c, struct arch_timer, cs)
+
+static void arch_timer_mmio_write(struct arch_timer *timer,
+ enum arch_timer_reg reg, u64 val)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
+ return;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
+ return;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+}
+
+static u32 arch_timer_mmio_read(struct arch_timer *timer, enum arch_timer_reg reg)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTP_CTL);
+ default:
+ break;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTV_CTL);
+ default:
+ break;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static noinstr u64 arch_counter_mmio_get_cnt(struct arch_timer *t)
+{
+ int offset_lo = t->access == VIRT_ACCESS ? CNTVCT_LO : CNTPCT_LO;
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
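+	/*
+	 * The 64-bit counter is read as two 32-bit halves. Read the high
+	 * word, then the low word, then the high word again, and retry
+	 * until both high reads match, so the combined value is consistent.
+	 */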
+ do {
+ cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
+ tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static u64 arch_mmio_counter_read(struct clocksource *cs)
+{
+ struct arch_timer *at = cs_to_arch_timer(cs);
+
+ return arch_counter_mmio_get_cnt(at);
+}
+
+static int arch_timer_mmio_shutdown(struct clock_event_device *clk)
+{
+ struct arch_timer *at = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+
+ return 0;
+}
+
+static int arch_timer_mmio_set_next_event(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_mmio_read(timer, ARCH_TIMER_REG_CTRL);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ }
+
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ cnt = arch_counter_mmio_get_cnt(timer);
+
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ return 0;
+}
+
+static irqreturn_t arch_timer_mmio_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct arch_timer *at = evt_to_arch_timer(evt);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct arch_timer_mem_frame *find_best_frame(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame, *best_frame = NULL;
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ void __iomem *cntctlbase;
+ u32 cnttidr;
+
+ cntctlbase = ioremap(at->gt_block->cntctlbase, at->gt_block->size);
+ if (!cntctlbase) {
+ dev_err(&pdev->dev, "Can't map CNTCTLBase @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return NULL;
+ }
+
+ cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+
+ /*
+ * Try to find a virtual capable frame. Otherwise fall back to a
+ * physical capable frame.
+ */
+ for (int i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
+ u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+ CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+
+ frame = &at->gt_block->frame[i];
+ if (!frame->valid)
+ continue;
+
+ /* Try enabling everything, and see what sticks */
+ writel_relaxed(cntacr, cntctlbase + CNTACR(i));
+ cntacr = readl_relaxed(cntctlbase + CNTACR(i));
+
+ /* Pick a suitable frame for which we have an IRQ */
+ if ((cnttidr & CNTTIDR_VIRT(i)) &&
+ !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT)) &&
+ frame->virt_irq) {
+ best_frame = frame;
+ at->access = VIRT_ACCESS;
+ break;
+ }
+
+ if ((~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) ||
+ !frame->phys_irq)
+ continue;
+
+ at->access = PHYS_ACCESS;
+ best_frame = frame;
+ }
+
+ iounmap(cntctlbase);
+
+ return best_frame;
+}
+
+static void arch_timer_mmio_setup(struct arch_timer *at, int irq)
+{
+ at->evt = (struct clock_event_device) {
+ .features = (CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ),
+ .name = "arch_mem_timer",
+ .rating = 400,
+ .cpumask = cpu_possible_mask,
+ .irq = irq,
+ .set_next_event = arch_timer_mmio_set_next_event,
+ .set_state_oneshot_stopped = arch_timer_mmio_shutdown,
+ .set_state_shutdown = arch_timer_mmio_shutdown,
+ };
+
+ at->evt.set_state_shutdown(&at->evt);
+
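+	/* The architected memory-mapped counter is at least 56 bits wide */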
+ clockevents_config_and_register(&at->evt, at->rate, 0xf,
+ (unsigned long)CLOCKSOURCE_MASK(56));
+
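+	/* The IRQ was requested with IRQF_NO_AUTOEN; enable it only now */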
+ enable_irq(at->evt.irq);
+
+ at->cs = (struct clocksource) {
+ .name = "arch_mmio_counter",
+ .rating = 300,
+ .read = arch_mmio_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+ clocksource_register_hz(&at->cs, at->rate);
+}
+
+static int arch_timer_mmio_frame_register(struct platform_device *pdev,
+ struct arch_timer_mem_frame *frame)
+{
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret, irq;
+ u32 rate;
+
+ if (!devm_request_mem_region(&pdev->dev, frame->cntbase, frame->size,
+ "arch_mem_timer"))
+ return -EBUSY;
+
+ at->base = devm_ioremap(&pdev->dev, frame->cntbase, frame->size);
+ if (!at->base) {
+ dev_err(&pdev->dev, "Can't map frame's registers\n");
+ return -ENXIO;
+ }
+
+ /*
+	 * Allow "clock-frequency" to override the probed rate. If neither
+	 * leads to something useful, use the CPU timer frequency as the
+	 * fallback. The nice thing about that last point is that we
+	 * wouldn't have made it here without a valid frequency.
+ */
+ rate = readl_relaxed(at->base + CNTFRQ);
+
+ if (!np || of_property_read_u32(np, "clock-frequency", &at->rate))
+ at->rate = rate;
+
+ if (!at->rate)
+ at->rate = arch_timer_get_rate();
+
+ irq = at->access == VIRT_ACCESS ? frame->virt_irq : frame->phys_irq;
+ ret = devm_request_irq(&pdev->dev, irq, arch_timer_mmio_handler,
+ IRQF_TIMER | IRQF_NO_AUTOEN, "arch_mem_timer",
+ &at->evt);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request mem timer irq\n");
+ return ret;
+ }
+
+	/* After this point, we're not allowed to fail anymore */
+ arch_timer_mmio_setup(at, irq);
+ return 0;
+}
+
+static int of_populate_gt_block(struct platform_device *pdev,
+ struct arch_timer *at)
+{
+ struct resource res;
+
+ if (of_address_to_resource(pdev->dev.of_node, 0, &res))
+ return -EINVAL;
+
+ at->gt_block->cntctlbase = res.start;
+ at->gt_block->size = resource_size(&res);
+
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, frame_node) {
+ struct arch_timer_mem_frame *frame;
+ u32 n;
+
+ if (of_property_read_u32(frame_node, "frame-number", &n)) {
+ dev_err(&pdev->dev, FW_BUG "Missing frame-number\n");
+ return -EINVAL;
+ }
+ if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
+ dev_err(&pdev->dev,
+ FW_BUG "Wrong frame-number, only 0-%u are permitted\n",
+ ARCH_TIMER_MEM_MAX_FRAMES - 1);
+ return -EINVAL;
+ }
+
+ frame = &at->gt_block->frame[n];
+
+ if (frame->valid) {
+ dev_err(&pdev->dev, FW_BUG "Duplicated frame-number\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(frame_node, 0, &res))
+ return -EINVAL;
+
+ frame->cntbase = res.start;
+ frame->size = resource_size(&res);
+
+ frame->phys_irq = irq_of_parse_and_map(frame_node, 0);
+ frame->virt_irq = irq_of_parse_and_map(frame_node, 1);
+
+ frame->valid = true;
+ }
+
+ return 0;
+}
+
+static int arch_timer_mmio_probe(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame;
+ struct arch_timer *at;
+ struct device_node *np;
+ int ret;
+
+ np = pdev->dev.of_node;
+
+ at = devm_kmalloc(&pdev->dev, sizeof(*at), GFP_KERNEL | __GFP_ZERO);
+ if (!at)
+ return -ENOMEM;
+
+ if (np) {
+ at->gt_block = devm_kmalloc(&pdev->dev, sizeof(*at->gt_block),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!at->gt_block)
+ return -ENOMEM;
+ ret = of_populate_gt_block(pdev, at);
+ if (ret)
+ return ret;
+ } else {
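+		/* An ACPI (GTDT) probe provides the timer block as platform data */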
+ at->gt_block = dev_get_platdata(&pdev->dev);
+ }
+
+ platform_set_drvdata(pdev, at);
+
+ frame = find_best_frame(pdev);
+ if (!frame) {
+ dev_err(&pdev->dev,
+ "Unable to find a suitable frame in timer @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return -EINVAL;
+ }
+
+ ret = arch_timer_mmio_frame_register(pdev, frame);
+ if (!ret)
+ dev_info(&pdev->dev,
+ "mmio timer running at %lu.%02luMHz (%s)\n",
+ (unsigned long)at->rate / 1000000,
+ (unsigned long)(at->rate / 10000) % 100,
+ at->access == VIRT_ACCESS ? "virt" : "phys");
+
+ return ret;
+}
+
+static const struct of_device_id arch_timer_mmio_of_table[] = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {}
+};
+
+static struct platform_driver arch_timer_mmio_drv = {
+ .driver = {
+ .name = "arch-timer-mmio",
+ .of_match_table = arch_timer_mmio_of_table,
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_drv);
+
+static struct platform_driver arch_timer_mmio_acpi_drv = {
+ .driver = {
+ .name = "gtdt-arm-mmio-timer",
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_acpi_drv);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 2d86bbc2764a..5e3d6bb7e437 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -263,14 +263,13 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static int __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(unsigned int psv)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
- CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv - 1) |
GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@@ -338,11 +337,45 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
+struct gt_prescaler_config {
+ const char *compatible;
+ unsigned long prescaler;
+};
+
+static const struct gt_prescaler_config gt_prescaler_configs[] = {
+ /*
+	 * On am43 the global timer clock is a child of the clock used for
+	 * CPU OPPs, so the initial prescaler has to be compatible with all
+	 * OPPs: 300, 600, 720, 800 and 1000 MHz, with a fixed divider of 2,
+	 * which gives a GCD of 10 MHz. The initial frequency is 1000 MHz,
+	 * so the prescaler is 50.
+ */
+ { .compatible = "ti,am43", .prescaler = 50 },
+ { .compatible = "xlnx,zynq-7000", .prescaler = 2 },
+ { .compatible = NULL }
+};
+
+static unsigned long gt_get_initial_prescaler_value(struct device_node *np)
+{
+ const struct gt_prescaler_config *config;
+
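+	/* A non-zero Kconfig value overrides the per-machine defaults */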
+ if (CONFIG_ARM_GT_INITIAL_PRESCALER_VAL != 0)
+ return CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+
+ for (config = gt_prescaler_configs; config->compatible; config++) {
+ if (of_machine_is_compatible(config->compatible))
+ return config->prescaler;
+ }
+
+ return 1;
+}
+
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
int err;
+ unsigned long psv;
/*
* In A9 r2p0 the comparators for each processor with the global timer
@@ -378,8 +411,9 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_unmap;
}
+ psv = gt_get_initial_prescaler_value(np);
gt_clk_rate = clk_get_rate(gt_clk);
- gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+ gt_target_rate = gt_clk_rate / psv;
gt_clk_rate_change_nb.notifier_call =
gt_clk_rate_change_cb;
err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
@@ -404,7 +438,7 @@ static int __init global_timer_of_register(struct device_node *np)
}
/* Register and immediately configure the timer on the boot CPU */
- err = gt_clocksource_init();
+ err = gt_clocksource_init(psv);
if (err)
goto out_irq;
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index e95fdc49c226..bbceb0289d45 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np)
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
+ int ret = 0;
if (!base)
return -ENOMEM;
- if (!irq)
- return -EINVAL;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
+ if (!irq) {
+ ret = -EINVAL;
+ goto unmap_io;
+ }
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto unmap_io;
+ }
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
clps711x_clksrc_init(clock, base);
break;
case CLPS711X_CLKSRC_CLOCKEVENT:
- return _clps711x_clkevt_init(clock, base, irq);
+ ret = _clps711x_clkevt_init(clock, base, irq);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+unmap_io:
+ iounmap(base);
+ return ret;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
index cb6fc2f152d4..e79cfb0b8e05 100644
--- a/drivers/clocksource/ingenic-sysost.c
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -127,18 +127,23 @@ static u8 ingenic_ost_get_prescale(unsigned long rate, unsigned long req_rate)
return 2; /* /16 divider */
}
-static long ingenic_ost_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int ingenic_ost_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long rate = *parent_rate;
+ unsigned long rate = req->best_parent_rate;
u8 prescale;
- if (req_rate > rate)
- return rate;
+ if (req->rate > rate) {
+ req->rate = rate;
- prescale = ingenic_ost_get_prescale(rate, req_rate);
+ return 0;
+ }
+
+ prescale = ingenic_ost_get_prescale(rate, req->rate);
- return rate >> (prescale * 2);
+ req->rate = rate >> (prescale * 2);
+
+ return 0;
}
static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -175,14 +180,14 @@ static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long re
static const struct clk_ops ingenic_ost_percpu_timer_ops = {
.recalc_rate = ingenic_ost_percpu_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_percpu_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_percpu_timer_set_rate,
};
static const struct clk_ops ingenic_ost_global_timer_ops = {
.recalc_rate = ingenic_ost_global_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_global_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_global_timer_set_rate,
};
static const char * const ingenic_ost_clk_parents[] = { "ext" };
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index c3536fffbe9a..5a99801a1657 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -52,6 +52,7 @@ static struct clocksource cs_hrt = {
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
/* mult, shift are set based on mhz27 flag */
+ .owner = THIS_MODULE,
};
static int __init init_hrt_clocksource(void)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b72b36e0abed..385eb94bbe7c 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -578,37 +578,74 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
+static int sh_cmt_start_clocksource(struct sh_cmt_channel *ch)
{
int ret = 0;
unsigned long flags;
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(ch);
- }
if (ret)
goto out;
- ch->flags |= flag;
+
+ ch->flags |= FLAG_CLOCKSOURCE;
/* setup timeout if no clockevent */
- if (ch->cmt->num_channels == 1 &&
- flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
+ if (ch->cmt->num_channels == 1 && !(ch->flags & FLAG_CLOCKEVENT))
__sh_cmt_set_next(ch, ch->max_match_value);
+out:
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ return ret;
+}
+
+static void sh_cmt_stop_clocksource(struct sh_cmt_channel *ch)
+{
+ unsigned long flags;
+ unsigned long f;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
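+	/* Remember whether the channel was in use before clearing our flag */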
+ f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+
+ ch->flags &= ~FLAG_CLOCKSOURCE;
+
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ sh_cmt_disable(ch);
+
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ pm_runtime_put(&ch->cmt->pdev->dev);
+}
+
+static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+ pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ ret = sh_cmt_enable(ch);
+ }
+
+ if (ret)
+ goto out;
+
+ ch->flags |= FLAG_CLOCKEVENT;
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
-static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
+static void sh_cmt_stop_clockevent(struct sh_cmt_channel *ch)
{
unsigned long flags;
unsigned long f;
@@ -616,22 +653,19 @@ static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
raw_spin_lock_irqsave(&ch->lock, flags);
f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
- ch->flags &= ~flag;
+
+ ch->flags &= ~FLAG_CLOCKEVENT;
if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
sh_cmt_disable(ch);
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_put(&ch->cmt->pdev->dev);
+ pm_runtime_put(&ch->cmt->pdev->dev);
}
/* adjust the timeout to maximum if only clocksource left */
- if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+ if (ch->flags & FLAG_CLOCKSOURCE)
__sh_cmt_set_next(ch, ch->max_match_value);
raw_spin_unlock_irqrestore(&ch->lock, flags);
-
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_put(&ch->cmt->pdev->dev);
}
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
@@ -672,7 +706,7 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
ch->total_cycles = 0;
- ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ ret = sh_cmt_start_clocksource(ch);
if (!ret)
ch->cs_enabled = true;
@@ -685,7 +719,7 @@ static void sh_cmt_clocksource_disable(struct clocksource *cs)
WARN_ON(!ch->cs_enabled);
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
ch->cs_enabled = false;
}
@@ -696,7 +730,7 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
if (!ch->cs_enabled)
return;
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
@@ -708,7 +742,7 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
return;
dev_pm_genpd_resume(&ch->cmt->pdev->dev);
- sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_start_clocksource(ch);
}
static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
@@ -740,7 +774,7 @@ static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
- sh_cmt_start(ch, FLAG_CLOCKEVENT);
+ sh_cmt_start_clockevent(ch);
if (periodic)
sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
@@ -752,7 +786,7 @@ static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
return 0;
}
@@ -763,7 +797,7 @@ static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
/* deal with old setting first */
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
ch->index, periodic ? "periodic" : "oneshot");
diff --git a/drivers/clocksource/timer-cs5535.c b/drivers/clocksource/timer-cs5535.c
index d47acfe848ae..8af666c39890 100644
--- a/drivers/clocksource/timer-cs5535.c
+++ b/drivers/clocksource/timer-cs5535.c
@@ -101,6 +101,7 @@ static struct clock_event_device cs5535_clockevent = {
.tick_resume = mfgpt_shutdown,
.set_next_event = mfgpt_next_event,
.rating = 250,
+ .owner = THIS_MODULE,
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
index 3b449fdaafee..4008076b1a21 100644
--- a/drivers/clocksource/timer-econet-en751221.c
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -146,7 +146,7 @@ static int __init cevt_init(struct device_node *np)
for_each_possible_cpu(i) {
struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
- cd->rating = 310,
+ cd->rating = 310;
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
diff --git a/drivers/clocksource/timer-nxp-pit.c b/drivers/clocksource/timer-nxp-pit.c
new file mode 100644
index 000000000000..2d0a3554b6bf
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-pit.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ */
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/platform_device.h>
+
+/*
+ * Each PIT channel takes 0x10 bytes of register space
+ */
+#define PIT0_OFFSET 0x100
+#define PIT_CH(n) (PIT0_OFFSET + 0x10 * (n))
+
+#define PITMCR(__base) (__base)
+
+#define PITMCR_FRZ BIT(0)
+#define PITMCR_MDIS BIT(1)
+
+#define PITLDVAL(__base) (__base)
+#define PITTCTRL(__base) ((__base) + 0x08)
+
+#define PITCVAL_OFFSET 0x04
+#define PITCVAL(__base) ((__base) + 0x04)
+
+#define PITTCTRL_TEN BIT(0)
+#define PITTCTRL_TIE BIT(1)
+
+#define PITTFLG(__base) ((__base) + 0x0c)
+
+#define PITTFLG_TIF BIT(0)
+
+struct pit_timer {
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ int rate;
+};
+
+struct pit_timer_data {
+ int max_pit_instances;
+};
+
+static DEFINE_PER_CPU(struct pit_timer *, pit_timers);
+
+/*
+ * Global state for initializing multiple PIT instances
+ */
+static int pit_instances;
+static int max_pit_instances = 1;
+
+static void __iomem *sched_clock_base;
+
+static inline struct pit_timer *ced_to_pit(struct clock_event_device *ced)
+{
+ return container_of(ced, struct pit_timer, ced);
+}
+
+static inline struct pit_timer *cs_to_pit(struct clocksource *cs)
+{
+ return container_of(cs, struct pit_timer, cs);
+}
+
+static inline void pit_module_enable(void __iomem *base)
+{
+ writel(0, PITMCR(base));
+}
+
+static inline void pit_module_disable(void __iomem *base)
+{
+ writel(PITMCR_MDIS, PITMCR(base));
+}
+
+static inline void pit_timer_enable(void __iomem *base, bool tie)
+{
+ u32 val = PITTCTRL_TEN | (tie ? PITTCTRL_TIE : 0);
+
+ writel(val, PITTCTRL(base));
+}
+
+static inline void pit_timer_disable(void __iomem *base)
+{
+ writel(0, PITTCTRL(base));
+}
+
+static inline void pit_timer_set_counter(void __iomem *base, unsigned int cnt)
+{
+ writel(cnt, PITLDVAL(base));
+}
+
+static inline void pit_timer_irqack(struct pit_timer *pit)
+{
+ writel(PITTFLG_TIF, PITTFLG(pit->clkevt_base));
+}
+
+static u64 notrace pit_read_sched_clock(void)
+{
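+	/* The PIT counts down; invert the value to present an up-counter */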
+ return ~readl(sched_clock_base);
+}
+
+static u64 pit_timer_clocksource_read(struct clocksource *cs)
+{
+ struct pit_timer *pit = cs_to_pit(cs);
+
+ return (u64)~readl(PITCVAL(pit->clksrc_base));
+}
+
+static int pit_clocksource_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate)
+{
+ /*
+	 * Channels 0 and 1 can be chained to build a 64-bit timer,
+	 * so use channel 2 as the clocksource and leave channels 0
+	 * and 1 free for anyone else who needs them.
+ */
+ pit->clksrc_base = base + PIT_CH(2);
+ pit->cs.name = name;
+ pit->cs.rating = 300;
+ pit->cs.read = pit_timer_clocksource_read;
+ pit->cs.mask = CLOCKSOURCE_MASK(32);
+ pit->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* set the max load value and start the clock source counter */
+ pit_timer_disable(pit->clksrc_base);
+ pit_timer_set_counter(pit->clksrc_base, ~0);
+ pit_timer_enable(pit->clksrc_base, 0);
+
+ sched_clock_base = pit->clksrc_base + PITCVAL_OFFSET;
+ sched_clock_register(pit_read_sched_clock, 32, rate);
+
+ return clocksource_register_hz(&pit->cs, rate);
+}
+
+static int pit_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ /*
+	 * Setting a new value in the PITLDVAL register will not restart the
+	 * timer. To abort the current cycle and start a timer period with
+	 * the new value, the timer must be disabled and enabled again, and
+	 * PITLDVAL must be set to delta minus one, per the PIT hardware
+	 * requirement.
+ */
+ pit_timer_disable(pit->clkevt_base);
+ pit_timer_set_counter(pit->clkevt_base, delta - 1);
+ pit_timer_enable(pit->clkevt_base, true);
+
+ return 0;
+}
+
+static int pit_shutdown(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_disable(pit->clkevt_base);
+
+ return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_set_next_event(pit->rate / HZ, ced);
+
+ return 0;
+}
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ced = dev_id;
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_irqack(pit);
+
+ /*
+	 * The PIT hardware doesn't support oneshot mode: it generates an
+	 * interrupt and reloads the counter value from PITLDVAL when PITCVAL
+	 * reaches zero, then starts counting again. So software needs to
+	 * disable the timer to stop the counter loop in ONESHOT mode.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ pit_timer_disable(pit->clkevt_base);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int pit_clockevent_per_cpu_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate,
+ int irq, unsigned int cpu)
+{
+ int ret;
+
+ /*
+	 * Channels 0 and 1 can be chained to build a 64-bit timer,
+	 * so use channel 3 as the clockevent and leave channels 0
+	 * and 1 free for anyone else who needs them.
+ */
+ pit->clkevt_base = base + PIT_CH(3);
+ pit->rate = rate;
+
+ pit_timer_disable(pit->clkevt_base);
+
+ pit_timer_irqack(pit);
+
+ ret = request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_NOBALANCING,
+ name, &pit->ced);
+ if (ret)
+ return ret;
+
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.irq = irq;
+
+ pit->ced.name = name;
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ pit->ced.set_state_shutdown = pit_shutdown;
+ pit->ced.set_state_periodic = pit_set_periodic;
+ pit->ced.set_next_event = pit_set_next_event;
+ pit->ced.rating = 300;
+
+ per_cpu(pit_timers, cpu) = pit;
+
+ return 0;
+}
+
+static void pit_clockevent_per_cpu_exit(struct pit_timer *pit, unsigned int cpu)
+{
+ pit_timer_disable(pit->clkevt_base);
+ free_irq(pit->ced.irq, &pit->ced);
+ per_cpu(pit_timers, cpu) = NULL;
+}
+
+static int pit_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct pit_timer *pit = per_cpu(pit_timers, cpu);
+ int ret;
+
+ if (!pit)
+ return 0;
+
+ ret = irq_force_affinity(pit->ced.irq, cpumask_of(cpu));
+ if (ret) {
+ pit_clockevent_per_cpu_exit(pit, cpu);
+ return ret;
+ }
+
+ /*
+	 * The value for the LDVAL register trigger is calculated as:
+	 * LDVAL trigger = (period / clock period) - 1
+	 * The PIT is a 32-bit down-count timer: when the counter reaches
+	 * zero it generates an interrupt, so the minimal LDVAL trigger
+	 * value is 1. min_delta is therefore the minimal LDVAL trigger
+	 * value + 1, and max_delta is the full 32-bit range.
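+	 *
+	 * For example, with a hypothetical 100 MHz PIT clock, a 1 ms tick
+	 * would need LDVAL = (0.001 s * 100000000 Hz) - 1 = 99999.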
+ */
+ clockevents_config_and_register(&pit->ced, pit->rate, 2, 0xffffffff);
+
+ return 0;
+}
+
+static int pit_timer_init(struct device_node *np)
+{
+ struct pit_timer *pit;
+ struct clk *pit_clk;
+ void __iomem *timer_base;
+ const char *name = of_node_full_name(np);
+ unsigned long clk_rate;
+ int irq, ret;
+
+ pit = kzalloc(sizeof(*pit), GFP_KERNEL);
+ if (!pit)
+ return -ENOMEM;
+
+ ret = -ENXIO;
+ timer_base = of_iomap(np, 0);
+ if (!timer_base) {
+ pr_err("Failed to iomap\n");
+ goto out_kfree;
+ }
+
+ ret = -EINVAL;
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+		pr_err("Failed to parse and map irq\n");
+ goto out_iounmap;
+ }
+
+ pit_clk = of_clk_get(np, 0);
+ if (IS_ERR(pit_clk)) {
+ ret = PTR_ERR(pit_clk);
+ goto out_irq_dispose_mapping;
+ }
+
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ goto out_clk_put;
+
+ clk_rate = clk_get_rate(pit_clk);
+
+ pit_module_disable(timer_base);
+
+ ret = pit_clocksource_init(pit, name, timer_base, clk_rate);
+ if (ret) {
+ pr_err("Failed to initialize clocksource '%pOF'\n", np);
+ goto out_pit_module_disable;
+ }
+
+ ret = pit_clockevent_per_cpu_init(pit, name, timer_base, clk_rate, irq, pit_instances);
+ if (ret) {
+ pr_err("Failed to initialize clockevent '%pOF'\n", np);
+ goto out_pit_clocksource_unregister;
+ }
+
+ /* enable the pit module */
+ pit_module_enable(timer_base);
+
+ pit_instances++;
+
+ if (pit_instances == max_pit_instances) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "PIT timer:starting",
+ pit_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ goto out_pit_clocksource_unregister;
+ }
+
+ return 0;
+
+out_pit_clocksource_unregister:
+ clocksource_unregister(&pit->cs);
+out_pit_module_disable:
+ pit_module_disable(timer_base);
+ clk_disable_unprepare(pit_clk);
+out_clk_put:
+ clk_put(pit_clk);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(irq);
+out_iounmap:
+ iounmap(timer_base);
+out_kfree:
+ kfree(pit);
+
+ return ret;
+}
+
+static int pit_timer_probe(struct platform_device *pdev)
+{
+ const struct pit_timer_data *pit_timer_data;
+
+ pit_timer_data = of_device_get_match_data(&pdev->dev);
+ if (pit_timer_data)
+ max_pit_instances = pit_timer_data->max_pit_instances;
+
+ return pit_timer_init(pdev->dev.of_node);
+}
+
+static struct pit_timer_data s32g2_data = { .max_pit_instances = 2 };
+
+static const struct of_device_id pit_timer_of_match[] = {
+ { .compatible = "nxp,s32g2-pit", .data = &s32g2_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pit_timer_of_match);
+
+static struct platform_driver nxp_pit_driver = {
+ .driver = {
+ .name = "nxp-pit",
+ .of_match_table = pit_timer_of_match,
+ },
+ .probe = pit_timer_probe,
+};
+module_platform_driver(nxp_pit_driver);
+
+TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
index d7ccf9001729..bbc40623728f 100644
--- a/drivers/clocksource/timer-nxp-stm.c
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -201,6 +201,7 @@ static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer
stm_timer->cs.resume = nxp_stm_clocksource_resume;
stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ stm_timer->cs.owner = THIS_MODULE;
ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
if (ret)
@@ -314,6 +315,7 @@ static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm
stm_timer->ced.cpumask = cpumask_of(cpu);
stm_timer->ced.rating = 460;
stm_timer->ced.irq = irq;
+ stm_timer->ced.owner = THIS_MODULE;
per_cpu(stm_timers, cpu) = stm_timer;
diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c
index 8a3068b36e75..6113d2fdd4de 100644
--- a/drivers/clocksource/timer-rtl-otto.c
+++ b/drivers/clocksource/timer-rtl-otto.c
@@ -38,14 +38,13 @@
#define RTTM_BIT_COUNT 28
#define RTTM_MIN_DELTA 8
#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28)
+#define RTTM_MAX_DIVISOR GENMASK(15, 0)
/*
- * Timers are derived from the LXB clock frequency. Usually this is a fixed
- * multiple of the 25 MHz oscillator. The 930X SOC is an exception from that.
- * Its LXB clock has only dividers and uses the switch PLL of 2.45 GHz as its
- * base. The only meaningful frequencies we can achieve from that are 175.000
- * MHz and 153.125 MHz. The greatest common divisor of all explained possible
- * speeds is 3125000. Pin the timers to this 3.125 MHz reference frequency.
+ * Timers are derived from the Lexra bus (LXB) clock frequency. This is 175 MHz
+ * on RTL930x and 200 MHz on the other platforms. Choose 3.125 MHz as a common
+ * divisor of both; it offers enough range and resolution, and keeps the timers
+ * comparable across the different platforms.
*/
#define RTTM_TICKS_PER_SEC 3125000
@@ -55,11 +54,6 @@ struct rttm_cs {
};
/* Simple internal register functions */
-static inline void rttm_set_counter(void __iomem *base, unsigned int counter)
-{
- iowrite32(counter, base + RTTM_CNT);
-}
-
static inline unsigned int rttm_get_counter(void __iomem *base)
{
return ioread32(base + RTTM_CNT);
@@ -112,6 +106,22 @@ static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void rttm_bounce_timer(void __iomem *base, u32 mode)
+{
+ /*
+	 * When a running timer has less than ~5us left, a stop/start sequence
+	 * might fail. While the details are unknown, the most evident effect
+	 * is that the subsequent interrupt is not fired.
+	 *
+	 * As a workaround, issue an intermediate restart at a very slow
+	 * frequency of ~3kHz while keeping the target counter (>= 8). The
+	 * follow-up restart is then always issued outside the critical window.
+ */
+
+ rttm_disable_timer(base);
+ rttm_enable_timer(base, mode, RTTM_MAX_DIVISOR);
+}
+
static void rttm_stop_timer(void __iomem *base)
{
rttm_disable_timer(base);
@@ -120,7 +130,6 @@ static void rttm_stop_timer(void __iomem *base)
static void rttm_start_timer(struct timer_of *to, u32 mode)
{
- rttm_set_counter(to->of_base.base, 0);
rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC);
}
@@ -129,7 +138,8 @@ static int rttm_next_event(unsigned long delta, struct clock_event_device *clkev
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, delta);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -141,7 +151,8 @@ static int rttm_state_oneshot(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -153,7 +164,8 @@ static int rttm_state_periodic(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_TIMER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_TIMER);
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 6e7944ffd7c0..c2a699f5c1dd 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -211,6 +211,7 @@ static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
priv->clkevt.rating = STM32_LP_RATING;
priv->clkevt.suspend = stm32_clkevent_lp_suspend;
priv->clkevt.resume = stm32_clkevent_lp_resume;
+ priv->clkevt.owner = THIS_MODULE;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 6b48a9006444..f827d3f98f60 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -185,6 +185,7 @@ static int sun5i_setup_clocksource(struct platform_device *pdev,
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->clksrc.owner = THIS_MODULE;
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
@@ -214,6 +215,7 @@ static int sun5i_setup_clockevent(struct platform_device *pdev,
ce->clkevt.rating = 340;
ce->clkevt.irq = irq;
ce->clkevt.cpumask = cpu_possible_mask;
+ ce->clkevt.owner = THIS_MODULE;
/* Enable timer0 interrupt */
val = readl(base + TIMER_IRQ_EN_REG);
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index e5394f98a02e..355558893e5f 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -159,7 +159,7 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
/* configure timer (system reset happens on the fifth expiration) */
- value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
+ value = TMRCR_PTV(wdt->base.timeout * (USEC_PER_SEC / 5)) |
TMRCR_PERIODIC | TMRCR_ENABLE;
tmr_writel(wdt->tmr, value, TMRCR);
@@ -231,7 +231,7 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
u32 expiration, val;
- u64 timeleft;
+ u32 timeleft;
if (!watchdog_active(&wdt->base)) {
/* return zero if the watchdog timer is not activated. */
@@ -266,21 +266,26 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
* Calculate the time remaining by adding the time for the
* counter value to the time of the counter expirations that
* remain.
+	 * Note: since wdt->base.timeout is bounded by 255, the maximum
+	 * value added to timeleft is
+ * 255 * (1,000,000 / 5) * 4
+ * = 255 * 200,000 * 4
+ * = 204,000,000
+ * TMRSR_PCV is a 29-bit field.
+ * Its maximum value is 0x1fffffff = 536,870,911.
+ * 204,000,000 + 536,870,911 = 740,870,911 = 0x2C28CAFF.
+ * timeleft can therefore not overflow, and 64-bit calculations
+ * are not necessary.
*/
- timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+ timeleft += (wdt->base.timeout * (USEC_PER_SEC / 5)) * (4 - expiration);
/*
* Convert the current counter value to seconds,
- * rounding up to the nearest second. Cast u64 to
- * u32 under the assumption that no overflow happens
- * when coverting to seconds.
+ * rounding to the nearest second.
*/
- timeleft = DIV_ROUND_CLOSEST_ULL(timeleft, USEC_PER_SEC);
+ timeleft = DIV_ROUND_CLOSEST(timeleft, USEC_PER_SEC);
- if (WARN_ON_ONCE(timeleft > U32_MAX))
- return U32_MAX;
-
- return lower_32_bits(timeleft);
+ return timeleft;
}
static const struct watchdog_ops tegra186_wdt_ops = {
@@ -328,16 +333,12 @@ static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
wdt->base.parent = tegra->dev;
err = watchdog_init_timeout(&wdt->base, 5, tegra->dev);
- if (err < 0) {
- dev_err(tegra->dev, "failed to initialize timeout: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
err = devm_watchdog_register_device(tegra->dev, &wdt->base);
- if (err < 0) {
- dev_err(tegra->dev, "failed to register WDT: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
return wdt;
}
@@ -373,6 +374,7 @@ static int tegra186_timer_tsc_init(struct tegra186_timer *tegra)
tegra->tsc.read = tegra186_timer_tsc_read;
tegra->tsc.mask = CLOCKSOURCE_MASK(56);
tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->tsc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->tsc, 31250000);
}
@@ -392,6 +394,7 @@ static int tegra186_timer_osc_init(struct tegra186_timer *tegra)
tegra->osc.read = tegra186_timer_osc_read;
tegra->osc.mask = CLOCKSOURCE_MASK(32);
tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->osc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->osc, 38400000);
}
@@ -411,6 +414,7 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
tegra->usec.read = tegra186_timer_usec_read;
tegra->usec.mask = CLOCKSOURCE_MASK(32);
tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->usec.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index e9e32df6b566..793e7cdcb1b1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -31,6 +31,7 @@
#include <linux/platform_data/dmtimer-omap.h>
#include <clocksource/timer-ti-dm.h>
+#include <linux/delay.h>
/*
* timer errata flags
@@ -836,6 +837,48 @@ static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
return 0;
}
+static int omap_dm_timer_set_cap(struct omap_dm_timer *cookie,
+ int autoreload, bool config_period)
+{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+ u32 l;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+ /*
+	 * 1. Select autoreload mode. TIMER_TCLR[1] AR bit.
+	 * 2. TIMER_TCLR[14]: Sets the functionality of the TIMER IO pin.
+	 * 3. TIMER_TCLR[13]: Capture mode select bit.
+	 * 4. TIMER_TCLR[9-8]: Select transition capture mode.
+ */
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
+
+ l |= OMAP_TIMER_CTRL_CAPTMODE | OMAP_TIMER_CTRL_GPOCFG;
+
+	if (config_period)
+ l |= OMAP_TIMER_CTRL_TCM_LOWTOHIGH; /* Time Period config */
+ else
+ l |= OMAP_TIMER_CTRL_TCM_BOTHEDGES; /* Duty Cycle config */
+
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
@@ -1023,23 +1066,92 @@ static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
return __omap_dm_timer_read_counter(timer);
}
+static inline unsigned int __omap_dm_timer_cap(struct dmtimer *timer, int idx)
+{
+ return idx == 0 ? dmtimer_read(timer, OMAP_TIMER_CAPTURE_REG) :
+ dmtimer_read(timer, OMAP_TIMER_CAPTURE2_REG);
+}
+
static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
struct dmtimer *timer;
+ struct device *dev;
timer = to_dmtimer(cookie);
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
+ if (unlikely(!timer)) {
+ pr_err("%s: timer not available.\n", __func__);
return -EINVAL;
}
+ dev = &timer->pdev->dev;
+
+ pm_runtime_resume_and_get(dev);
dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
+ pm_runtime_put_sync(dev);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
+/**
+ * omap_dm_timer_cap_counter() - Calculate the high count or period count
+ * depending on the configuration.
+ * @cookie: Pointer to OMAP DM timer
+ * @is_period: Whether to configure the timer in period or duty cycle mode
+ *
+ * Return: the high count or period count if the timer is enabled, otherwise an
+ * error code.
+ */
+static unsigned int omap_dm_timer_cap_counter(struct omap_dm_timer *cookie, bool is_period)
+{
+ struct dmtimer *timer;
+ unsigned int cap1 = 0;
+ unsigned int cap2 = 0;
+ u32 l, ret;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
+ pr_err("%s:timer is not available or enabled.%p\n", __func__, (void *)timer);
+ return -EINVAL;
+ }
+
+ /* Stop the timer */
+ omap_dm_timer_stop(cookie);
+
+ /* Clear the timer counter value to 0 */
+ ret = omap_dm_timer_write_counter(cookie, 0);
+ if (ret)
+ return ret;
+
+	/* Set the timer capture configuration for period/duty-cycle measurement. */
+ ret = omap_dm_timer_set_cap(cookie, true, is_period);
+ if (ret) {
+ pr_err("%s: Failed to set timer capture configuration.\n", __func__);
+ return ret;
+ }
+ /* Start the timer */
+ omap_dm_timer_start(cookie);
+
+	/*
+	 * Delay for one second to allow enough time to capture
+	 * low-frequency signals.
+	 */
+ msleep(1000);
+
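+	/* TCAR1 latches the first configured edge, TCAR2 the next one. */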
+ cap1 = __omap_dm_timer_cap(timer, 0);
+ cap2 = __omap_dm_timer_cap(timer, 1);
+
+	/*
+	 * Clear the TCLR configuration, keeping only the start bit (ST)
+	 * set since the timer is already running.
+	 */
+	l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+	l = (l & ~0xffff) | 0x1;
+	dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+	return cap2 - cap1;
+}
+
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
struct dmtimer *timer = dev_get_drvdata(dev);
@@ -1246,6 +1358,9 @@ static const struct omap_dm_timer_ops dmtimer_ops = {
.write_counter = omap_dm_timer_write_counter,
.read_status = omap_dm_timer_read_status,
.write_status = omap_dm_timer_write_status,
+ .set_cap = omap_dm_timer_set_cap,
+ .get_cap_status = omap_dm_timer_get_pwm_status,
+ .read_cap = omap_dm_timer_cap_counter,
};
static const struct dmtimer_platform_data omap3plus_pdata = {
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
deleted file mode 100644
index 911c92146eca..000000000000
--- a/drivers/clocksource/timer-vf-pit.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
- */
-
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clk.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-/*
- * Each pit takes 0x10 Bytes register space
- */
-#define PITMCR 0x00
-#define PIT0_OFFSET 0x100
-#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n))
-#define PITLDVAL 0x00
-#define PITCVAL 0x04
-#define PITTCTRL 0x08
-#define PITTFLG 0x0c
-
-#define PITMCR_MDIS (0x1 << 1)
-
-#define PITTCTRL_TEN (0x1 << 0)
-#define PITTCTRL_TIE (0x1 << 1)
-#define PITCTRL_CHN (0x1 << 2)
-
-#define PITTFLG_TIF 0x1
-
-static void __iomem *clksrc_base;
-static void __iomem *clkevt_base;
-static unsigned long cycle_per_jiffy;
-
-static inline void pit_timer_enable(void)
-{
- __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_timer_disable(void)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_irq_acknowledge(void)
-{
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-}
-
-static u64 notrace pit_read_sched_clock(void)
-{
- return ~__raw_readl(clksrc_base + PITCVAL);
-}
-
-static int __init pit_clocksource_init(unsigned long rate)
-{
- /* set the max load value and start the clock source counter */
- __raw_writel(0, clksrc_base + PITTCTRL);
- __raw_writel(~0UL, clksrc_base + PITLDVAL);
- __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
-
- sched_clock_register(pit_read_sched_clock, 32, rate);
- return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
- 300, 32, clocksource_mmio_readl_down);
-}
-
-static int pit_set_next_event(unsigned long delta,
- struct clock_event_device *unused)
-{
- /*
- * set a new value to PITLDVAL register will not restart the timer,
- * to abort the current cycle and start a timer period with the new
- * value, the timer must be disabled and enabled again.
- * and the PITLAVAL should be set to delta minus one according to pit
- * hardware requirement.
- */
- pit_timer_disable();
- __raw_writel(delta - 1, clkevt_base + PITLDVAL);
- pit_timer_enable();
-
- return 0;
-}
-
-static int pit_shutdown(struct clock_event_device *evt)
-{
- pit_timer_disable();
- return 0;
-}
-
-static int pit_set_periodic(struct clock_event_device *evt)
-{
- pit_set_next_event(cycle_per_jiffy, evt);
- return 0;
-}
-
-static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- pit_irq_acknowledge();
-
- /*
- * pit hardware doesn't support oneshot, it will generate an interrupt
- * and reload the counter value from PITLDVAL when PITCVAL reach zero,
- * and start the counter again. So software need to disable the timer
- * to stop the counter loop in ONESHOT mode.
- */
- if (likely(clockevent_state_oneshot(evt)))
- pit_timer_disable();
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct clock_event_device clockevent_pit = {
- .name = "VF pit timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = pit_shutdown,
- .set_state_periodic = pit_set_periodic,
- .set_next_event = pit_set_next_event,
- .rating = 300,
-};
-
-static int __init pit_clockevent_init(unsigned long rate, int irq)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-
- BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "VF pit timer", &clockevent_pit));
-
- clockevent_pit.cpumask = cpumask_of(0);
- clockevent_pit.irq = irq;
- /*
- * The value for the LDVAL register trigger is calculated as:
- * LDVAL trigger = (period / clock period) - 1
- * The pit is a 32-bit down count timer, when the counter value
- * reaches 0, it will generate an interrupt, thus the minimal
- * LDVAL trigger value is 1. And then the min_delta is
- * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
- */
- clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);
-
- return 0;
-}
-
-static int __init pit_timer_init(struct device_node *np)
-{
- struct clk *pit_clk;
- void __iomem *timer_base;
- unsigned long clk_rate;
- int irq, ret;
-
- timer_base = of_iomap(np, 0);
- if (!timer_base) {
- pr_err("Failed to iomap\n");
- return -ENXIO;
- }
-
- /*
- * PIT0 and PIT1 can be chained to build a 64-bit timer,
- * so choose PIT2 as clocksource, PIT3 as clockevent device,
- * and leave PIT0 and PIT1 unused for anyone else who needs them.
- */
- clksrc_base = timer_base + PITn_OFFSET(2);
- clkevt_base = timer_base + PITn_OFFSET(3);
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0)
- return -EINVAL;
-
- pit_clk = of_clk_get(np, 0);
- if (IS_ERR(pit_clk))
- return PTR_ERR(pit_clk);
-
- ret = clk_prepare_enable(pit_clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(pit_clk);
- cycle_per_jiffy = clk_rate / (HZ);
-
- /* enable the pit module */
- __raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
-
- ret = pit_clocksource_init(clk_rate);
- if (ret)
- return ret;
-
- return pit_clockevent_init(clk_rate, irq);
-}
-TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fc7eace8b65b..58e3839a2140 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2953,6 +2953,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("cpufreq: supports frequency invariance\n");
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@@ -2974,21 +2983,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
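+	/* Roll back the frequency-invariance enable done above. */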
+ if (!cpufreq_driver->setpolicy)
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index f7b9c6547e18..65d6d0af140a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -2612,7 +2612,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
- __sev_platform_shutdown_locked(NULL);
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4137a8bf131f..4835bdebdbb3 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -69,7 +69,6 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
- select CRYPTO_CURVE25519
select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 1550c3818383..21ccf879f70c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
-#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
@@ -106,16 +105,6 @@ struct hpre_ecdh_ctx {
dma_addr_t dma_g;
};
-struct hpre_curve25519_ctx {
- /* low address: p->a->k */
- unsigned char *p;
- dma_addr_t dma_p;
-
- /* gx coordinate */
- unsigned char *g;
- dma_addr_t dma_g;
-};
-
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
@@ -129,7 +118,6 @@ struct hpre_ctx {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
struct hpre_ecdh_ctx ecdh;
- struct hpre_curve25519_ctx curve25519;
};
/* for ecc algorithms */
unsigned int curve_id;
@@ -146,7 +134,6 @@ struct hpre_asym_request {
struct akcipher_request *rsa;
struct kpp_request *dh;
struct kpp_request *ecdh;
- struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -1214,8 +1201,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
}
}
-static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
- bool is_ecdh)
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
@@ -1224,17 +1210,11 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
- if (is_ecdh && ctx->ecdh.p) {
+ if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
ctx->ecdh.p = NULL;
- } else if (!is_ecdh && ctx->curve25519.p) {
- /* curve25519: p->a->k */
- memzero_explicit(ctx->curve25519.p + shift, sz);
- dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
- ctx->curve25519.dma_p);
- ctx->curve25519.p = NULL;
}
hpre_ctx_clear(ctx, is_clear_all);
@@ -1432,7 +1412,7 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- hpre_ecc_clear_ctx(ctx, false, true);
+ hpre_ecc_clear_ctx(ctx, false);
ret = hpre_ecdh_set_param(ctx, &params);
if (ret < 0) {
@@ -1683,337 +1663,7 @@ static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, true);
-}
-
-static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- u8 secret[CURVE25519_KEY_SIZE] = { 0 };
- unsigned int sz = ctx->key_sz;
- const struct ecc_curve *curve;
- unsigned int shift = sz << 1;
- void *p;
-
- /*
- * The key from 'buf' is in little-endian, we should preprocess it as
- * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
- * then convert it to big endian. Only in this way, the result can be
- * the same as the software curve-25519 that exists in crypto.
- */
- memcpy(secret, buf, len);
- curve25519_clamp_secret(secret);
- hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
-
- p = ctx->curve25519.p + sz - len;
-
- curve = ecc_get_curve25519();
-
- /* fill curve parameters */
- fill_curve_param(p, curve->p, len, curve->g.ndigits);
- fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
- memcpy(p + shift, secret, len);
- fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
- memzero_explicit(secret, CURVE25519_KEY_SIZE);
-}
-
-static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
-
- /* p->a->k->gx */
- if (!ctx->curve25519.p) {
- ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
- &ctx->curve25519.dma_p,
- GFP_KERNEL);
- if (!ctx->curve25519.p)
- return -ENOMEM;
- }
-
- ctx->curve25519.g = ctx->curve25519.p + shift + sz;
- ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
-
- hpre_curve25519_fill_curve(ctx, buf, len);
-
- return 0;
-}
-
-static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- int ret = -EINVAL;
-
- if (len != CURVE25519_KEY_SIZE ||
- !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "key is null or key len is not 32bytes!\n");
- return ret;
- }
-
- /* Free old secret if any */
- hpre_ecc_clear_ctx(ctx, false, false);
-
- ctx->key_sz = CURVE25519_KEY_SIZE;
- ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
- if (ret) {
- dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
- hpre_ecc_clear_ctx(ctx, false, false);
- return ret;
- }
-
- return 0;
-}
-
-static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
-{
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
-
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, dma);
-
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
-}
-
-static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
-{
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
-
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.curve25519;
- areq->dst_len = ctx->key_sz;
-
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
-
- /* Do unmap before data processing */
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
-
- hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
-
- kpp_request_complete(areq, ret);
-
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
-}
-
-static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
-{
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
-
- if (unlikely(req->dst_len < ctx->key_sz)) {
- req->dst_len = ctx->key_sz;
- return -EINVAL;
- }
-
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_curve25519_cb;
- h_req->areq.curve25519 = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->curve25519.dma_p);
-
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
-
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
-}
-
-static void hpre_curve25519_src_modulo_p(u8 *ptr)
-{
- int i;
-
- for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
- ptr[i] = 0;
-
- /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
- ptr[i] -= 0xed;
-}
-
-static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- u8 p[CURVE25519_KEY_SIZE] = { 0 };
- const struct ecc_curve *curve;
- dma_addr_t dma = 0;
- u8 *ptr;
-
- if (len != CURVE25519_KEY_SIZE) {
- dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
- return -EINVAL;
- }
-
- ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
-
- scatterwalk_map_and_copy(ptr, data, 0, len, 0);
-
- if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "gx is null!\n");
- goto err;
- }
-
- /*
- * Src_data(gx) is in little-endian order, MSB in the final byte should
- * be masked as described in RFC7748, then transform it to big-endian
- * form, then hisi_hpre can use the data.
- */
- ptr[31] &= 0x7f;
- hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
-
- curve = ecc_get_curve25519();
-
- fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
-
- /*
- * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
- * we get its modulus to p, and then use it.
- */
- if (memcmp(ptr, p, ctx->key_sz) == 0) {
- dev_err(dev, "gx is p!\n");
- goto err;
- } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
- hpre_curve25519_src_modulo_p(ptr);
- }
-
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
-
-err:
- dma_free_coherent(dev, ctx->key_sz, ptr, dma);
- return -EINVAL;
-}
-
-static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
-
- if (!data || !sg_is_last(data) || len != ctx->key_sz) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
-
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
-
- msg->out = cpu_to_le64(dma);
- return 0;
-}
-
-static int hpre_curve25519_compute_value(struct kpp_request *req)
-{
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
-
- ret = hpre_curve25519_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
- return ret;
- }
-
- if (req->src) {
- ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n",
- ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->curve25519.dma_g);
- }
-
- ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
-
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
-
-clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
-}
-
-static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- return ctx->key_sz;
-}
-
-static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
-}
-
-static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- hpre_ecc_clear_ctx(ctx, true, false);
+ hpre_ecc_clear_ctx(ctx, true);
}
static struct akcipher_alg rsa = {
@@ -2095,22 +1745,6 @@ static struct kpp_alg ecdh_curves[] = {
}
};
-static struct kpp_alg curve25519_alg = {
- .set_secret = hpre_curve25519_set_secret,
- .generate_public_key = hpre_curve25519_compute_value,
- .compute_shared_secret = hpre_curve25519_compute_value,
- .max_size = hpre_curve25519_max_size,
- .init = hpre_curve25519_init_tfm,
- .exit = hpre_curve25519_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "curve25519",
- .cra_driver_name = "hpre-curve25519",
- .cra_module = THIS_MODULE,
- },
-};
-
static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
@@ -2192,28 +1826,6 @@ static void hpre_unregister_ecdh(struct hisi_qm *qm)
crypto_unregister_kpp(&ecdh_curves[i]);
}
-static int hpre_register_x25519(struct hisi_qm *qm)
-{
- int ret;
-
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return 0;
-
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
-
- return ret;
-}
-
-static void hpre_unregister_x25519(struct hisi_qm *qm)
-{
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return;
-
- crypto_unregister_kpp(&curve25519_alg);
-}
-
int hpre_algs_register(struct hisi_qm *qm)
{
int ret = 0;
@@ -2236,17 +1848,11 @@ int hpre_algs_register(struct hisi_qm *qm)
if (ret)
goto unreg_dh;
- ret = hpre_register_x25519(qm);
- if (ret)
- goto unreg_ecdh;
-
hpre_available_devs++;
mutex_unlock(&hpre_algs_lock);
return ret;
-unreg_ecdh:
- hpre_unregister_ecdh(qm);
unreg_dh:
hpre_unregister_dh(qm);
unreg_rsa:
@@ -2262,7 +1868,6 @@ void hpre_algs_unregister(struct hisi_qm *qm)
if (--hpre_available_devs)
goto unlock;
- hpre_unregister_x25519(qm);
hpre_unregister_ecdh(qm);
hpre_unregister_dh(qm);
hpre_unregister_rsa(qm);
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 76b7ecb5624b..f22c12e36b56 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -700,7 +700,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "md5-generic");
+ return img_hash_cra_init(tfm, "md5-lib");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 54c480e874cb..d7714d8afb0f 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -388,7 +388,7 @@ static const struct super_operations dax_sops = {
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
.free_inode = dax_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static int dax_init_fs_context(struct fs_context *fc)
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 036f21cac0a9..0a852011653c 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -211,8 +211,8 @@ static int
dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
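+	/*
+	 * DPLL_CLOCK_QUALITY_LEVEL_MAX is itself a valid level, so the
+	 * bitmap needs MAX + 1 bits.
+	 */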
+ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
- DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
enum dpll_clock_quality_level ql;
int ret;
@@ -221,7 +221,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
if (ret)
return ret;
- for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
+ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
return -EMSGSIZE;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 19ad3c3b675d..39352b9b7a7e 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -576,4 +576,20 @@ config EDAC_LOONGSON
errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
are compatible.
+config EDAC_CORTEX_A72
+ tristate "ARM Cortex A72"
+ depends on ARM64
+ help
+	  Support for L1/L2 cache error detection on the ARM Cortex-A72
+	  processor. The detected errors are reported by reading the CPU/L2
+	  memory error syndrome registers.
+
+config EDAC_VERSALNET
+ tristate "AMD VersalNET DDR Controller"
+ depends on CDX_CONTROLLER && ARCH_ZYNQMP
+ help
+	  Support for single-bit error correction, double-bit error detection
+	  and reporting of other system errors from IP subsystems such as the
+	  RPU, NOCs, HNICX and PL on the AMD Versal NET DDR memory controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a8f2d8f6c894..1c14796410a3 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -88,3 +88,5 @@ obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
+obj-$(CONFIG_EDAC_VERSALNET) += versalnet_edac.o
+obj-$(CONFIG_EDAC_CORTEX_A72) += a72_edac.o
diff --git a/drivers/edac/a72_edac.c b/drivers/edac/a72_edac.c
new file mode 100644
index 000000000000..9262d75c3855
--- /dev/null
+++ b/drivers/edac/a72_edac.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cortex A72 EDAC L1 and L2 cache error detection
+ *
+ * Copyright (c) 2020 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (c) 2025 Microsoft Corporation, <vijayb@linux.microsoft.com>
+ *
+ * Based on code from:
+ * Copyright (c) 2018, NXP Semiconductor
+ * Author: York Sun <york.sun@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <asm/smp_plat.h>
+
+#include "edac_module.h"
+
+#define DRVNAME "a72-edac"
+
+#define SYS_CPUMERRSR_EL1 sys_reg(3, 1, 15, 2, 2)
+#define SYS_L2MERRSR_EL1 sys_reg(3, 1, 15, 2, 3)
+
+#define CPUMERRSR_EL1_RAMID GENMASK(30, 24)
+#define L2MERRSR_EL1_CPUID_WAY GENMASK(21, 18)
+
+#define CPUMERRSR_EL1_VALID BIT(31)
+#define CPUMERRSR_EL1_FATAL BIT(63)
+#define L2MERRSR_EL1_VALID BIT(31)
+#define L2MERRSR_EL1_FATAL BIT(63)
+
+#define L1_I_TAG_RAM 0x00
+#define L1_I_DATA_RAM 0x01
+#define L1_D_TAG_RAM 0x08
+#define L1_D_DATA_RAM 0x09
+#define TLB_RAM 0x18
+
+#define MESSAGE_SIZE 64
+
+struct mem_err_synd_reg {
+ u64 cpu_mesr;
+ u64 l2_mesr;
+};
+
+static struct cpumask compat_mask;
+
+static void report_errors(struct edac_device_ctl_info *edac_ctl, int cpu,
+ struct mem_err_synd_reg *mesr)
+{
+ u64 cpu_mesr = mesr->cpu_mesr;
+ u64 l2_mesr = mesr->l2_mesr;
+ char msg[MESSAGE_SIZE];
+
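+	/* EDAC block 0 carries L1/TLB errors, block 1 carries L2 errors. */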
+ if (cpu_mesr & CPUMERRSR_EL1_VALID) {
+ const char *str;
+ bool fatal = cpu_mesr & CPUMERRSR_EL1_FATAL;
+
+ switch (FIELD_GET(CPUMERRSR_EL1_RAMID, cpu_mesr)) {
+ case L1_I_TAG_RAM:
+ str = "L1-I Tag RAM";
+ break;
+ case L1_I_DATA_RAM:
+ str = "L1-I Data RAM";
+ break;
+ case L1_D_TAG_RAM:
+ str = "L1-D Tag RAM";
+ break;
+ case L1_D_DATA_RAM:
+ str = "L1-D Data RAM";
+ break;
+ case TLB_RAM:
+ str = "TLB RAM";
+ break;
+ default:
+ str = "Unspecified";
+ break;
+ }
+
+ snprintf(msg, MESSAGE_SIZE, "%s %s error(s) on CPU %d",
+ str, fatal ? "fatal" : "correctable", cpu);
+
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 0, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 0, msg);
+ }
+
+ if (l2_mesr & L2MERRSR_EL1_VALID) {
+ bool fatal = l2_mesr & L2MERRSR_EL1_FATAL;
+
+ snprintf(msg, MESSAGE_SIZE, "L2 %s error(s) on CPU %d CPUID/WAY 0x%lx",
+ fatal ? "fatal" : "correctable", cpu,
+ FIELD_GET(L2MERRSR_EL1_CPUID_WAY, l2_mesr));
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 1, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 1, msg);
+ }
+}
+
+static void read_errors(void *data)
+{
+ struct mem_err_synd_reg *mesr = data;
+
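+	/*
+	 * Runs on the reporting CPU via smp_call_function_single(). A
+	 * valid syndrome is cleared by writing zero so that the next
+	 * error can be latched.
+	 */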
+ mesr->cpu_mesr = read_sysreg_s(SYS_CPUMERRSR_EL1);
+ if (mesr->cpu_mesr & CPUMERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_CPUMERRSR_EL1);
+ isb();
+ }
+ mesr->l2_mesr = read_sysreg_s(SYS_L2MERRSR_EL1);
+ if (mesr->l2_mesr & L2MERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_L2MERRSR_EL1);
+ isb();
+ }
+}
+
+static void a72_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+ struct mem_err_synd_reg mesr;
+ int cpu;
+
+ cpus_read_lock();
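+	/* Hold off CPU hotplug while cross-calling the online CPUs. */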
+ for_each_cpu_and(cpu, cpu_online_mask, &compat_mask) {
+ smp_call_function_single(cpu, read_errors, &mesr, true);
+ report_errors(edac_ctl, cpu, &mesr);
+ }
+ cpus_read_unlock();
+}
+
+static int a72_edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ edac_ctl = edac_device_alloc_ctl_info(0, "cpu",
+ num_possible_cpus(), "L", 2, 1,
+ edac_device_alloc_index());
+ if (!edac_ctl)
+ return -ENOMEM;
+
+ edac_ctl->edac_check = a72_edac_check;
+ edac_ctl->dev = dev;
+ edac_ctl->mod_name = dev_name(dev);
+ edac_ctl->dev_name = dev_name(dev);
+ edac_ctl->ctl_name = DRVNAME;
+ dev_set_drvdata(dev, edac_ctl);
+
+ rc = edac_device_add_device(edac_ctl);
+ if (rc)
+ goto out_dev;
+
+ return 0;
+
+out_dev:
+ edac_device_free_ctl_info(edac_ctl);
+
+ return rc;
+}
+
+static void a72_edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl = dev_get_drvdata(&pdev->dev);
+
+ edac_device_del_device(edac_ctl->dev);
+ edac_device_free_ctl_info(edac_ctl);
+}
+
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+ { .compatible = "arm,cortex-a72" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver a72_edac_driver = {
+ .probe = a72_edac_probe,
+ .remove = a72_edac_remove,
+ .driver = {
+ .name = DRVNAME,
+ },
+};
+
+static struct platform_device *a72_pdev;
+
+static int __init a72_edac_driver_init(void)
+{
+ int cpu;
+
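+	/* Collect the CPUs whose DT node matches and opts in via "edac-enabled". */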
+ for_each_possible_cpu(cpu) {
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
+ if (np) {
+ if (of_match_node(cortex_arm64_edac_of_match, np) &&
+ of_property_read_bool(np, "edac-enabled")) {
+ cpumask_set_cpu(cpu, &compat_mask);
+ }
+ } else {
+ pr_warn("failed to find device node for CPU %d\n", cpu);
+ }
+ }
+
+ if (cpumask_empty(&compat_mask))
+ return 0;
+
+ a72_pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+ if (IS_ERR(a72_pdev)) {
+ pr_err("failed to register A72 EDAC device\n");
+ return PTR_ERR(a72_pdev);
+ }
+
+ return platform_driver_register(&a72_edac_driver);
+}
+
+static void __exit a72_edac_driver_exit(void)
+{
+ platform_device_unregister(a72_pdev);
+ platform_driver_unregister(&a72_edac_driver);
+}
+
+module_init(a72_edac_driver_init);
+module_exit(a72_edac_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("Cortex A72 L1 and L2 cache EDAC driver");
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 7685a8550d4b..103b2c2eba2a 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -2130,8 +2130,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
- 64, &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 64, &a10_eccmgr_ic_ops,
+ edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 07f1e9dc1ca7..2f6ab783bf20 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3923,6 +3923,26 @@ static int per_family_init(struct amd64_pvt *pvt)
pvt->ctl_name = "F1Ah_M40h";
pvt->flags.zn_regs_v2 = 1;
break;
+ case 0x50 ... 0x57:
+ pvt->ctl_name = "F1Ah_M50h";
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x90 ... 0x9f:
+ pvt->ctl_name = "F1Ah_M90h";
+ pvt->max_mcs = 8;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0xa0 ... 0xaf:
+ pvt->ctl_name = "F1Ah_MA0h";
+ pvt->max_mcs = 8;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0xc0 ... 0xc7:
+ pvt->ctl_name = "F1Ah_MC0h";
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
}
break;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 17228d07de4c..d70b8a8d0b09 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -96,7 +96,7 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
-#define NUM_CONTROLLERS 12
+#define NUM_CONTROLLERS 16
#define ON true
#define OFF false
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
index 51c451c7f0f0..51c451c7f0f0 100755..100644
--- a/drivers/edac/ecs.c
+++ b/drivers/edac/ecs.c
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0f338adf7d93..8689631f1905 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -305,6 +305,14 @@ DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 10);
DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
channel_dimm_label_show, channel_dimm_label_store, 11);
+DEVICE_CHANNEL(ch12_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 12);
+DEVICE_CHANNEL(ch13_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 13);
+DEVICE_CHANNEL(ch14_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 14);
+DEVICE_CHANNEL(ch15_dimm_label, S_IRUGO | S_IWUSR,
+ channel_dimm_label_show, channel_dimm_label_store, 15);
/* Total possible dynamic DIMM Label attribute file table */
static struct attribute *dynamic_csrow_dimm_attr[] = {
@@ -320,6 +328,10 @@ static struct attribute *dynamic_csrow_dimm_attr[] = {
&dev_attr_legacy_ch9_dimm_label.attr.attr,
&dev_attr_legacy_ch10_dimm_label.attr.attr,
&dev_attr_legacy_ch11_dimm_label.attr.attr,
+ &dev_attr_legacy_ch12_dimm_label.attr.attr,
+ &dev_attr_legacy_ch13_dimm_label.attr.attr,
+ &dev_attr_legacy_ch14_dimm_label.attr.attr,
+ &dev_attr_legacy_ch15_dimm_label.attr.attr,
NULL
};
@@ -348,6 +360,14 @@ DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 10);
DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
channel_ce_count_show, NULL, 11);
+DEVICE_CHANNEL(ch12_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 12);
+DEVICE_CHANNEL(ch13_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 13);
+DEVICE_CHANNEL(ch14_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 14);
+DEVICE_CHANNEL(ch15_ce_count, S_IRUGO,
+ channel_ce_count_show, NULL, 15);
/* Total possible dynamic ce_count attribute file table */
static struct attribute *dynamic_csrow_ce_count_attr[] = {
@@ -363,6 +383,10 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = {
&dev_attr_legacy_ch9_ce_count.attr.attr,
&dev_attr_legacy_ch10_ce_count.attr.attr,
&dev_attr_legacy_ch11_ce_count.attr.attr,
+ &dev_attr_legacy_ch12_ce_count.attr.attr,
+ &dev_attr_legacy_ch13_ce_count.attr.attr,
+ &dev_attr_legacy_ch14_ce_count.attr.attr,
+ &dev_attr_legacy_ch15_ce_count.attr.attr,
NULL
};
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index bf4171ac191d..2010a47149f4 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -468,17 +468,18 @@ static int i10nm_get_imc_num(struct res_config *cfg)
return -ENODEV;
}
- if (imc_num > I10NM_NUM_DDR_IMC) {
- i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
- return -EINVAL;
- }
-
if (cfg->ddr_imc_num != imc_num) {
/*
- * Store the number of present DDR memory controllers.
+ * Update the configuration data to reflect the number of
+ * present DDR memory controllers.
*/
cfg->ddr_imc_num = imc_num;
edac_dbg(2, "Set DDR MC number: %d", imc_num);
+
+ /* Release and reallocate skx_dev list with the updated number. */
+ skx_remove();
+ if (skx_get_all_bus_mappings(cfg, &i10nm_edac_list) <= 0)
+ return -ENODEV;
}
return 0;
@@ -1057,6 +1058,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
return !!GET_BITFIELD(mcmtr, 2, 2);
}
+static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
+{
+ u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
+
+ edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
+
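+	/*
+	 * An all-ones value means the register is inaccessible; otherwise
+	 * MCMTR bit 18 flags the channel as disabled.
+	 */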
+ return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
+}
+
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct res_config *cfg)
{
@@ -1070,6 +1080,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
if (!imc->mbase)
continue;
+ if (i10nm_channel_disabled(imc, i)) {
+ edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
+ continue;
+ }
+
ndimms = 0;
if (res_cfg->type != GNR)
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 5c1fa1c0d12e..5a080ab65476 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -99,6 +99,8 @@
/* Alder Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2 0x4668 /* 8P+4E, e.g. i7-12700K */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3 0x4648 /* 6P+4E, e.g. i5-12600K */
/* Bartlett Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1 0x4639
@@ -761,6 +763,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3), (kernel_ulong_t)&rpl_s_cfg},
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index 108d69209146..108d69209146 100755..100644
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
index f9d02af2fc3a..f9d02af2fc3a 100755..100644
--- a/drivers/edac/scrub.c
+++ b/drivers/edac/scrub.c
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 29897b21fb8e..078ddf95cc6e 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -33,6 +33,15 @@ static unsigned int nvdimm_count;
#define MASK26 0x3FFFFFF /* Mask for 2^26 */
#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
+static struct res_config skx_cfg = {
+ .type = SKX,
+ .decs_did = 0x2016,
+ .busno_cfg_offset = 0xcc,
+ .ddr_imc_num = 2,
+ .ddr_chan_num = 3,
+ .ddr_dimm_num = 2,
+};
+
static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
@@ -52,7 +61,7 @@ enum munittype {
struct munit {
u16 did;
- u16 devfn[SKX_NUM_IMC];
+ u16 devfn[2];
u8 busidx;
u8 per_socket;
enum munittype mtype;
@@ -89,11 +98,11 @@ static int get_all_munits(const struct munit *m)
if (!pdev)
break;
ndev++;
- if (m->per_socket == SKX_NUM_IMC) {
- for (i = 0; i < SKX_NUM_IMC; i++)
+ if (m->per_socket == skx_cfg.ddr_imc_num) {
+ for (i = 0; i < skx_cfg.ddr_imc_num; i++)
if (m->devfn[i] == pdev->devfn)
break;
- if (i == SKX_NUM_IMC)
+ if (i == skx_cfg.ddr_imc_num)
goto fail;
}
d = get_skx_dev(pdev->bus, m->busidx);
@@ -157,12 +166,6 @@ fail:
return -ENODEV;
}
-static struct res_config skx_cfg = {
- .type = SKX,
- .decs_did = 0x2016,
- .busno_cfg_offset = 0xcc,
-};
-
static const struct x86_cpu_id skx_cpuids[] = {
X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
@@ -186,11 +189,11 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
/* Only the mcmtr on the first channel is effective */
pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
- for (i = 0; i < SKX_NUM_CHANNELS; i++) {
+ for (i = 0; i < cfg->ddr_chan_num; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
- for (j = 0; j < SKX_NUM_DIMMS; j++) {
+ for (j = 0; j < cfg->ddr_dimm_num; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
@@ -620,6 +623,7 @@ static int __init skx_init(void)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
@@ -652,10 +656,13 @@ static int __init skx_init(void)
goto fail;
edac_dbg(2, "src_id = %d\n", src_id);
- for (i = 0; i < SKX_NUM_IMC; i++) {
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
+ d->imc[i].num_channels = cfg->ddr_chan_num;
+ d->imc[i].num_dimms = cfg->ddr_dimm_num;
+
rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 39c733dbc5b9..724842f512ac 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -14,9 +14,11 @@
* Copyright (c) 2018, Intel Corporation.
*/
+#include <linux/topology.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/adxl.h>
+#include <linux/overflow.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
#include <asm/uv/uv.h>
@@ -130,8 +132,8 @@ static void skx_init_mc_mapping(struct skx_dev *d)
* the logical indices of the memory controllers enumerated by the
* EDAC driver.
*/
- for (int i = 0; i < NUM_IMC; i++)
- d->mc_mapping[i] = i;
+ for (int i = 0; i < d->num_imc; i++)
+ d->imc[i].mc_mapping = i;
}
void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
@@ -139,22 +141,28 @@ void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
pmc, lmc);
- d->mc_mapping[pmc] = lmc;
+ d->imc[lmc].mc_mapping = pmc;
}
EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
-static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+static int skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
{
- edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
- pmc, d->mc_mapping[pmc]);
+ for (int lmc = 0; lmc < d->num_imc; lmc++) {
+ if (d->imc[lmc].mc_mapping == pmc) {
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
- return d->mc_mapping[pmc];
+ return lmc;
+ }
+ }
+
+ return -1;
}
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
+ int i, lmc, len = 0;
struct skx_dev *d;
- int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
res->addr < BIT_ULL(32))) {
@@ -200,7 +208,7 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
- if (res->imc > NUM_IMC - 1 || res->imc < 0) {
+ if (res->imc < 0) {
skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
return false;
}
@@ -218,7 +226,13 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
- res->imc = skx_get_mc_mapping(d, res->imc);
+ lmc = skx_get_mc_mapping(d, res->imc);
+ if (lmc < 0) {
+ skx_printk(KERN_ERR, "No lmc for imc %d\n", res->imc);
+ return false;
+ }
+
+ res->imc = lmc;
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
@@ -265,7 +279,7 @@ static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
struct cpuinfo_x86 *c = &cpu_data(cpu);
if (c->initialized && cpu_to_node(cpu) == node) {
- *id = c->topo.pkg_id;
+ *id = topology_physical_package_id(cpu);
return 0;
}
}
@@ -320,10 +334,10 @@ static int get_width(u32 mtr)
*/
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
+ int ndev = 0, imc_num = cfg->ddr_imc_num + cfg->hbm_imc_num;
struct pci_dev *pdev, *prev;
struct skx_dev *d;
u32 reg;
- int ndev = 0;
prev = NULL;
for (;;) {
@@ -331,7 +345,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
if (!pdev)
break;
ndev++;
- d = kzalloc(sizeof(*d), GFP_KERNEL);
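+		/* imc[] is now a flexible array sized by the per-config controller count. */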
+ d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;
@@ -354,8 +368,10 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->seg = GET_BITFIELD(reg, 16, 23);
}
- edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
- d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ d->num_imc = imc_num;
+
+ edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x, imcs %d\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3], imc_num);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
@@ -541,10 +557,10 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
/* Allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
- layers[0].size = NUM_CHANNELS;
+ layers[0].size = imc->num_channels;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
- layers[1].size = NUM_DIMMS;
+ layers[1].size = imc->num_dimms;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
sizeof(struct skx_pvt));
@@ -784,7 +800,7 @@ void skx_remove(void)
list_for_each_entry_safe(d, tmp, &dev_edac_list, list) {
list_del(&d->list);
- for (i = 0; i < NUM_IMC; i++) {
+ for (i = 0; i < d->num_imc; i++) {
if (d->imc[i].mci)
skx_unregister_mci(&d->imc[i]);
@@ -794,7 +810,7 @@ void skx_remove(void)
if (d->imc[i].mbase)
iounmap(d->imc[i].mbase);
- for (j = 0; j < NUM_CHANNELS; j++) {
+ for (j = 0; j < d->imc[i].num_channels; j++) {
if (d->imc[i].chan[j].cdev)
pci_dev_put(d->imc[i].chan[j].cdev);
}
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index ec4966f7ea40..73ba89786cdf 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -29,23 +29,18 @@
#define GET_BITFIELD(v, lo, hi) \
(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
-#define SKX_NUM_IMC 2 /* Memory controllers per socket */
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
-#define I10NM_NUM_HBM_IMC 16
#define I10NM_NUM_HBM_CHANNELS 2
#define I10NM_NUM_HBM_DIMMS 1
-#define I10NM_NUM_IMC (I10NM_NUM_DDR_IMC + I10NM_NUM_HBM_IMC)
#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
-#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
@@ -134,16 +129,7 @@ struct skx_dev {
struct pci_dev *uracu; /* for i10nm CPU */
struct pci_dev *pcu_cr3; /* for HBM memory detection */
u32 mcroute;
- /*
- * Some server BIOS may hide certain memory controllers, and the
- * EDAC driver skips those hidden memory controllers. However, the
- * ADXL still decodes memory error address using physical memory
- * controller indices. The mapping table is used to convert the
- * physical indices (reported by ADXL) to the logical indices
- * (used the EDAC driver) of present memory controllers during the
- * error handling process.
- */
- u8 mc_mapping[NUM_IMC];
+ int num_imc;
struct skx_imc {
struct mem_ctl_info *mci;
struct pci_dev *mdev; /* for i10nm CPU */
@@ -155,6 +141,16 @@ struct skx_dev {
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
u8 src_id;
+ /*
+ * Some server BIOS may hide certain memory controllers, and the
+ * EDAC driver skips those hidden memory controllers. However, the
+ * ADXL still decodes memory error address using physical memory
+ * controller indices. The mapping table is used to convert the
+ * physical indices (reported by ADXL) to the logical indices
+ * (used the EDAC driver) of present memory controllers during the
+ * error handling process.
+ */
+ u8 mc_mapping;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -171,7 +167,7 @@ struct skx_dev {
u8 colbits;
} dimms[NUM_DIMMS];
} chan[NUM_CHANNELS];
- } imc[NUM_IMC];
+ } imc[];
};
struct skx_pvt {
diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c
new file mode 100644
index 000000000000..7c5db8bf0595
--- /dev/null
+++ b/drivers/edac/versalnet_edac.c
@@ -0,0 +1,960 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal NET memory controller driver
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/cdx/edac_cdx_pcol.h>
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/ras.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/sizes.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+/* Granularity of reported error in bytes */
+#define MC5_ERR_GRAIN 1
+#define MC_GET_DDR_CONFIG_IN_LEN 4
+
+#define MC5_IRQ_CE_MASK GENMASK(18, 15)
+#define MC5_IRQ_UE_MASK GENMASK(14, 11)
+
+#define MC5_RANK_1_MASK GENMASK(11, 6)
+#define MASK_24 GENMASK(29, 24)
+#define MASK_0 GENMASK(5, 0)
+
+#define MC5_LRANK_1_MASK GENMASK(11, 6)
+#define MC5_LRANK_2_MASK GENMASK(17, 12)
+#define MC5_BANK1_MASK GENMASK(11, 6)
+#define MC5_GRP_0_MASK GENMASK(17, 12)
+#define MC5_GRP_1_MASK GENMASK(23, 18)
+
+#define MC5_REGHI_ROW 7
+#define MC5_EACHBIT 1
+#define MC5_ERR_TYPE_CE 0
+#define MC5_ERR_TYPE_UE 1
+#define MC5_HIGH_MEM_EN BIT(20)
+#define MC5_MEM_MASK GENMASK(19, 0)
+#define MC5_X16_BASE 256
+#define MC5_X16_ECC 32
+#define MC5_X16_SIZE (MC5_X16_BASE + MC5_X16_ECC)
+#define MC5_X32_SIZE 576
+#define MC5_HIMEM_BASE (256 * SZ_1M)
+#define MC5_ILC_HIMEM_EN BIT(28)
+#define MC5_ILC_MEM GENMASK(27, 0)
+#define MC5_INTERLEAVE_SEL GENMASK(3, 0)
+#define MC5_BUS_WIDTH_MASK GENMASK(19, 18)
+#define MC5_NUM_CHANS_MASK BIT(17)
+#define MC5_RANK_MASK GENMASK(15, 14)
+
+#define ERROR_LEVEL 2
+#define ERROR_ID 3
+#define TOTAL_ERR_LENGTH 5
+#define MSG_ERR_OFFSET 8
+#define MSG_ERR_LENGTH 9
+#define ERROR_DATA 10
+#define MCDI_RESPONSE 0xFF
+
+#define REG_MAX 152
+#define ADEC_MAX 152
+#define NUM_CONTROLLERS 8
+#define REGS_PER_CONTROLLER 19
+#define ADEC_NUM 19
+#define BUFFER_SZ 80
+
+#define XDDR5_BUS_WIDTH_64 0
+#define XDDR5_BUS_WIDTH_32 1
+#define XDDR5_BUS_WIDTH_16 2
+
+/**
+ * union ecc_error_info - ECC error log information.
+ * @burstpos: Burst position.
+ * @lrank: Logical Rank number.
+ * @rank: Rank number.
+ * @group: Group number.
+ * @bank: Bank number.
+ * @col: Column number.
+ * @row: Row number.
+ * @rowhi: Row number higher bits.
+ * @i: Combined ECC error vector containing encoded values of burst position,
+ * rank, bank, column, and row information.
+ */
+union ecc_error_info {
+ struct {
+ u32 burstpos:3;
+ u32 lrank:4;
+ u32 rank:2;
+ u32 group:3;
+ u32 bank:2;
+ u32 col:11;
+ u32 row:7;
+ u32 rowhi;
+ };
+ u64 i;
+} __packed;
+
+/* Row and column bit positions in the address decoder (ADEC) registers. */
+union row_col_mapping {
+ struct {
+ u32 row0:6;
+ u32 row1:6;
+ u32 row2:6;
+ u32 row3:6;
+ u32 row4:6;
+ u32 reserved:2;
+ };
+ struct {
+ u32 col1:6;
+ u32 col2:6;
+ u32 col3:6;
+ u32 col4:6;
+ u32 col5:6;
+ u32 reservedcol:2;
+ };
+ u32 i;
+} __packed;
+
+/**
+ * struct ecc_status - ECC status information to report.
+ * @ceinfo: Correctable errors.
+ * @ueinfo: Uncorrectable errors.
+ * @channel: Channel number.
+ * @error_type: Error type.
+ */
+struct ecc_status {
+ union ecc_error_info ceinfo[2];
+ union ecc_error_info ueinfo[2];
+ u8 channel;
+ u8 error_type;
+};
+
+/**
+ * struct mc_priv - DDR memory controller private instance data.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ * @error_id: The error id.
+ * @error_level: The error level.
+ * @dwidth: Width of data bus excluding ECC bits.
+ * @part_len: Length of the message part received so far.
+ * @regs: The registers sent on the rpmsg.
+ * @adec: Address decode registers.
+ * @mci: Memory controller interface.
+ * @ept: rpmsg endpoint.
+ * @mcdi: The mcdi handle.
+ */
+struct mc_priv {
+ char message[256];
+ struct ecc_status stat;
+ u32 error_id;
+ u32 error_level;
+ u32 dwidth;
+ u32 part_len;
+ u32 regs[REG_MAX];
+ u32 adec[ADEC_MAX];
+ struct mem_ctl_info *mci[NUM_CONTROLLERS];
+ struct rpmsg_endpoint *ept;
+ struct cdx_mcdi *mcdi;
+};
+
+/*
+ * Address decoder (ADEC) registers to match the order in which the register
+ * information is received from the firmware.
+ */
+enum adec_info {
+ CONF = 0,
+ ADEC0,
+ ADEC1,
+ ADEC2,
+ ADEC3,
+ ADEC4,
+ ADEC5,
+ ADEC6,
+ ADEC7,
+ ADEC8,
+ ADEC9,
+ ADEC10,
+ ADEC11,
+ ADEC12,
+ ADEC13,
+ ADEC14,
+ ADEC15,
+ ADEC16,
+ ADECILC,
+};
+
+enum reg_info {
+ ISR = 0,
+ IMR,
+ ECCR0_ERR_STATUS,
+ ECCR0_ADDR_LO,
+ ECCR0_ADDR_HI,
+ ECCR0_DATA_LO,
+ ECCR0_DATA_HI,
+ ECCR0_PAR,
+ ECCR1_ERR_STATUS,
+ ECCR1_ADDR_LO,
+ ECCR1_ADDR_HI,
+ ECCR1_DATA_LO,
+ ECCR1_DATA_HI,
+ ECCR1_PAR,
+ XMPU_ERR,
+ XMPU_ERR_ADDR_L0,
+ XMPU_ERR_ADDR_HI,
+ XMPU_ERR_AXI_ID,
+ ADEC_CHK_ERR_LOG,
+};
+
+static bool get_ddr_info(u32 *error_data, struct mc_priv *priv)
+{
+ u32 reglo, reghi, parity, eccr0_val, eccr1_val, isr;
+ struct ecc_status *p;
+
+ isr = error_data[ISR];
+
+ if (!(isr & (MC5_IRQ_UE_MASK | MC5_IRQ_CE_MASK)))
+ return false;
+
+ eccr0_val = error_data[ECCR0_ERR_STATUS];
+ eccr1_val = error_data[ECCR1_ERR_STATUS];
+
+ if (!eccr0_val && !eccr1_val)
+ return false;
+
+ p = &priv->stat;
+
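+	/* A clear ECCR0 status means the error was latched on channel 1. */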
+ if (!eccr0_val)
+ p->channel = 1;
+ else
+ p->channel = 0;
+
+ reglo = error_data[ECCR0_ADDR_LO];
+ reghi = error_data[ECCR0_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[0].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[0].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR0_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ reglo = error_data[ECCR1_ADDR_LO];
+ reghi = error_data[ECCR1_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[1].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[1].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR1_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ return true;
+}
+
+/**
+ * convert_to_physical - Convert @error_data to a physical address.
+ * @priv: DDR memory controller private instance data.
+ * @pinf: ECC error info structure.
+ * @controller: Controller number of the MC5.
+ * @error_data: The DDRMC5 ADEC address decoder register data.
+ *
+ * Return: physical address of the DDR memory.
+ */
+static unsigned long convert_to_physical(struct mc_priv *priv,
+ union ecc_error_info pinf,
+ int controller, int *error_data)
+{
+ u32 row, blk, rsh_req_addr, interleave, ilc_base_ctrl_add, ilc_himem_en, reg, offset;
+ u64 high_mem_base, high_mem_offset, low_mem_offset, ilcmem_base;
+ unsigned long err_addr = 0, addr;
+ union row_col_mapping cols;
+ union row_col_mapping rows;
+ u32 col_bit_0;
+
+ row = pinf.rowhi << MC5_REGHI_ROW | pinf.row;
+ offset = controller * ADEC_NUM;
+
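+	/*
+	 * ADEC6..ADEC9 each hold five 6-bit fields giving the physical
+	 * address bit position of successive DRAM row bits; scatter the
+	 * row value into err_addr one bit at a time.
+	 */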
+ reg = error_data[ADEC6];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC7];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC8];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+
+ reg = error_data[ADEC9];
+ rows.i = reg;
+
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+
+ col_bit_0 = FIELD_GET(MASK_24, error_data[ADEC9]);
+ err_addr |= (pinf.col & 1) << col_bit_0;
+ pinf.col >>= 1;
+
+ cols.i = error_data[ADEC10];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ cols.i = error_data[ADEC11];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ reg = error_data[ADEC12];
+ err_addr |= (pinf.bank & BIT(0)) << (reg & MASK_0);
+ pinf.bank >>= MC5_EACHBIT;
+ err_addr |= (pinf.bank & BIT(0)) << FIELD_GET(MC5_BANK1_MASK, reg);
+ pinf.bank >>= MC5_EACHBIT;
+
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_0_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_1_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.group >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC4];
+ err_addr |= (pinf.rank & BIT(0)) << (reg & MASK_0);
+ pinf.rank >>= MC5_EACHBIT;
+ err_addr |= (pinf.rank & BIT(0)) << FIELD_GET(MC5_RANK_1_MASK, reg);
+ pinf.rank >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC5];
+ err_addr |= (pinf.lrank & BIT(0)) << (reg & MASK_0);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_1_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_2_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+
+ high_mem_base = (priv->adec[ADEC2 + offset] & MC5_MEM_MASK) * MC5_HIMEM_BASE;
+ interleave = priv->adec[ADEC13 + offset] & MC5_INTERLEAVE_SEL;
+
+ high_mem_offset = priv->adec[ADEC3 + offset] & MC5_MEM_MASK;
+ low_mem_offset = priv->adec[ADEC1 + offset] & MC5_MEM_MASK;
+ reg = priv->adec[ADEC14 + offset];
+ ilc_himem_en = !!(reg & MC5_ILC_HIMEM_EN);
+ ilcmem_base = (reg & MC5_ILC_MEM) * SZ_1M;
+ if (ilc_himem_en)
+ ilc_base_ctrl_add = ilcmem_base - high_mem_offset;
+ else
+ ilc_base_ctrl_add = ilcmem_base - low_mem_offset;
+
+ if (priv->dwidth == DEV_X16) {
+ blk = err_addr / MC5_X16_SIZE;
+ rsh_req_addr = (blk << 8) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ } else {
+ blk = err_addr / MC5_X32_SIZE;
+ rsh_req_addr = (blk << 9) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ }
+
+ if ((priv->adec[ADEC2 + offset] & MC5_HIGH_MEM_EN) && err_addr >= high_mem_base)
+ addr = err_addr - high_mem_offset;
+ else
+ addr = err_addr - low_mem_offset;
+
+ return addr;
+}
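+
+/*
+ * Illustrative example (the mapping values are made up): if ADEC6 placed
+ * row bits 0-4 at physical address bits 6-10, a row value of 0b10011 would
+ * scatter into err_addr as BIT(6) | BIT(7) | BIT(10) = 0x4c0. The decode
+ * above applies the same bit-by-bit scatter to the row, column, bank,
+ * rank and logical-rank fields.
+ */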
+
+/**
+ * handle_error - Handle errors.
+ * @priv: DDR memory controller private instance data.
+ * @stat: ECC status structure.
+ * @ctl_num: Controller number of the MC5.
+ * @error_data: The MC5 ADEC address-decoder register data.
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
+ int ctl_num, int *error_data)
+{
+ union ecc_error_info pinf;
+ struct mem_ctl_info *mci;
+ unsigned long pa;
+ phys_addr_t pfn;
+ int err;
+
+ if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
+ return;
+
+ mci = priv->mci[ctl_num];
+
+ if (stat->error_type == MC5_ERR_TYPE_CE) {
+ pinf = stat->ceinfo[stat->channel];
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s Controller %d Addr at %lx\n",
+ "CE", ctl_num, convert_to_physical(priv, pinf, ctl_num, error_data));
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (stat->error_type == MC5_ERR_TYPE_UE) {
+ pinf = stat->ueinfo[stat->channel];
+ pa = convert_to_physical(priv, pinf, ctl_num, error_data);
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s Controller %d Addr at %lx\n",
+ "UE", ctl_num, pa);
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ pfn = PHYS_PFN(pa);
+
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ err = memory_failure(pfn, MF_ACTION_REQUIRED);
+ if (err)
+ edac_dbg(2, "memory_failure() error: %d", err);
+ else
+ edac_dbg(2, "Poison page at PA 0x%lx\n", pa);
+ }
+ }
+}
+
+static void mc_init(struct mem_ctl_info *mci, struct device *dev)
+{
+ struct mc_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 row;
+ int ch;
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR5;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "VersalNET DDR5";
+ mci->dev_name = dev_name(dev);
+ mci->mod_name = "versalnet_edac";
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ for (ch = 0; ch < csi->nr_channels; ch++) {
+ dimm = csi->channels[ch]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = MEM_DDR5;
+ dimm->grain = MC5_ERR_GRAIN;
+ dimm->dtype = priv->dwidth;
+ }
+ }
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static unsigned int mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ return MCDI_RPC_TIMEOUT;
+}
+
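+/*
+ * rpmsg_send() takes a single contiguous buffer, so the MCDI header and
+ * payload are coalesced into one temporary allocation before sending.
+ */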
+static void mcdi_request(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ void *send_buf;
+ int ret;
+
+ send_buf = kzalloc(hdr_len + sdu_len, GFP_KERNEL);
+ if (!send_buf)
+ return;
+
+ memcpy(send_buf, hdr, hdr_len);
+ memcpy(send_buf + hdr_len, sdu, sdu_len);
+
+ ret = rpmsg_send(cdx->ept, send_buf, hdr_len + sdu_len);
+ if (ret)
+ dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data: %d\n", ret);
+
+ kfree(send_buf);
+}
+
+static const struct cdx_mcdi_ops mcdi_ops = {
+ .mcdi_rpc_timeout = mcdi_rpc_timeout,
+ .mcdi_request = mcdi_request,
+};
+
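+/*
+ * Fetch the ADEC address-decoder block for one controller over MCDI. On
+ * RPC failure the buffer is left untouched; since the adec array lives in
+ * zero-initialized private data, such a controller then decodes as all
+ * zeroes.
+ */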
+static void get_ddr_config(u32 index, u32 *buffer, struct cdx_mcdi *amd_mcdi)
+{
+ size_t outlen;
+ int ret;
+
+ MCDI_DECLARE_BUF(inbuf, MC_GET_DDR_CONFIG_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, BUFFER_SZ);
+
+ MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, index);
+
+ ret = cdx_mcdi_rpc(amd_mcdi, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (!ret)
+ memcpy(buffer, MCDI_PTR(outbuf, GET_DDR_CONFIG),
+ (ADEC_NUM * 4));
+}
+
+static int setup_mcdi(struct mc_priv *mc_priv)
+{
+ struct cdx_mcdi *amd_mcdi;
+ int ret, i;
+
+ amd_mcdi = kzalloc(sizeof(*amd_mcdi), GFP_KERNEL);
+ if (!amd_mcdi)
+ return -ENOMEM;
+
+ amd_mcdi->mcdi_ops = &mcdi_ops;
+ ret = cdx_mcdi_init(amd_mcdi);
+ if (ret) {
+ kfree(amd_mcdi);
+ return ret;
+ }
+
+ amd_mcdi->ept = mc_priv->ept;
+ mc_priv->mcdi = amd_mcdi;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++)
+ get_ddr_config(i, &mc_priv->adec[ADEC_NUM * i], amd_mcdi);
+
+ return 0;
+}
+
+static const guid_t amd_versalnet_guid = GUID_INIT(0x82678888, 0xa556, 0x44f2,
+ 0xb8, 0xb4, 0x45, 0x56, 0x2e,
+ 0x8c, 0x5b, 0xec);
+
+static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+ const guid_t *sec_type = &guid_null;
+ u32 length, offset, error_id;
+ u32 *result = (u32 *)data;
+ struct ecc_status *p;
+ int i, j, k, sec_sev;
+ const char *err_str;
+ u32 *adec_data;
+
+ if (*(u8 *)data == MCDI_RESPONSE) {
+ cdx_mcdi_process_cmd(mc_priv->mcdi, (struct cdx_dword *)data, len);
+ return 0;
+ }
+
+ sec_sev = result[ERROR_LEVEL];
+ error_id = result[ERROR_ID];
+ length = result[MSG_ERR_LENGTH];
+ offset = result[MSG_ERR_OFFSET];
+
+ if (result[TOTAL_ERR_LENGTH] > length) {
+ if (!mc_priv->part_len)
+ mc_priv->part_len = length;
+ else
+ mc_priv->part_len += length;
+ /*
+ * The register data can arrive split across two messages. Reconstruct
+ * the full block from both parts; the offset field gives the position
+ * at which this message's payload belongs.
+ */
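+ /*
+ * Illustrative example: a 40-word report may arrive as (offset 0,
+ * length 24) followed by (offset 24, length 16); only the callback
+ * that completes the report proceeds past this block.
+ */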
+ for (i = 0; i < length; i++) {
+ k = offset + i;
+ j = ERROR_DATA + i;
+ mc_priv->regs[k] = result[j];
+ }
+ if (mc_priv->part_len < result[TOTAL_ERR_LENGTH])
+ return 0;
+ mc_priv->part_len = 0;
+ }
+
+ mc_priv->error_id = error_id;
+ mc_priv->error_level = result[ERROR_LEVEL];
+
+ switch (error_id) {
+ case 5: err_str = "General Software Non-Correctable error"; break;
+ case 6: err_str = "CFU error"; break;
+ case 7: err_str = "CFRAME error"; break;
+ case 10: err_str = "DDRMC Microblaze Correctable ECC error"; break;
+ case 11: err_str = "DDRMC Microblaze Non-Correctable ECC error"; break;
+ case 15: err_str = "MMCM error"; break;
+ case 16: err_str = "HNICX Correctable error"; break;
+ case 17: err_str = "HNICX Non-Correctable error"; break;
+
+ case 18:
+ case 19:
+ p = &mc_priv->stat;
+ memset(p, 0, sizeof(struct ecc_status));
+ p->error_type = (error_id == 18) ? MC5_ERR_TYPE_CE : MC5_ERR_TYPE_UE;
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ if (get_ddr_info(&mc_priv->regs[i * REGS_PER_CONTROLLER], mc_priv)) {
+ adec_data = mc_priv->adec + ADEC_NUM * i;
+ handle_error(mc_priv, &mc_priv->stat, i, adec_data);
+ }
+ }
+ return 0;
+
+ case 21: err_str = "GT Non-Correctable error"; break;
+ case 22: err_str = "PL Sysmon Correctable error"; break;
+ case 23: err_str = "PL Sysmon Non-Correctable error"; break;
+ case 111: err_str = "LPX unexpected dfx activation error"; break;
+ case 114: err_str = "INT_LPD Non-Correctable error"; break;
+ case 116: err_str = "INT_OCM Non-Correctable error"; break;
+ case 117: err_str = "INT_FPD Correctable error"; break;
+ case 118: err_str = "INT_FPD Non-Correctable error"; break;
+ case 120: err_str = "INT_IOU Non-Correctable error"; break;
+ case 123: err_str = "err_int_irq from APU GIC Distributor"; break;
+ case 124: err_str = "fault_int_irq from APU GIC Distribute"; break;
+ case 132 ... 139: err_str = "FPX SPLITTER error"; break;
+ case 140: err_str = "APU Cluster 0 error"; break;
+ case 141: err_str = "APU Cluster 1 error"; break;
+ case 142: err_str = "APU Cluster 2 error"; break;
+ case 143: err_str = "APU Cluster 3 error"; break;
+ case 145: err_str = "WWDT1 LPX error"; break;
+ case 147: err_str = "IPI error"; break;
+ case 152 ... 153: err_str = "AFIFS error"; break;
+ case 154 ... 155: err_str = "LPX glitch error"; break;
+ case 185 ... 186: err_str = "FPX AFIFS error"; break;
+ case 195 ... 199: err_str = "AFIFM error"; break;
+ case 108: err_str = "PSM Correctable error"; break;
+ case 59: err_str = "PMC correctable error"; break;
+ case 60: err_str = "PMC Un correctable error"; break;
+ case 43 ... 47: err_str = "PMC Sysmon error"; break;
+ case 163 ... 184: err_str = "RPU error"; break;
+ case 148: err_str = "OCM0 correctable error"; break;
+ case 149: err_str = "OCM1 correctable error"; break;
+ case 150: err_str = "OCM0 Un-correctable error"; break;
+ case 151: err_str = "OCM1 Un-correctable error"; break;
+ case 189: err_str = "PSX_CMN_3 PD block consolidated error"; break;
+ case 191: err_str = "FPD_INT_WRAP PD block consolidated error"; break;
+ case 232: err_str = "CRAM Un-Correctable error"; break;
+ default: err_str = "Unknown error"; break;
+ }
+
+ snprintf(mc_priv->message,
+ sizeof(mc_priv->message),
+ "[VERSAL_EDAC_ERR_ID: %d] Error type: %s", error_id, err_str);
+
+ /* Convert to bytes */
+ length = result[TOTAL_ERR_LENGTH] * 4;
+ log_non_standard_event(sec_type, &amd_versalnet_guid, mc_priv->message,
+ sec_sev, (void *)&result[ERROR_DATA], length);
+
+ return 0;
+}
+
+static struct rpmsg_device_id amd_rpmsg_id_table[] = {
+ { .name = "error_ipc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, amd_rpmsg_id_table);
+
+static int rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo;
+ struct mc_priv *pg;
+
+ pg = (struct mc_priv *)amd_rpmsg_id_table[0].driver_data;
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpdev->dst;
+ strscpy(chinfo.name, amd_rpmsg_id_table[0].name,
+ sizeof(chinfo.name));
+
+ pg->ept = rpmsg_create_ept(rpdev, rpmsg_cb, NULL, chinfo);
+ if (!pg->ept)
+ return dev_err_probe(&rpdev->dev, -ENXIO, "Failed to create ept for channel %s\n",
+ chinfo.name);
+
+ dev_set_drvdata(&rpdev->dev, pg);
+
+ return 0;
+}
+
+static void rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+
+ rpmsg_destroy_ept(mc_priv->ept);
+ dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+static struct rpmsg_driver amd_rpmsg_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .probe = rpmsg_probe,
+ .remove = rpmsg_remove,
+ .callback = rpmsg_cb,
+ .id_table = amd_rpmsg_id_table,
+};
+
+static void versal_edac_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int init_versalnet(struct mc_priv *priv, struct platform_device *pdev)
+{
+ u32 num_chans, rank, dwidth, config;
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct device *dev;
+ enum dev_type dt;
+ char *name;
+ int rc, i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ config = priv->adec[CONF + i * ADEC_NUM];
+ num_chans = FIELD_GET(MC5_NUM_CHANS_MASK, config);
+ rank = 1 << FIELD_GET(MC5_RANK_MASK, config);
+ dwidth = FIELD_GET(MC5_BUS_WIDTH_MASK, config);
+
+ switch (dwidth) {
+ case XDDR5_BUS_WIDTH_16:
+ dt = DEV_X16;
+ break;
+ case XDDR5_BUS_WIDTH_32:
+ dt = DEV_X32;
+ break;
+ case XDDR5_BUS_WIDTH_64:
+ dt = DEV_X64;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ if (dt == DEV_UNKNOWN)
+ continue;
+
+ /* Register a memory controller instance for each enabled controller. */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = rank;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+
+ rc = -ENOMEM;
+ mci = edac_mc_alloc(i, ARRAY_SIZE(layers), layers,
+ sizeof(struct mc_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed memory allocation for MC%d\n", i);
+ goto err_alloc;
+ }
+
+ priv->mci[i] = mci;
+ priv->dwidth = dt;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ goto err_alloc;
+ dev->release = versal_edac_release;
+ name = kasprintf(GFP_KERNEL, "versal-net-ddrmc5-edac-%d", i);
+ if (!name) {
+ kfree(dev);
+ goto err_alloc;
+ }
+ dev->init_name = name;
+ rc = device_register(dev);
+ if (rc)
+ goto err_alloc;
+
+ mci->pdev = dev;
+
+ platform_set_drvdata(pdev, priv);
+
+ mc_init(mci, dev);
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register MC%d with EDAC core\n", i);
+ goto err_alloc;
+ }
+ }
+ return 0;
+
+err_alloc:
+ while (i--) {
+ mci = priv->mci[i];
+ if (!mci)
+ continue;
+
+ if (mci->pdev) {
+ edac_mc_del_mc(mci->pdev);
+ device_unregister(mci->pdev);
+ }
+
+ edac_mc_free(mci);
+ }
+
+ return rc;
+}
+
+static void remove_versalnet(struct mc_priv *priv)
+{
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ if (!priv->mci[i])
+ continue;
+
+ mci = edac_mc_del_mc(priv->mci[i]->pdev);
+ device_unregister(priv->mci[i]->pdev);
+ if (!mci)
+ continue;
+
+ edac_mc_free(mci);
+ }
+}
+
+static int mc_probe(struct platform_device *pdev)
+{
+ struct device_node *r5_core_node;
+ struct mc_priv *priv;
+ struct rproc *rp;
+ int rc;
+
+ r5_core_node = of_parse_phandle(pdev->dev.of_node, "amd,rproc", 0);
+ if (!r5_core_node) {
+ dev_err(&pdev->dev, "amd,rproc: invalid phandle\n");
+ return -EINVAL;
+ }
+
+ rp = rproc_get_by_phandle(r5_core_node->phandle);
+ of_node_put(r5_core_node);
+ if (!rp)
+ return -EPROBE_DEFER;
+
+ rc = rproc_boot(rp);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to attach to remote processor\n");
+ goto err_rproc_boot;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ amd_rpmsg_id_table[0].driver_data = (kernel_ulong_t)priv;
+
+ rc = register_rpmsg_driver(&amd_rpmsg_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register RPMsg driver: %d\n", rc);
+ goto err_alloc;
+ }
+
+ rc = setup_mcdi(priv);
+ if (rc)
+ goto err_unreg;
+
+ priv->mcdi->r5_rproc = rp;
+
+ rc = init_versalnet(priv, pdev);
+ if (rc)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ cdx_mcdi_finish(priv->mcdi);
+
+err_unreg:
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+
+err_alloc:
+ rproc_shutdown(rp);
+
+err_rproc_boot:
+ rproc_put(rp);
+
+ return rc;
+}
+
+static void mc_remove(struct platform_device *pdev)
+{
+ struct mc_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+ remove_versalnet(priv);
+ rproc_shutdown(priv->mcdi->r5_rproc);
+ cdx_mcdi_finish(priv->mcdi);
+}
+
+static const struct of_device_id amd_edac_match[] = {
+ { .compatible = "xlnx,versal-net-ddrmc5", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, amd_edac_match);
+
+static struct platform_driver amd_ddr_edac_mc_driver = {
+ .driver = {
+ .name = "versal-net-edac",
+ .of_match_table = amd_edac_match,
+ },
+ .probe = mc_probe,
+ .remove = mc_remove,
+};
+
+module_platform_driver(amd_ddr_edac_mc_driver);
+
+MODULE_AUTHOR("AMD Inc");
+MODULE_DESCRIPTION("Versal NET EDAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 78b10c6ef7fe..2e93189d7142 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -41,7 +41,7 @@
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
-#define FW_CDEV_KERNEL_VERSION 5
+#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index 7cfc5fdfa49d..64863db7a715 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -198,7 +198,10 @@ static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
if (err < 0) {
- dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ if (err != -ENODEV)
+ dev_warn(bpmp->dev,
+ "failed to parse memory region: %d\n", err);
+
return err;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d8ac40d0eb6f..23b5820e1854 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -485,7 +485,6 @@ config GPIO_MM_LANTIQ
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
- select OF_GPIO_MM_GPIOCHIP
config GPIO_MPC8XXX
bool "MPC512x/MPC8xxx/QorIQ GPIO support"
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index dad0eca1ca2e..00f209157fd0 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -8,7 +8,7 @@
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -19,7 +19,8 @@
static DEFINE_SPINLOCK(gpio_lock);
struct mpc52xx_gpiochip {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
unsigned int shadow_dvo;
unsigned int shadow_gpioe;
unsigned int shadow_ddr;
@@ -43,8 +44,8 @@ struct mpc52xx_gpiochip {
*/
static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
@@ -57,9 +58,8 @@ static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (7 - gpio);
@@ -87,9 +87,8 @@ mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -110,9 +109,8 @@ static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -136,30 +134,41 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct mpc52xx_gpio_wkup __iomem *regs;
struct gpio_chip *gc;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 8;
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
gc->set = mpc52xx_wkup_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
chip->shadow_ddr = in_8(&regs->wkup_ddr);
chip->shadow_dvo = in_8(&regs->wkup_dvo);
@@ -167,13 +176,6 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
return 0;
}
-static void mpc52xx_gpiochip_remove(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip = platform_get_drvdata(ofdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
-}
-
static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
{ .compatible = "fsl,mpc5200-gpio-wkup", },
{}
@@ -185,7 +187,6 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
.probe = mpc52xx_wkup_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
/*
@@ -207,8 +208,8 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
*/
static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
@@ -219,9 +220,8 @@ static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (31 - gpio);
@@ -248,9 +248,8 @@ mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -271,9 +270,8 @@ static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -298,30 +296,41 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct gpio_chip *gc;
struct mpc52xx_gpio __iomem *regs;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 32;
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
gc->set = mpc52xx_simple_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
chip->shadow_ddr = in_be32(&regs->simple_ddr);
chip->shadow_dvo = in_be32(&regs->simple_dvo);
@@ -340,7 +349,6 @@ static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.of_match_table = mpc52xx_simple_gpiochip_match,
},
.probe = mpc52xx_simple_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index e8a32dfebdcb..3f8b72311f8e 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -274,7 +274,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (!chip->ngpio) {
ret = gpiochip_get_ngpios(chip, chip->parent);
if (ret)
- return ERR_PTR(ret);
+ goto err_free_gpio;
}
/* if not set, assume there is only one register */
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 12b24a717e43..284e762d92c4 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -942,8 +942,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
+ int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@@ -957,6 +958,12 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
+
+ /* ACPI uses hundredths of milliseconds units */
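+ /* e.g. a DebounceTimeout of 250 (2.5 ms) becomes 2500 us here */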
+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
+ if (ret)
+ return ERR_PTR(ret);
+
return desc;
}
@@ -992,7 +999,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int ret;
for (i = 0, idx = 0; idx <= index; i++) {
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index bfb04e67c4bc..7b95d1b03361 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -319,6 +319,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
},
{
/*
+ * Same as G1619-04. New model.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
* Spurious wakeups from GPIO 11
* Found in BIOS 1.04
* https://gitlab.freedesktop.org/drm/amd/-/issues/3954
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 0d2b470a252e..74d54513730a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -4604,6 +4604,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
return desc;
}
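+/*
+ * Look up a GPIO by fwnode, falling back to the fwnode's secondary
+ * (typically a software node) when the primary lookup finds nothing.
+ */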
+static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode,
+ struct device *consumer,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *flags,
+ unsigned long *lookupflags)
+{
+ struct gpio_desc *desc;
+
+ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags);
+ if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode))
+ desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id,
+ idx, flags, lookupflags);
+
+ return desc;
+}
+
struct gpio_desc *gpiod_find_and_request(struct device *consumer,
struct fwnode_handle *fwnode,
const char *con_id,
@@ -4622,8 +4639,8 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
int ret = 0;
scoped_guard(srcu, &gpio_devices_srcu) {
- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
- &flags, &lookupflags);
+ desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx,
+ &flags, &lookupflags);
if (gpiod_not_found(desc) && platform_lookup_allowed) {
/*
* Either we are not using DT or ACPI, or their lookup
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fbe7616555c8..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
- if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
- if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 33eb4826b58b..aa88bad7416b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -426,7 +426,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@@ -516,11 +518,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 01d234cf8156..c8459337fcb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5136,7 +5136,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
- if (!adev->in_s0ix && !adev->in_runpm)
+ if (!adev->in_runpm)
amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
@@ -5156,10 +5156,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- amdgpu_userq_suspend(adev);
- }
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ amdgpu_userq_suspend(adev);
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5254,15 +5252,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- if (r)
- goto exit;
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
- r = amdgpu_userq_resume(adev);
- if (r)
- goto exit;
- }
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -5275,7 +5271,7 @@ exit:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
- if (!adev->in_s0ix && !r && !adev->in_runpm)
+ if (!r && !adev->in_runpm)
r = amdgpu_amdkfd_resume_process(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c85de8c8f6f5..c37527704d43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1654,6 +1654,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.pfp_fw_version >= 102 &&
+ adev->gfx.mec_fw_version >= 66 &&
+ adev->mes.fw_version[0] >= 128) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7e749f9b6d69..349c351e242b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1550,6 +1550,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.unhalt(node->dqm);
+ if (r) {
+ dev_err(kfd_device, "Error in starting scheduler\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1567,6 +1586,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.halt(node->dqm);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4e86370ae705..ef026143dc1c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2037,6 +2037,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
+ adev->dm.restore_backlight = true;
+
adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev);
if (!adev->dm.hpd_rx_offload_wq) {
drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n");
@@ -3399,6 +3401,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
dc_resume(dm->dc);
+ adev->dm.restore_backlight = true;
amdgpu_dm_irq_resume_early(adev);
@@ -8717,7 +8720,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
+{
+ /*
+ * We cannot be sure that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ * So we have to go through the CRTC to find the right IRQ.
+ */
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+ struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8770,7 +8782,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+ }
+
} else {
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
@@ -9792,7 +9832,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
bool mode_set_reset_required = false;
u32 i;
struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
- bool set_backlight_level = false;
/* Disable writeback */
for_each_old_connector_in_state(state, connector, old_con_state, i) {
@@ -9912,7 +9951,6 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
acrtc->hw_mode = new_crtc_state->mode;
crtc->hwmode = new_crtc_state->mode;
mode_set_reset_required = true;
- set_backlight_level = true;
} else if (modereset_required(new_crtc_state)) {
drm_dbg_atomic(dev,
"Atomic commit: RESET. crtc id %d:[%p]\n",
@@ -9969,13 +10007,16 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
* to fix a flicker issue.
* It will cause the dm->actual_brightness is not the current panel brightness
* level. (the dm->brightness is the correct panel level)
- * So we set the backlight level with dm->brightness value after set mode
+ * So we set the backlight level with dm->brightness value after initial
+ * set mode. Use restore_backlight flag to avoid setting backlight level
+ * for every subsequent mode set.
*/
- if (set_backlight_level) {
+ if (dm->restore_backlight) {
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i])
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
+ dm->restore_backlight = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b937da0a4e4a..6aae51c1beb3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -611,6 +611,13 @@ struct amdgpu_display_manager {
u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
/**
+ * @restore_backlight:
+ *
+ * Flag to indicate whether to restore backlight after modeset.
+ */
+ bool restore_backlight;
+
+ /**
* @aux_hpd_discon_quirk:
*
* quirk for hpd discon while aux is on-going.
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ebabfe3a512f..c0dfe2d8b3be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -821,7 +821,7 @@ int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
- bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
/* shaper LUT is only available if 3D LUT color caps */
exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index eef51652ca35..3d2f8eedeef2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1633,7 +1633,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
drm_object_attach_property(&plane->base,
dm->adev->mode_info.plane_ctm_property, 0);
- if (dpp_color_caps.hw_3d_lut) {
+ if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
drm_object_attach_property(&plane->base,
mode_info.plane_shaper_lut_property, 0);
drm_object_attach_property(&plane->base,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index bb1ac12a2b09..0e638bc6bf77 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -587,9 +587,118 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+static void dcn35_save_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr_dcn35 *clk_mgr)
{
+ struct dcn35_clk_internal internal = {0};
+ char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+
+ dcn35_save_clk_registers_internal(&internal, &clk_mgr->base.base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
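+
+ /*
+ * The 3-bit bypass fields index bypass_clks[] above, which has only
+ * five entries; out-of-range values are clamped to 0 ("0x0 DFS")
+ * before the table lookups below.
+ */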
+
+ if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
+
+ DC_LOG_SMU("dcfclk,%d,%d,%d,%s\n",
+ regs_and_bypass->dcfclk,
+ regs_and_bypass->dcf_deep_sleep_divider,
+ regs_and_bypass->dcf_deep_sleep_allow,
+ bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
+
+ DC_LOG_SMU("dprefclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dprefclk,
+ bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
+
+ DC_LOG_SMU("dispclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dispclk,
+ bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
+
+ // REGISTER VALUES
+ DC_LOG_SMU("reg_name,value,clk_type");
+
+ DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk",
+ internal.CLK1_CLK3_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk",
+ internal.CLK1_CLK4_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider",
+ internal.CLK1_CLK3_DS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow",
+ internal.CLK1_CLK3_ALLOW_DS);
+
+ DC_LOG_SMU("CLK1_CLK2_CURRENT_CNT,%d,dprefclk",
+ internal.CLK1_CLK2_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK0_CURRENT_CNT,%d,dispclk",
+ internal.CLK1_CLK0_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk",
+ internal.CLK1_CLK1_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass",
+ internal.CLK1_CLK3_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass",
+ internal.CLK1_CLK2_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass",
+ internal.CLK1_CLK0_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass",
+ internal.CLK1_CLK1_BYPASS_CNTL);
+
+ }
}
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
@@ -623,6 +732,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr_int);
init_clk_states(clk_mgr);
@@ -633,6 +743,13 @@ void dcn35_init_clocks(struct clk_mgr *clk_mgr)
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+ dcn35_save_clk_registers(&clk_mgr->boot_snapshot, clk_mgr_dcn35);
+
+ clk_mgr->clks.ref_dtbclk_khz = clk_mgr->boot_snapshot.dtbclk * 10;
+ if (clk_mgr->boot_snapshot.dtbclk > 59000) {
+ /* dtbclk is deemed enabled when the boot value exceeds 59000 (~590 MHz in 10 kHz units) */
+ clk_mgr->clks.dtbclk_en = true;
+ }
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -1323,7 +1440,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
+ dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index f24e1da68269..8c230cf8939b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -1348,7 +1348,6 @@ union surface_update_flags {
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
uint32_t coeff_reduction_change:1;
- uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
uint32_t gamut_remap_change:1;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index b7c2d3095b25..5e57bd1a08e7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1982,10 +1982,8 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index cc9f40d97af2..61167c19359d 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -2019,10 +2019,8 @@ void dcn401_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b47cb4a5f488..408f05dfab90 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2236,7 +2236,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
return ret;
}
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 19c04687b0fe..8e650a02c528 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -134,7 +134,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si
* 3. The Delays are often longer a lot when system resume from S3/S4.
*/
if (j)
- mdelay(j + 1);
+ msleep(j + 1);
/* Wait for EDID offset to show up in mirror register */
vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index c0ad8f59e483..8b3304dedcd9 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2677,7 +2677,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2746,8 +2746,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
- if (platform->pdata.intp_irq)
+ if (platform->pdata.intp_irq) {
+ enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
+ }
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index a614d1384f71..38726ae1bf15 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
- if (!mhdp_state->current_mode)
- return;
+ if (!mhdp_state->current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
drm_mode_set_name(mhdp_state->current_mode);
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index db9b089ef62c..86853535fb7b 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2432,8 +2432,6 @@ static const struct drm_gpuvm_ops lock_ops = {
*
* The expected usage is::
*
- * .. code-block:: c
- *
* vm_bind {
* struct drm_exec exec;
*
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 50c286c5cee8..ac27e86c601c 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -968,7 +968,7 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
// nul-terminated string.
let url_cstr: &CStr = unsafe { CStr::from_char_ptr(url) };
let segments = &[
- &Segment::Binary(url_cstr.as_bytes()),
+ &Segment::Binary(url_cstr.to_bytes()),
&Segment::Numeric(&data_slice[0..data_len]),
];
match EncodedMsg::new(segments, tmp_slice) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 1cf394369127..c0feca58511d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -726,8 +726,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
if (hdmi_dev) {
pdev = hdmi_dev->dev;
- pci_set_drvdata(pdev, NULL);
oaktrail_hdmi_i2c_exit(pdev);
+ pci_set_drvdata(pdev, NULL);
iounmap(hdmi_dev->regs);
kfree(hdmi_dev);
pci_dev_put(pdev);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0405396c7750..9ecbb4b99c37 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -596,8 +596,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(display->drm,
- master == INVALID_TRANSCODER);
+ if (drm_WARN_ON(display->drm,
+ master == INVALID_TRANSCODER))
+ master = TRANSCODER_A;
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 41228478b21c..0a3a3f6a5f9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -546,7 +546,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
luminance_range->max_luminance,
panel->vbt.backlight.pwm_freq_hz,
intel_dp->edp_dpcd, &current_level, &current_mode,
- false);
+ panel->backlight.edp.vesa.luminance_control_support);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index e3d188455f67..b9dae15c1d16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -514,6 +514,13 @@ static int __create_shmem(struct drm_i915_private *i915,
if (IS_ERR(filp))
return PTR_ERR(filp);
+ /*
+ * Prevent -EFBIG by allowing large writes beyond MAX_NON_LFS on shmem
+ * objects by setting O_LARGEFILE.
+ */
+ if (force_o_largefile())
+ filp->f_flags |= O_LARGEFILE;
+
obj->filp = filp;
return 0;
}
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 8f17394cc82a..df76653e649a 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -886,8 +886,7 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
if (IS_ERR_OR_NULL(queue))
return;
- if (queue->entity.fence_context)
- drm_sched_entity_destroy(&queue->entity);
+ drm_sched_entity_destroy(&queue->entity);
if (queue->scheduler.ops)
drm_sched_fini(&queue->scheduler);
@@ -3558,11 +3557,6 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
if (!group)
return -EINVAL;
- for (u32 i = 0; i < group->queue_count; i++) {
- if (group->queues[i])
- drm_sched_entity_destroy(&group->queues[i]->entity);
- }
-
mutex_lock(&sched->reset.lock);
mutex_lock(&sched->lock);
group->destroyed = true;
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 81eb046aeebf..b9f67d7a00d8 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -117,6 +117,7 @@ enum xe_guc_action {
XE_GUC_ACTION_ENTER_S_STATE = 0x501,
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 0366a9da5977..d7719d0e36ca 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -17,6 +17,7 @@
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
* | | | - `GuC Opt In Feature KLVs`_ |
+ * | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -153,6 +154,30 @@ enum {
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
/**
+ * DOC: GuC Scheduling Policies KLVs
+ *
+ * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
+ *
+ * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
+ * Some platforms do not allow concurrent execution of RCS and CCS
+ * workloads from different address spaces. By default, the GuC prioritizes
+ * RCS submissions over CCS ones, which can lead to CCS workloads being
+ * significantly (or completely) starved of execution time. This KLV allows
+ * the driver to specify a quantum (in ms) and a ratio (percentage value
+ * between 0 and 100), and the GuC will prioritize the CCS for that
+ * percentage of each quantum. For example, specifying 100ms and 30% will
+ * make the GuC prioritize the CCS for 30ms of every 100ms.
+ * Note that this does not necessarily mean that RCS and CCS engines will
+ * only be active for their percentage of the quantum, as the restriction
+ * only kicks in if both classes are fully busy with non-compatible address
+ * spaces; i.e., if one engine is idle or running the same address space,
+ * a pending job on the other engine will still be submitted to the HW no
+ * matter what the ratio is.
+ */
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
+
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
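
As a worked example of the layout documented above (illustrative only; the GUC_KLV_0_KEY/GUC_KLV_0_LEN field masks are assumed from this header, and the real emission helper appears in xe_guc_submit.c below): a 100 ms quantum with a 30% CCS share would be encoded as three dwords.

    /*
     * Illustrative encoding: dword 0 carries the key in bits 31:16 and
     * the number of value dwords in bits 15:0; the values follow.
     */
    u32 klv[3];

    klv[0] = FIELD_PREP(GUC_KLV_0_KEY,
                        GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY) |
             FIELD_PREP(GUC_KLV_0_LEN,
                        GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN);
    klv[1] = 100; /* quantum, in ms */
    klv[2] = 30;  /* ratio: CCS prioritized for 30 ms of every 100 ms */
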
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 7484ce55a303..d5dbc51e8612 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -158,8 +158,8 @@ int xe_bo_evict_all(struct xe_device *xe)
if (ret)
return ret;
- ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
- &xe->pinned.late.evicted, xe_bo_evict_pinned);
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external, xe_bo_evict_pinned);
if (!ret)
ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index e9b46a2d0019..58c1f397c68c 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -404,7 +404,7 @@ int __init xe_configfs_init(void)
return 0;
}
-void __exit xe_configfs_exit(void)
+void xe_configfs_exit(void)
{
configfs_unregister_subsystem(&xe_configfs);
}
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index bd9015761aa0..927ee7991696 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -308,15 +308,19 @@ int xe_device_sysfs_init(struct xe_device *xe)
return ret;
}
- if (xe->info.platform == XE_BATTLEMAGE) {
+ if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
if (ret)
- return ret;
+ goto cleanup;
ret = late_bind_create_files(dev);
if (ret)
- return ret;
+ goto cleanup;
}
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
+
+cleanup:
+ xe_device_sysfs_fini(xe);
+ return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 8991b4aed440..c07edcda99c5 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -151,6 +151,16 @@ err_lrc:
return err;
}
+static void __xe_exec_queue_fini(struct xe_exec_queue *q)
+{
+ int i;
+
+ q->ops->fini(q);
+
+ for (i = 0; i < q->width; ++i)
+ xe_lrc_put(q->lrc[i]);
+}
+
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hwe, u32 flags,
@@ -181,11 +191,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (xe_exec_queue_uses_pxp(q)) {
err = xe_pxp_exec_queue_add(xe->pxp, q);
if (err)
- goto err_post_alloc;
+ goto err_post_init;
}
return q;
+err_post_init:
+ __xe_exec_queue_fini(q);
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
@@ -283,13 +295,11 @@ void xe_exec_queue_destroy(struct kref *ref)
xe_exec_queue_put(eq);
}
- q->ops->fini(q);
+ q->ops->destroy(q);
}
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
- int i;
-
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
* and wakeup any waiters.
@@ -298,9 +308,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
wake_up_var(&q->xef->exec_queue.pending_removal);
- for (i = 0; i < q->width; ++i)
- xe_lrc_put(q->lrc[i]);
-
+ __xe_exec_queue_fini(q);
__xe_exec_queue_free(q);
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index cc1cffb5c87f..1c9d03f2a3e5 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -166,8 +166,14 @@ struct xe_exec_queue_ops {
int (*init)(struct xe_exec_queue *q);
/** @kill: Kill inflight submissions for backend */
void (*kill)(struct xe_exec_queue *q);
- /** @fini: Fini exec queue for submission backend */
+ /** @fini: Undoes the init() for submission backend */
void (*fini)(struct xe_exec_queue *q);
+ /**
+ * @destroy: Destroy exec queue for submission backend. The backend
+ * function must call xe_exec_queue_fini() (which will in turn call the
+ * fini() backend function) to ensure the queue is properly cleaned up.
+ */
+ void (*destroy)(struct xe_exec_queue *q);
/** @set_priority: Set priority for exec queue */
int (*set_priority)(struct xe_exec_queue *q,
enum xe_exec_queue_priority priority);
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 788f56b066b6..f83d421ac9d3 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -385,10 +385,20 @@ err_free:
return err;
}
-static void execlist_exec_queue_fini_async(struct work_struct *w)
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_execlist_exec_queue *exl = q->execlist;
+
+ drm_sched_entity_fini(&exl->entity);
+ drm_sched_fini(&exl->sched);
+
+ kfree(exl);
+}
+
+static void execlist_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_execlist_exec_queue *ee =
- container_of(w, struct xe_execlist_exec_queue, fini_async);
+ container_of(w, struct xe_execlist_exec_queue, destroy_async);
struct xe_exec_queue *q = ee->q;
struct xe_execlist_exec_queue *exl = q->execlist;
struct xe_device *xe = gt_to_xe(q->gt);
@@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- drm_sched_entity_fini(&exl->entity);
- drm_sched_fini(&exl->sched);
- kfree(exl);
-
xe_exec_queue_fini(q);
}
@@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
/* NIY */
}
-static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
{
- INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
- queue_work(system_unbound_wq, &q->execlist->fini_async);
+ INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
+ queue_work(system_unbound_wq, &q->execlist->destroy_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.init = execlist_exec_queue_init,
.kill = execlist_exec_queue_kill,
.fini = execlist_exec_queue_fini,
+ .destroy = execlist_exec_queue_destroy,
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index 415140936f11..92c4ba52db0c 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
bool has_run;
- struct work_struct fini_async;
+ struct work_struct destroy_async;
enum xe_exec_queue_priority active_priority;
struct list_head active_link;
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index c8eda36546d3..17634195cdc2 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -41,6 +41,7 @@
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
+#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
@@ -97,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
* reload
*/
- gt->uc.guc.submission_state.enabled = false;
+ xe_guc_submit_disable(&gt->uc.guc);
}
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 494909f74eb2..d84831a03610 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1632,7 +1632,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
- fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index b1d1d6da3758..270fc3792493 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -880,9 +880,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
return ret;
}
- guc->submission_state.enabled = true;
-
- return 0;
+ return xe_guc_submit_enable(guc);
}
int xe_guc_reset(struct xe_guc *guc)
@@ -1579,7 +1577,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
{
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
- guc->submission_state.enabled = false;
+ xe_guc_submit_disable(guc);
}
int xe_guc_reset_prepare(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index a3f421e2adc0..c30c0e3ccbbb 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
- /** @fini_async: do final fini async from this worker */
- struct work_struct fini_async;
+ /** @destroy_async: do final destroy async from this worker */
+ struct work_struct destroy_async;
/** @resume_time: time of last resume */
u64 resume_time;
/** @state: GuC specific state for this xe_exec_queue */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index cafb47711e9b..0104afbc941c 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -32,6 +32,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@@ -316,6 +317,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
+/*
+ * Given that we want to guarantee enough RCS throughput to avoid missing
+ * frames, we set the yield policy to 20% of each 80ms interval.
+ */
+#define RC_YIELD_DURATION 80 /* in ms */
+#define RC_YIELD_RATIO 20 /* in percent */
+static u32 *emit_render_compute_yield_klv(u32 *emit)
+{
+ *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
+ *emit++ = RC_YIELD_DURATION;
+ *emit++ = RC_YIELD_RATIO;
+
+ return emit;
+}
+
+#define SCHEDULING_POLICY_MAX_DWORDS 16
+static int guc_init_global_schedule_policy(struct xe_guc *guc)
+{
+ u32 data[SCHEDULING_POLICY_MAX_DWORDS];
+ u32 *emit = data;
+ u32 count = 0;
+ int ret;
+
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
+ return 0;
+
+ *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
+
+ if (CCS_MASK(guc_to_gt(guc)))
+ emit = emit_render_compute_yield_klv(emit);
+
+ count = emit - data;
+ if (count > 1) {
+ xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
+
+ ret = xe_guc_ct_send_block(&guc->ct, data, count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC sheduling policies: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int xe_guc_submit_enable(struct xe_guc *guc)
+{
+ int ret;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ return ret;
+
+ guc->submission_state.enabled = true;
+
+ return 0;
+}
+
+void xe_guc_submit_disable(struct xe_guc *guc)
+{
+ guc->submission_state.enabled = false;
+}
+
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
@@ -1277,48 +1343,57 @@ rearm:
return DRM_GPU_SCHED_STAT_NO_HANG;
}
-static void __guc_exec_queue_fini_async(struct work_struct *w)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_guc_exec_queue *ge = q->guc;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ release_guc_id(guc, q);
+ xe_sched_entity_fini(&ge->entity);
+ xe_sched_fini(&ge->sched);
+
+ /*
+ * RCU free due to the sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(ge, rcu);
+}
+
+static void __guc_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_guc_exec_queue *ge =
- container_of(w, struct xe_guc_exec_queue, fini_async);
+ container_of(w, struct xe_guc_exec_queue, destroy_async);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
- release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- xe_sched_entity_fini(&ge->entity);
- xe_sched_fini(&ge->sched);
- /*
- * RCU free due sched being exported via DRM scheduler fences
- * (timeline name).
- */
- kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
+
xe_pm_runtime_put(guc_to_xe(guc));
}
-static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
- INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+ INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
/* We must block on kernel engines so slabs are empty on driver unload */
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
- __guc_exec_queue_fini_async(&q->guc->fini_async);
+ __guc_exec_queue_destroy_async(&q->guc->destroy_async);
else
- queue_work(xe->destroy_wq, &q->guc->fini_async);
+ queue_work(xe->destroy_wq, &q->guc->destroy_async);
}
-static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
{
/*
* Might be done from within the GPU scheduler, need to do async as we
@@ -1327,7 +1402,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
* this path, and we don't really care when everything is fini'd, just that it
* is.
*/
- guc_exec_queue_fini_async(q);
+ guc_exec_queue_destroy_async(q);
}
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
@@ -1341,7 +1416,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
if (exec_queue_registered(q))
disable_scheduling_deregister(guc, q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
@@ -1574,14 +1649,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
#define STATIC_MSG_CLEANUP 0
#define STATIC_MSG_SUSPEND 1
#define STATIC_MSG_RESUME 2
-static void guc_exec_queue_fini(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
guc_exec_queue_add_msg(q, msg, CLEANUP);
else
- __guc_exec_queue_fini(exec_queue_to_guc(q), q);
+ __guc_exec_queue_destroy(exec_queue_to_guc(q), q);
}
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -1711,6 +1786,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.init = guc_exec_queue_init,
.kill = guc_exec_queue_kill,
.fini = guc_exec_queue_fini,
+ .destroy = guc_exec_queue_destroy,
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
@@ -1732,7 +1808,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else if (exec_queue_destroyed(q))
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
if (q->guc->suspend_pending) {
set_exec_queue_suspended(q);
@@ -1989,7 +2065,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 9b71a986c6ca..0d126b807c10 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -13,6 +13,8 @@ struct xe_exec_queue;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
+int xe_guc_submit_enable(struct xe_guc *guc);
+void xe_guc_submit_disable(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index c17ed1ae8649..c5b63e10bb91 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -286,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val = 0, min, max;
+ u32 reg_val = 0;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -294,7 +294,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -304,19 +304,21 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
/* Check if PL limits are disabled. */
if (!(reg_val & PWR_LIM_EN)) {
*value = PL_DISABLE;
- drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
+ drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
- *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+ *value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power;
/* For platforms with mailbox power limit support clamping would be done by pcode. */
if (!hwmon->xe->info.has_mbx_power_limits) {
- reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
- min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
- max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ u64 pkg_pwr, min, max;
+
+ pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
+ max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
if (min && max)
@@ -493,8 +495,8 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- u32 x, y, x_w = 2; /* 2 bits */
- u64 r, tau4, out;
+ u32 reg_val, x, y, x_w = 2; /* 2 bits */
+ u64 tau4, out;
int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
@@ -505,23 +507,24 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &reg_val);
if (ret) {
drm_err(&hwmon->xe->drm,
- "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
- channel, power_attr, r, ret);
- r = 0;
+ "power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
+ channel, power_attr, reg_val, ret);
+ reg_val = 0;
}
} else {
- r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel));
}
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
- x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
- y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
+ x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
+ y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);
/*
* tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
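
A worked instance of the formula in that comment, assuming the stated bit fields: with x = 2 and y = 5, tau = (1 + 2/4) * 2^5 = 48 time units. The driver can stay in integer arithmetic because x < 4 makes (4 | x) equal to 4 + x, so 4*tau is a simple shift:

    /* Illustrative fixed-point decode of the time window. */
    u32 x = 2, y = 5;                  /* example field values          */
    u64 tau4 = ((1 << 2) | x) << y;    /* 4 * tau = (4 | 2) << 5 = 192  */
    u64 tau  = tau4 >> 2;              /* tau = 48 time units           */
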
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
index 61b0a1531a53..2cfe9eb67391 100644
--- a/drivers/gpu/drm/xe/xe_nvm.c
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -35,6 +35,10 @@ static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
static void xe_nvm_release_dev(struct device *dev)
{
+ struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+ struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);
+
+ kfree(nvm);
}
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
@@ -162,6 +166,5 @@ void xe_nvm_fini(struct xe_device *xe)
auxiliary_device_delete(&nvm->aux_dev);
auxiliary_device_uninit(&nvm->aux_dev);
- kfree(nvm);
xe->nvm = NULL;
}
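
The fix above moves the kfree() out of xe_nvm_fini() and into the auxiliary device's release callback, the one point at which no reference to the structure can remain. A minimal sketch of the pattern, assuming the nvm structure embeds its auxiliary_device:

    /*
     * Free the container only from the struct device release callback:
     * auxiliary_device_uninit() drops the final reference, and release
     * runs only once the refcount hits zero, so freeing any earlier
     * risks a use-after-free.
     */
    static void nvm_release(struct device *dev)
    {
            struct auxiliary_device *aux =
                    container_of(dev, struct auxiliary_device, dev);
            struct intel_dg_nvm_dev *nvm =
                    container_of(aux, struct intel_dg_nvm_dev, aux_dev);

            kfree(nvm);
    }
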
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index b804234a6551..9e1236a9ec67 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
kt->tile = tile;
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
- if (err) {
- kobject_put(&kt->base);
- return err;
- }
+ if (err)
+ goto err_object;
tile->sysfs = &kt->base;
err = xe_vram_freq_sysfs_init(tile);
if (err)
- return err;
+ goto err_object;
return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
+
+err_object:
+ kobject_put(&kt->base);
+ return err;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index dc4f61e56579..5146999d27fa 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -240,8 +240,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
pfence = xe_preempt_fence_create(q, q->lr.context,
++q->lr.seqno);
- if (!pfence) {
- err = -ENOMEM;
+ if (IS_ERR(pfence)) {
+ err = PTR_ERR(pfence);
goto out_fini;
}
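
This fix works because xe_preempt_fence_create() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so the !pfence test never fired and a blanket -ENOMEM hid the real errno. A minimal sketch of the convention (the caller shape is illustrative):

    struct dma_fence *fence = xe_preempt_fence_create(q, context, seqno);

    /* Failures are encoded in the pointer itself. */
    if (IS_ERR(fence))
            return PTR_ERR(fence); /* propagates e.g. -ENOMEM or -EINTR */
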
diff --git a/drivers/gpu/nova-core/fb.rs b/drivers/gpu/nova-core/fb.rs
index 4a702525fff4..e4dc74f2f90a 100644
--- a/drivers/gpu/nova-core/fb.rs
+++ b/drivers/gpu/nova-core/fb.rs
@@ -3,6 +3,7 @@
use core::ops::Range;
use kernel::prelude::*;
+use kernel::ptr::{Alignable, Alignment};
use kernel::sizes::*;
use kernel::types::ARef;
use kernel::{dev_warn, device};
@@ -130,10 +131,9 @@ impl FbLayout {
};
let frts = {
- const FRTS_DOWN_ALIGN: u64 = SZ_128K as u64;
+ const FRTS_DOWN_ALIGN: Alignment = Alignment::new::<SZ_128K>();
const FRTS_SIZE: u64 = SZ_1M as u64;
- // TODO[NUMM]: replace with `align_down` once it lands.
- let frts_base = (vga_workspace.start & !(FRTS_DOWN_ALIGN - 1)) - FRTS_SIZE;
+ let frts_base = vga_workspace.start.align_down(FRTS_DOWN_ALIGN) - FRTS_SIZE;
frts_base..frts_base + FRTS_SIZE
};
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index b5c9786619a9..600cc90b5fab 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{device, devres::Devres, error::code::*, pci, prelude::*, sync::Arc};
+use kernel::{device, devres::Devres, error::code::*, fmt, pci, prelude::*, sync::Arc};
use crate::driver::Bar0;
use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
@@ -12,7 +12,6 @@ use crate::gfw;
use crate::regs;
use crate::util;
use crate::vbios::Vbios;
-use core::fmt;
macro_rules! define_chipset {
({ $($variant:ident = $value:expr),* $(,)* }) =>
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
index a3e6de1779d4..6b9df4205f46 100644
--- a/drivers/gpu/nova-core/regs/macros.rs
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -149,10 +149,10 @@ macro_rules! register {
// TODO[REGA]: display the raw hex value, then the value of all the fields. This requires
// matching the fields, which will complexify the syntax considerably...
- impl ::core::fmt::Debug for $name {
- fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+ impl ::kernel::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::kernel::fmt::Formatter<'_>) -> ::kernel::fmt::Result {
f.debug_tuple(stringify!($name))
- .field(&format_args!("0x{0:x}", &self.0))
+ .field(&::kernel::prelude::fmt!("0x{0:x}", &self.0))
.finish()
}
}
diff --git a/drivers/gpu/nova-core/vbios.rs b/drivers/gpu/nova-core/vbios.rs
index 5b5d9f38cbb3..091642d6a5a1 100644
--- a/drivers/gpu/nova-core/vbios.rs
+++ b/drivers/gpu/nova-core/vbios.rs
@@ -10,6 +10,7 @@ use kernel::device;
use kernel::error::Result;
use kernel::pci;
use kernel::prelude::*;
+use kernel::ptr::{Alignable, Alignment};
/// The offset of the VBIOS ROM in the BAR0 space.
const ROM_OFFSET: usize = 0x300000;
@@ -177,8 +178,7 @@ impl<'a> Iterator for VbiosIterator<'a> {
// Advance to next image (aligned to 512 bytes).
self.current_offset += image_size;
- // TODO[NUMM]: replace with `align_up` once it lands.
- self.current_offset = self.current_offset.next_multiple_of(512);
+ self.current_offset = self.current_offset.align_up(Alignment::new::<512>())?;
Some(Ok(full_image))
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 79997553d8f9..b934523593d9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -597,8 +597,6 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
- depends on ACPI
- select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 0f2cbae39b2b..7017bfa59093 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
struct amdtp_hid_data *hid_data = hid->driver_data;
struct amdtp_cl_data *cli_data = hid_data->cli_data;
struct request_list *req_list = &cli_data->req_list;
+ struct amd_input_data *in_data = cli_data->in_data;
+ struct amd_mp2_dev *mp2;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->hid_sensor_hubs[i] == hid) {
struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work)
u8 report_id, node_type;
u8 report_size = 0;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
req_node = list_last_entry(&req_list->list, struct request_list, list);
list_del(&req_node->list);
current_index = req_node->current_index;
@@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work)
node_type = req_node->report_type;
kfree(req_node);
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
mp2_ops = mp2->mp2_ops;
if (node_type == HID_FEATURE_REPORT) {
report_size = mp2_ops->get_feat_rep(sensor_index, report_id,
@@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work)
cli_data->cur_hid_dev = current_index;
cli_data->sensor_requested_cnt[current_index] = 0;
amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
+ if (!list_empty(&req_list->list))
+ schedule_delayed_work(&cli_data->work, 0);
}
void amd_sfh_work_buffer(struct work_struct *work)
@@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work)
u8 report_size;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
cli_data->report_id[i], in_data);
hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
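
The guard(mutex)(&mp2->lock) lines added above use the scope-based cleanup helpers from <linux/cleanup.h>: the lock is acquired at the declaration and released automatically when the enclosing scope exits, so every return path is covered without explicit unlocks. A minimal sketch with a hypothetical function:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static int sensor_update(struct amd_mp2_dev *mp2)
    {
            guard(mutex)(&mp2->lock); /* held until the closing brace */

            if (!mp2->init_done)
                    return -ENODEV;   /* unlocks automatically */

            /* ... touch state protected by mp2->lock ... */
            return 0;
    }
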
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index f44a3bb2fbd4..78f830c133e5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -10,6 +10,7 @@
#ifndef AMD_SFH_COMMON_H
#define AMD_SFH_COMMON_H
+#include <linux/mutex.h>
#include <linux/pci.h>
#include "amd_sfh_hid.h"
@@ -59,6 +60,8 @@ struct amd_mp2_dev {
u32 mp2_acs;
struct sfh_dev_status dev_en;
struct work_struct work;
+ /* Protects mp2 shared data */
+ struct mutex lock;
u8 init_done;
u8 rver;
};
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2983af969579..1d9f955573aa 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -466,6 +466,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
+ rc = devm_mutex_init(&pdev->dev, &privdata->lock);
+ if (rc)
+ return rc;
+
privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
if (privdata->sfh1_1_ops) {
if (boot_cpu_data.x86 >= 0x1A)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index d27dcfb2b9e4..8db9d4e7c3b0 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -974,7 +974,10 @@ static int asus_input_mapping(struct hid_device *hdev,
case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break;
case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break;
case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break;
+ case 0x4e: asus_map_key_clear(KEY_FN_ESC); break;
+ case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER); break;
+ case 0x8b: asus_map_key_clear(KEY_PROG1); break; /* ProArt Creator Hub key */
case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */
case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */
case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 482f62a78c41..5a95ea3bec98 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -229,10 +229,12 @@ static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
- if (ret < 0)
+ if (ret != CP2112_GPIO_SET_LENGTH) {
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
+ }
- return ret;
+ return 0;
}
static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
@@ -309,9 +311,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set_unlocked(dev, offset, value);
-
- return 0;
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index b3121fa7a72d..654879814f97 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,8 +32,6 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
-#include <linux/platform_profile.h>
-
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute; KEY_MICMUTE does not work */
@@ -734,7 +732,7 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
report_key_event(input, KEY_RFKILL);
return 1;
}
- platform_profile_cycle();
+ report_key_event(input, KEY_PERFORMANCE);
return 1;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 uses Snipping tool */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index 854926b3cfd4..a2643ae790d6 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -997,6 +997,8 @@ static const struct pci_device_id quicki2c_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ }
};
MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
index d412eafcf9ea..4e60a7de4727 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -13,6 +13,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT1 0x4D48
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT2 0x4D4A
/* Packet size value, the unit is 16 bytes */
#define MAX_PACKET_SIZE_VALUE_LNL 256
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 5e5f179dd113..84314989dc53 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -976,6 +976,8 @@ static const struct pci_device_id quickspi_pci_tbl[] = {
{PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), },
{}
};
MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
index 6fdf674b21c5..f3532d866749 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -19,6 +19,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT1 0x4D49
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B
/* HIDSPI special ACPI parameters DSM methods */
#define ACPI_QUICKSPI_REVISION_NUM 2
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 8267dd1a2130..8f426f94e32a 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -23,7 +23,8 @@
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/*
* A padding packet that will help the user space tools
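
The cast in the new macro matters because nr_pages is a 32-bit value: the shift is performed at the operands' width before any implicit widening, so with 4 KiB pages any buffer of 4 GiB or more wraps to a small (or zero) modulus. A small illustration of the two behaviours on LP64:

    unsigned int nr_pages = 1U << 20;  /* 4 GiB buffer at PAGE_SHIFT == 12 */

    unsigned long bad  = nr_pages << PAGE_SHIFT;                /* wraps to 0 */
    unsigned long good = (unsigned long)nr_pages << PAGE_SHIFT; /* 1UL << 32  */
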
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 9c164a4b9bb9..b0ee9ac45a97 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -386,7 +386,7 @@ static int riic_init_hw(struct riic_dev *riic)
*/
total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
- for (cks = 0; cks < 7; cks++) {
+ for (cks = 0; cks <= 7; cks++) {
/*
* 60% low time must be less than BRL + 2 + 1
* BRL max register value is 0x1F.
diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c
index 9e1f71fed0fe..af991b28e4f8 100644
--- a/drivers/i2c/busses/i2c-rtl9300.c
+++ b/drivers/i2c/busses/i2c-rtl9300.c
@@ -307,8 +307,7 @@ out_unlock:
static u32 rtl9300_i2c_func(struct i2c_adapter *a)
{
return I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
+ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA;
}
static const struct i2c_algorithm rtl9300_i2c_algo = {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 028d9f031dde..8b506417ad2f 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -233,6 +233,7 @@ static u16 get_legacy_obj_type(u16 opcode)
{
switch (opcode) {
case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
return MLX5_EVENT_QUEUE_TYPE_RQ;
case MLX5_CMD_OP_CREATE_QP:
return MLX5_EVENT_QUEUE_TYPE_QP;
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5219d7ddfdaa..95f63c5f6159 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -555,6 +555,7 @@ struct gcr3_tbl_info {
};
struct amd_io_pgtable {
+ seqcount_t seqcount; /* Protects root/mode update */
struct io_pgtable pgtbl;
int mode;
u64 *root;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 8de689b2c5ed..ba9e582a8bbe 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1455,12 +1455,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(e->devid));
devid = e->devid;
- for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias)
+ if (alias) {
+ for (dev_i = devid_start; dev_i <= devid; ++dev_i)
pci_seg->alias_table[dev_i] = devid_to;
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
}
set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
- set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -3067,7 +3067,8 @@ static int __init early_amd_iommu_init(void)
if (!boot_cpu_has(X86_FEATURE_CX16)) {
pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index a91e71f981ef..70c2f5b1631b 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/seqlock.h>
#include <asm/barrier.h>
@@ -130,8 +131,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
+ write_seqcount_begin(&pgtable->seqcount);
pgtable->root = pte;
pgtable->mode += 1;
+ write_seqcount_end(&pgtable->seqcount);
+
amd_iommu_update_and_flush_device_table(domain);
pte = NULL;
@@ -153,6 +157,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
{
unsigned long last_addr = address + (page_size - 1);
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+ unsigned int seqcount;
int level, end_lvl;
u64 *pte, *page;
@@ -170,8 +175,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
}
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
+
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -249,6 +260,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long *page_size)
{
int level;
+ unsigned int seqcount;
u64 *pte;
*page_size = 0;
@@ -256,8 +268,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
if (address > PM_LEVEL_SIZE(pgtable->mode))
return NULL;
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -541,6 +557,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
+ seqcount_init(&pgtable->seqcount);
cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
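
The new seqcount lets lock-free readers observe root and mode as a consistent pair while increase_address_space() swaps both: the writer brackets the update, and readers retry if the sequence moved under them. Note that seqcount_t provides no writer exclusion; writers must already be serialized externally (here by the domain lock). The generic shape of the pattern, sketched:

    /* Writer (already serialized against other writers): */
    write_seqcount_begin(&pgtable->seqcount);
    pgtable->root = new_root;
    pgtable->mode += 1;
    write_seqcount_end(&pgtable->seqcount);

    /* Reader: loop until a stable snapshot of both fields. */
    do {
            seq = read_seqcount_begin(&pgtable->seqcount);
            level = pgtable->mode - 1;
            pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
    } while (read_seqcount_retry(&pgtable->seqcount, seq));
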
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9c3ab9d9f69a..dff2d895b8ab 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1575,6 +1575,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long lvl_pages = lvl_to_nr_pages(level);
struct dma_pte *pte = NULL;
+ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+ !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+ return;
+
while (start_pfn <= end_pfn) {
if (!pte)
pte = pfn_to_dma_pte(domain, start_pfn, &level,
@@ -1650,7 +1654,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long pages_to_remove;
pteval |= DMA_PTE_LARGE_PAGE;
- pages_to_remove = min_t(unsigned long, nr_pages,
+ pages_to_remove = min_t(unsigned long,
+ round_down(nr_pages, lvl_pages),
nr_pte_to_next_page(pte) * lvl_pages);
end_pfn = iov_pfn + pages_to_remove - 1;
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
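
Here lvl_pages is the number of base pages covered by one PTE at the target level (512 for a 2 MiB superpage with 4 KiB pages), so rounding nr_pages down keeps only the span that fills whole large PTEs; the new WARN_ON in switch_to_super_page() then enforces that both endpoints are aligned. A numeric illustration with assumed values:

    unsigned long nr_pages  = 1300; /* pages left to map             */
    unsigned long lvl_pages = 512;  /* pages per level-2 (2 MiB) PTE */

    /* 1300 rounds down to 1024: two full 2 MiB PTEs. The 276-page
     * tail is mapped on a later pass with smaller pages, keeping
     * end_pfn + 1 aligned to lvl_pages as the WARN_ON requires. */
    unsigned long usable = round_down(nr_pages, lvl_pages); /* 1024 */
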
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 65fbd098f9e9..4c842368289f 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -711,6 +711,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
mutex_unlock(&igroup->lock);
+ iommufd_hw_pagetable_put(idev->ictx, hwpt);
+
/* Caller must destroy hwpt */
return hwpt;
}
@@ -1057,7 +1059,6 @@ void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid)
hwpt = iommufd_hw_pagetable_detach(idev, pasid);
if (!hwpt)
return;
- iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, "IOMMUFD");
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index fc4de63b0bce..e23d9ee4fe38 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -393,12 +393,12 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
const struct file_operations *fops)
{
struct file *filep;
- int fdno;
spin_lock_init(&eventq->lock);
INIT_LIST_HEAD(&eventq->deliver);
init_waitqueue_head(&eventq->wait_queue);
+ /* The filep is fput() by the core code on failure */
filep = anon_inode_getfile(name, fops, eventq, O_RDWR);
if (IS_ERR(filep))
return PTR_ERR(filep);
@@ -408,10 +408,7 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
eventq->filep = filep;
refcount_inc(&eventq->obj.users);
- fdno = get_unused_fd_flags(O_CLOEXEC);
- if (fdno < 0)
- fput(filep);
- return fdno;
+ return get_unused_fd_flags(O_CLOEXEC);
}
static const struct file_operations iommufd_fault_fops =
@@ -452,7 +449,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
return 0;
out_put_fdno:
put_unused_fd(fdno);
- fput(fault->common.filep);
return rc;
}
@@ -536,7 +532,6 @@ int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)
out_put_fdno:
put_unused_fd(fdno);
- fput(veventq->common.filep);
out_abort:
iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj);
out_unlock_veventqs:
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0da2a81eedfa..627f9b78483a 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -454,9 +454,8 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);
- lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
-
if (hwpt_paging->auto_domain) {
+ lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
return;
}
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 15af7ced0501..ce775fbbae94 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -23,6 +23,7 @@
#include "iommufd_test.h"
struct iommufd_object_ops {
+ size_t file_offset;
void (*pre_destroy)(struct iommufd_object *obj);
void (*destroy)(struct iommufd_object *obj);
void (*abort)(struct iommufd_object *obj);
@@ -121,6 +122,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
old = xas_store(&xas, NULL);
xa_unlock(&ictx->objects);
WARN_ON(old != XA_ZERO_ENTRY);
+
+ if (WARN_ON(!refcount_dec_and_test(&obj->users)))
+ return;
+
kfree(obj);
}
@@ -131,10 +136,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
- if (iommufd_object_ops[obj->type].abort)
- iommufd_object_ops[obj->type].abort(obj);
+ const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type];
+
+ if (ops->file_offset) {
+ struct file **filep = ((void *)obj) + ops->file_offset;
+
+ /*
+ * A file should hold a users refcount while the file is open
+ * and put it back in its release. The file should hold a
+ * pointer to obj in its private data. Normal fput() is
+ * deferred to a workqueue and can get out of order with the
+ * following kfree(obj). Using the sync version ensures the
+ * release happens immediately. During abort we require that
+ * the file refcount be exactly one at this point, meaning the
+ * object alloc function cannot do anything that would allow
+ * another thread to take a refcount prior to a guaranteed
+ * success.
+ */
+ if (*filep)
+ __fput_sync(*filep);
+ }
+
+ if (ops->abort)
+ ops->abort(obj);
else
- iommufd_object_ops[obj->type].destroy(obj);
+ ops->destroy(obj);
iommufd_object_abort(ictx, obj);
}
@@ -550,16 +575,23 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_flags & VM_EXEC)
return -EPERM;
+ mtree_lock(&ictx->mt_mmap);
/* vma->vm_pgoff carries a page-shifted start position to an immap */
immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
- if (!immap)
+ if (!immap || !refcount_inc_not_zero(&immap->owner->users)) {
+ mtree_unlock(&ictx->mt_mmap);
return -ENXIO;
+ }
+ mtree_unlock(&ictx->mt_mmap);
+
/*
* mtree_load() returns the immap for any contained mmio_addr, so only
* allow the exact immap region itself to be mapped
*/
- if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
- return -ENXIO;
+ if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) {
+ rc = -ENXIO;
+ goto err_refcount;
+ }
vma->vm_pgoff = 0;
vma->vm_private_data = immap;
@@ -570,10 +602,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
immap->mmio_addr >> PAGE_SHIFT, length,
vma->vm_page_prot);
if (rc)
- return rc;
+ goto err_refcount;
+ return 0;
- /* vm_ops.open won't be called for mmap itself. */
- refcount_inc(&immap->owner->users);
+err_refcount:
+ refcount_dec(&immap->owner->users);
return rc;
}
@@ -651,6 +684,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, "IOMMUFD");
+#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \
+ .file_offset = (offsetof(_struct, _filep) + \
+ BUILD_BUG_ON_ZERO(!__same_type( \
+ struct file *, ((_struct *)NULL)->_filep)) + \
+ BUILD_BUG_ON_ZERO(offsetof(_struct, _obj)))
+
static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_ACCESS] = {
.destroy = iommufd_access_destroy_object,
@@ -661,6 +700,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
},
[IOMMUFD_OBJ_FAULT] = {
.destroy = iommufd_fault_destroy,
+ IOMMUFD_FILE_OFFSET(struct iommufd_fault, common.filep, common.obj),
},
[IOMMUFD_OBJ_HW_QUEUE] = {
.destroy = iommufd_hw_queue_destroy,
@@ -683,6 +723,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_VEVENTQ] = {
.destroy = iommufd_veventq_destroy,
.abort = iommufd_veventq_abort,
+ IOMMUFD_FILE_OFFSET(struct iommufd_veventq, common.filep, common.obj),
},
[IOMMUFD_OBJ_VIOMMU] = {
.destroy = iommufd_viommu_destroy,
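
The IOMMUFD_FILE_OFFSET() macro above folds two compile-time checks into an offset constant: BUILD_BUG_ON_ZERO(e) evaluates to 0 when e is false and breaks the build when e is true, so the sum verifies both that the named member really is a struct file * and that the iommufd_object sits at offset 0, which the file_offset pointer arithmetic in iommufd_object_abort_and_destroy() depends on. The technique in isolation, with hypothetical names:

    struct sample {
            struct iommufd_object obj; /* must stay at offset 0 */
            struct file *filep;
    };

    /* Compiles to a plain offset; fails to build if either check trips. */
    #define SAMPLE_FILE_OFFSET                                              \
            (offsetof(struct sample, filep) +                               \
             BUILD_BUG_ON_ZERO(!__same_type(struct file *,                  \
                                            ((struct sample *)NULL)->filep)) + \
             BUILD_BUG_ON_ZERO(offsetof(struct sample, obj)))
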
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 9c80d61deb2c..aa576736d60b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -612,6 +612,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain)
}
}
+static bool reg_ioat_propagate_error(int cc, u8 status)
+{
+ /*
+ * If the device is in the error state the reset routine
+ * will register the IOAT of the newly set domain on re-enable
+ */
+ if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return false;
+ /*
+ * If the device was removed, treat registration as success
+ * and let the subsequent error event trigger tear down.
+ */
+ if (cc == ZPCI_CC_INVAL_HANDLE)
+ return false;
+ return cc != ZPCI_CC_OK;
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -696,7 +713,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev->dma_table = s390_domain->dma_table;
zdev_s390_domain_update(zdev, domain);
@@ -1032,7 +1049,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
lockdep_assert_held(&zdev->dom_lock);
- if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
return NULL;
s390_domain = to_s390_domain(zdev->s390_domain);
@@ -1123,12 +1141,7 @@ static int s390_attach_dev_identity(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
-
- /*
- * If the device is undergoing error recovery the reset code
- * will re-establish the new domain.
- */
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev_s390_domain_update(zdev, domain);
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 1c7045467c48..5584e0f82cce 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -1,61 +1,78 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Aspeed AST24XX, AST25XX, AST26XX, and AST27XX SCU Interrupt Controller
* Copyright 2019 IBM Corporation
*
* Eddie James <eajames@linux.ibm.com>
*/
#include <linux/bitops.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#define ASPEED_SCU_IC_REG 0x018
-#define ASPEED_SCU_IC_SHIFT 0
-#define ASPEED_SCU_IC_ENABLE GENMASK(15, ASPEED_SCU_IC_SHIFT)
-#define ASPEED_SCU_IC_NUM_IRQS 7
#define ASPEED_SCU_IC_STATUS GENMASK(28, 16)
#define ASPEED_SCU_IC_STATUS_SHIFT 16
+#define AST2700_SCU_IC_STATUS GENMASK(15, 0)
+
+struct aspeed_scu_ic_variant {
+ const char *compatible;
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ unsigned long ier;
+ unsigned long isr;
+};
-#define ASPEED_AST2600_SCU_IC0_REG 0x560
-#define ASPEED_AST2600_SCU_IC0_SHIFT 0
-#define ASPEED_AST2600_SCU_IC0_ENABLE \
- GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
-#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6
+#define SCU_VARIANT(_compat, _shift, _enable, _num, _ier, _isr) { \
+ .compatible = _compat, \
+ .irq_shift = _shift, \
+ .irq_enable = _enable, \
+ .num_irqs = _num, \
+ .ier = _ier, \
+ .isr = _isr, \
+}
-#define ASPEED_AST2600_SCU_IC1_REG 0x570
-#define ASPEED_AST2600_SCU_IC1_SHIFT 4
-#define ASPEED_AST2600_SCU_IC1_ENABLE \
- GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
-#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+static const struct aspeed_scu_ic_variant scu_ic_variants[] __initconst = {
+ SCU_VARIANT("aspeed,ast2400-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2500-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2600-scu-ic0", 0, GENMASK(5, 0), 6, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2600-scu-ic1", 4, GENMASK(5, 4), 2, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2700-scu-ic0", 0, GENMASK(3, 0), 4, 0x00, 0x04),
+ SCU_VARIANT("aspeed,ast2700-scu-ic1", 0, GENMASK(3, 0), 4, 0x00, 0x04),
+ SCU_VARIANT("aspeed,ast2700-scu-ic2", 0, GENMASK(3, 0), 4, 0x04, 0x00),
+ SCU_VARIANT("aspeed,ast2700-scu-ic3", 0, GENMASK(1, 0), 2, 0x04, 0x00),
+};
struct aspeed_scu_ic {
- unsigned long irq_enable;
- unsigned long irq_shift;
- unsigned int num_irqs;
- unsigned int reg;
- struct regmap *scu;
- struct irq_domain *irq_domain;
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ unsigned long ier;
+ unsigned long isr;
};
-static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+static inline bool scu_has_split_isr(struct aspeed_scu_ic *scu)
+{
+ return scu->ier != scu->isr;
+}
+
+static void aspeed_scu_ic_irq_handler_combined(struct irq_desc *desc)
{
- unsigned int sts;
- unsigned long bit;
- unsigned long enabled;
- unsigned long max;
- unsigned long status;
struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+ unsigned long bit, enabled, max, status;
+ unsigned int sts, mask;
chained_irq_enter(chip, desc);
+ mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
/*
* The SCU IC has just one register to control its operation and read
* status. The interrupt enable bits occupy the lower 16 bits of the
@@ -66,7 +83,7 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
* shifting the status down to get the mapping and then back up to
* clear the bit.
*/
- regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+ sts = readl(scu_ic->base);
enabled = sts & scu_ic->irq_enable;
status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
@@ -74,43 +91,83 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
max = scu_ic->num_irqs + bit;
for_each_set_bit_from(bit, &status, max) {
- generic_handle_domain_irq(scu_ic->irq_domain,
- bit - scu_ic->irq_shift);
+ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
+ writel((readl(scu_ic->base) & ~mask) | BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT),
+ scu_ic->base);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aspeed_scu_ic_irq_handler_split(struct irq_desc *desc)
+{
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long bit, enabled, max, status;
+ unsigned int sts, mask;
- regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ chained_irq_enter(chip, desc);
+
+ mask = scu_ic->irq_enable;
+ sts = readl(scu_ic->base + scu_ic->ier);
+ enabled = sts & scu_ic->irq_enable;
+ sts = readl(scu_ic->base + scu_ic->isr);
+ status = sts & enabled;
+
+ bit = scu_ic->irq_shift;
+ max = scu_ic->num_irqs + bit;
+
+ for_each_set_bit_from(bit, &status, max) {
+ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
+ /* Clear interrupt */
+ writel(BIT(bit), scu_ic->base + scu_ic->isr);
}
chained_irq_exit(chip, desc);
}
-static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+static void aspeed_scu_ic_irq_mask_combined(struct irq_data *data)
{
struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
- unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+ unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
/*
* Status bits are cleared by writing 1. In order to prevent the mask
* operation from clearing the status bits, they should be under the
* mask and written with 0.
*/
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+ writel(readl(scu_ic->base) & ~mask, scu_ic->base);
}
-static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+static void aspeed_scu_ic_irq_unmask_combined(struct irq_data *data)
{
struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
- unsigned int mask = bit |
- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+ unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
/*
* Status bits are cleared by writing 1. In order to prevent the unmask
* operation from clearing the status bits, they should be under the
* mask and written with 0.
*/
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+ writel((readl(scu_ic->base) & ~mask) | bit, scu_ic->base);
+}
+
+static void aspeed_scu_ic_irq_mask_split(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift);
+
+ writel(readl(scu_ic->base) & ~mask, scu_ic->base + scu_ic->ier);
+}
+
+static void aspeed_scu_ic_irq_unmask_split(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+
+ writel(readl(scu_ic->base) | bit, scu_ic->base + scu_ic->ier);
}
static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
@@ -120,17 +177,29 @@ static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
return -EINVAL;
}
-static struct irq_chip aspeed_scu_ic_chip = {
+static struct irq_chip aspeed_scu_ic_chip_combined = {
.name = "aspeed-scu-ic",
- .irq_mask = aspeed_scu_ic_irq_mask,
- .irq_unmask = aspeed_scu_ic_irq_unmask,
- .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+ .irq_mask = aspeed_scu_ic_irq_mask_combined,
+ .irq_unmask = aspeed_scu_ic_irq_unmask_combined,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+};
+
+static struct irq_chip aspeed_scu_ic_chip_split = {
+ .name = "ast2700-scu-ic",
+ .irq_mask = aspeed_scu_ic_irq_mask_split,
+ .irq_unmask = aspeed_scu_ic_irq_unmask_split,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
};
static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+ struct aspeed_scu_ic *scu_ic = domain->host_data;
+
+ if (scu_has_split_isr(scu_ic))
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_split, handle_level_irq);
+ else
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_combined, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
@@ -143,21 +212,21 @@ static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
struct device_node *node)
{
- int irq;
- int rc = 0;
+ int irq, rc = 0;
- if (!node->parent) {
- rc = -ENODEV;
+ scu_ic->base = of_iomap(node, 0);
+ if (!scu_ic->base) {
+ rc = -ENOMEM;
goto err;
}
- scu_ic->scu = syscon_node_to_regmap(node->parent);
- if (IS_ERR(scu_ic->scu)) {
- rc = PTR_ERR(scu_ic->scu);
- goto err;
+ if (scu_has_split_isr(scu_ic)) {
+ writel(AST2700_SCU_IC_STATUS, scu_ic->base + scu_ic->isr);
+ writel(0, scu_ic->base + scu_ic->ier);
+ } else {
+ writel(ASPEED_SCU_IC_STATUS, scu_ic->base);
+ writel(0, scu_ic->base);
}
- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
@@ -166,75 +235,60 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
}
scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs,
- &aspeed_scu_ic_domain_ops,
- scu_ic);
+ &aspeed_scu_ic_domain_ops, scu_ic);
if (!scu_ic->irq_domain) {
rc = -ENOMEM;
goto err;
}
- irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+ irq_set_chained_handler_and_data(irq, scu_has_split_isr(scu_ic) ?
+ aspeed_scu_ic_irq_handler_split :
+ aspeed_scu_ic_irq_handler_combined,
scu_ic);
return 0;
err:
kfree(scu_ic);
-
return rc;
}
-static int __init aspeed_scu_ic_of_init(struct device_node *node,
- struct device_node *parent)
+static const struct aspeed_scu_ic_variant *aspeed_scu_ic_find_variant(struct device_node *np)
{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
-
- if (!scu_ic)
- return -ENOMEM;
-
- scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
- scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
- scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
- scu_ic->reg = ASPEED_SCU_IC_REG;
-
- return aspeed_scu_ic_of_init_common(scu_ic, node);
+ for (int i = 0; i < ARRAY_SIZE(scu_ic_variants); i++) {
+ if (of_device_is_compatible(np, scu_ic_variants[i].compatible))
+ return &scu_ic_variants[i];
+ }
+ return NULL;
}
-static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init aspeed_scu_ic_of_init(struct device_node *node, struct device_node *parent)
{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+ const struct aspeed_scu_ic_variant *variant;
+ struct aspeed_scu_ic *scu_ic;
- if (!scu_ic)
- return -ENOMEM;
-
- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
- scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
-
- return aspeed_scu_ic_of_init_common(scu_ic, node);
-}
-
-static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
- struct device_node *parent)
-{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+ variant = aspeed_scu_ic_find_variant(node);
+ if (!variant)
+ return -ENODEV;
+ scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
if (!scu_ic)
return -ENOMEM;
- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
- scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+ scu_ic->irq_enable = variant->irq_enable;
+ scu_ic->irq_shift = variant->irq_shift;
+ scu_ic->num_irqs = variant->num_irqs;
+ scu_ic->ier = variant->ier;
+ scu_ic->isr = variant->isr;
return aspeed_scu_ic_of_init_common(scu_ic, node);
}
IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
-IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
- aspeed_ast2600_scu_ic0_of_init);
-IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
- aspeed_ast2600_scu_ic1_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic0, "aspeed,ast2700-scu-ic0", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic1, "aspeed,ast2700-scu-ic1", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic2, "aspeed,ast2700-scu-ic2", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic3, "aspeed,ast2700-scu-ic3", aspeed_scu_ic_of_init);
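The combined-register layout (enable bits in [15:0], write-1-to-clear status bits in [31:16]) is why every read-modify-write above masks out the status field. A minimal standalone sketch of that mask computation — hypothetical register values, not driver code:

    #include <stdio.h>

    /* Hypothetical combined SCU IC register image: enable bits in
     * [15:0], write-1-to-clear status bits in [31:16].
     */
    int main(void)
    {
            unsigned int reg = 0x00050003;  /* irqs 0-1 enabled, irqs 0 and 2 pending */
            unsigned int irq_enable = 0xffff; /* bits this instance may touch */
            unsigned int hwirq = 0;

            unsigned int bit = 1u << hwirq;
            unsigned int mask = bit | (irq_enable << 16);

            /* Mask irq 0: clear its enable bit and write 0 to all status
             * bits so the read-modify-write cannot ack a pending interrupt.
             */
            unsigned int val = reg & ~mask;
            printf("write %#010x (was %#010x)\n", val, reg);
            return 0;
    }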
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 24ef5af569fe..8a3410c2b7b5 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -153,14 +153,19 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
{
msi_alloc_info_t *info = args;
struct v2m_data *v2m = NULL, *tmp;
- int hwirq, offset, i, err = 0;
+ int hwirq, i, err = 0;
+ unsigned long offset;
+ unsigned long align_mask = nr_irqs - 1;
spin_lock(&v2m_lock);
list_for_each_entry(tmp, &v2m_nodes, entry) {
- offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
- get_count_order(nr_irqs));
- if (offset >= 0) {
+ unsigned long align_off = tmp->spi_start - (tmp->spi_start & ~align_mask);
+
+ offset = bitmap_find_next_zero_area_off(tmp->bm, tmp->nr_spis, 0,
+ nr_irqs, align_mask, align_off);
+ if (offset < tmp->nr_spis) {
v2m = tmp;
+ bitmap_set(v2m->bm, offset, nr_irqs);
break;
}
}
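Multi-MSI needs the resulting hwirq block to be size-aligned, but the bitmap indexes SPIs relative to spi_start, which itself may be unaligned; align_off shifts the alignment constraint accordingly. A small standalone check of that arithmetic, with hypothetical frame values:

    #include <stdio.h>

    int main(void)
    {
            unsigned long spi_start = 34, nr_irqs = 4; /* hypothetical v2m frame */
            unsigned long align_mask = nr_irqs - 1;
            unsigned long align_off = spi_start - (spi_start & ~align_mask);

            /* A free offset is usable only if spi_start + offset is
             * naturally aligned to nr_irqs.
             */
            for (unsigned long offset = 0; offset < 12; offset++)
                    if (((offset + align_off) & align_mask) == 0)
                            printf("offset %2lu -> hwirq %lu (naturally aligned)\n",
                                   offset, spi_start + offset);
            return 0;
    }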
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index dbeb85677b08..3de351e66ee8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1766,8 +1766,9 @@ static int gic_irq_domain_select(struct irq_domain *d,
struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token)
{
- unsigned int type, ret, ppi_idx;
+ unsigned int type, ppi_idx;
irq_hw_number_t hwirq;
+ int ret;
/* Not for us */
if (fwspec->fwnode != d->fwnode)
diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c
index 13c035727e32..ce2732d649a3 100644
--- a/drivers/irqchip/irq-gic-v5-irs.c
+++ b/drivers/irqchip/irq-gic-v5-irs.c
@@ -571,7 +571,7 @@ static void __init gicv5_irs_init_bases(struct gicv5_irs_chip_data *irs_data,
FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_NO_READ_ALLOC) |
FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_NON_CACHE) |
FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_NON_CACHE);
- irs_data->flags |= IRS_FLAGS_NON_COHERENT;
+ irs_data->flags |= IRS_FLAGS_NON_COHERENT;
} else {
cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_WRITE_ALLOC) |
FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_READ_ALLOC) |
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
index 9290ac741949..554485f0be1f 100644
--- a/drivers/irqchip/irq-gic-v5-its.c
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -191,9 +191,9 @@ static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its,
unsigned int num_events)
{
unsigned int l1_bits, l2_bits, span, events_per_l2_table;
- unsigned int i, complete_tables, final_span, num_ents;
+ unsigned int complete_tables, final_span, num_ents;
__le64 *itt_l1, *itt_l2, **l2ptrs;
- int ret;
+ int i, ret;
u64 val;
ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz);
@@ -768,8 +768,6 @@ static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *
goto out_dev_free;
}
- gicv5_its_device_cache_inv(its, its_dev);
-
its_dev->its_node = its;
its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL);
@@ -949,15 +947,18 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
device_id = its_dev->device_id;
for (i = 0; i < nr_irqs; i++) {
- lpi = gicv5_alloc_lpi();
+ ret = gicv5_alloc_lpi();
if (ret < 0) {
pr_debug("Failed to find free LPI!\n");
- goto out_eventid;
+ goto out_free_irqs;
}
+ lpi = ret;
ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
- if (ret)
- goto out_free_lpi;
+ if (ret) {
+ gicv5_free_lpi(lpi);
+ goto out_free_irqs;
+ }
/*
* Store eventid and deviceid into the hwirq for later use.
@@ -977,8 +978,13 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
return 0;
-out_free_lpi:
- gicv5_free_lpi(lpi);
+out_free_irqs:
+ while (--i >= 0) {
+ irqd = irq_domain_get_irq_data(domain, virq + i);
+ gicv5_free_lpi(irqd->parent_data->hwirq);
+ irq_domain_reset_irq_data(irqd);
+ irq_domain_free_irqs_parent(domain, virq + i, 1);
+ }
out_eventid:
gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
return ret;
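The reworked error path replaces a single-LPI free with a full unwind of everything allocated before the failing iteration. A generic standalone sketch of that allocate-then-unwind pattern (stand-in resources, not the ITS code itself):

    #include <stdlib.h>

    static int alloc_one(void **slot)
    {
            *slot = malloc(16);
            return *slot ? 0 : -1;
    }

    static int alloc_many(void **slots, int nr)
    {
            int i;

            for (i = 0; i < nr; i++)
                    if (alloc_one(&slots[i]) < 0)
                            goto out_unwind;
            return 0;

    out_unwind:
            while (--i >= 0) {      /* free newest-first, exactly [0, i) */
                    free(slots[i]);
                    slots[i] = NULL;
            }
            return -1;
    }

    int main(void)
    {
            void *slots[4];

            if (alloc_many(slots, 4))
                    return 1;
            for (int i = 0; i < 4; i++)
                    free(slots[i]);
            return 0;
    }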
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b2860eb2d32c..39e5a72ccd3c 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -46,6 +46,7 @@
#define EIOINTC_ALL_ENABLE_VEC_MASK(vector) (EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
#define EIOINTC_REG_ENABLE_VEC(vector) (EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
#define EIOINTC_USE_CPU_ENCODE BIT(0)
+#define EIOINTC_ROUTE_MULT_IP BIT(1)
#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
@@ -59,6 +60,14 @@
#define EIOINTC_REG_ROUTE_VEC_MASK(vector) (0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))
static int nr_pics;
+struct eiointc_priv;
+
+struct eiointc_ip_route {
+ struct eiointc_priv *priv;
+ /* ISR register index range routed to the destination IP */
+ int start;
+ int end;
+};
struct eiointc_priv {
u32 node;
@@ -68,6 +77,8 @@ struct eiointc_priv {
struct fwnode_handle *domain_handle;
struct irq_domain *eiointc_domain;
int flags;
+ irq_hw_number_t parent_hwirq;
+ struct eiointc_ip_route route_info[VEC_REG_COUNT];
};
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
@@ -188,6 +199,7 @@ static int eiointc_router_init(unsigned int cpu)
{
int i, bit, cores, index, node;
unsigned int data;
+ int hwirq, mask;
node = cpu_to_eio_node(cpu);
index = eiointc_index(node);
@@ -197,6 +209,13 @@ static int eiointc_router_init(unsigned int cpu)
return -EINVAL;
}
+ /* Enable cpu interrupt pin from eiointc */
+ hwirq = eiointc_priv[index]->parent_hwirq;
+ mask = BIT(hwirq);
+ if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP)
+ mask |= BIT(hwirq + 1) | BIT(hwirq + 2) | BIT(hwirq + 3);
+ set_csr_ecfg(mask);
+
if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
cores = CORES_PER_EIO_NODE;
else
@@ -211,8 +230,31 @@ static int eiointc_router_init(unsigned int cpu)
}
for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
- bit = BIT(1 + index); /* Route to IP[1 + index] */
- data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ /*
+ * Route to an interrupt pin using a relative offset:
+ * offset 0 means routing to IP0, and so on.
+ *
+ * If EIOINTC_ROUTE_MULT_IP is set in flags, each group of
+ * 64 vectors routes to a different consecutive IP;
+ * otherwise all vectors route to the same IP.
+ */
+ if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) {
+ /* The first 64 vectors route to hwirq */
+ bit = BIT(hwirq++ - INT_HWI0);
+ data = bit | (bit << 8);
+
+ /* The second 64 vectors route to hwirq + 1 */
+ bit = BIT(hwirq++ - INT_HWI0);
+ data |= (bit << 16) | (bit << 24);
+
+ /*
+ * Route to hwirq + 2 and hwirq + 3 in the
+ * next loop iteration
+ */
+ } else {
+ bit = BIT(hwirq - INT_HWI0);
+ data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ }
iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
}
@@ -241,15 +283,22 @@ static int eiointc_router_init(unsigned int cpu)
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
- int i;
- u64 pending;
- bool handled = false;
+ struct eiointc_ip_route *info = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
+ bool handled = false;
+ u64 pending;
+ int i;
chained_irq_enter(chip, desc);
- for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
+ /*
+ * If EIOINTC_ROUTE_MULT_IP is set, each group of 64 interrupt
+ * vectors in the eiointc interrupt controller routes to a
+ * different cpu interrupt pin.
+ *
+ * Every cpu interrupt pin has its own irq handler, so it is
+ * sufficient to read the ISR for these 64 vectors rather than
+ * for all vectors.
+ */
+ for (i = info->start; i < info->end; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
/* Skip handling if pending bitmap is zero */
@@ -262,7 +311,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
int bit = __ffs(pending);
int irq = bit + VEC_COUNT_PER_REG * i;
- generic_handle_domain_irq(priv->eiointc_domain, irq);
+ generic_handle_domain_irq(info->priv->eiointc_domain, irq);
pending &= ~BIT(bit);
handled = true;
}
@@ -462,8 +511,33 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
}
eiointc_priv[nr_pics++] = priv;
+ /*
+ * Only the first eiointc device on a VM supports routing to
+ * different CPU interrupt pins. Any later eiointc devices fall
+ * back to the generic method if multiple devices exist in future
+ */
+ if (cpu_has_hypervisor && (nr_pics == 1)) {
+ priv->flags |= EIOINTC_ROUTE_MULT_IP;
+ priv->parent_hwirq = INT_HWI0;
+ }
+
+ if (priv->flags & EIOINTC_ROUTE_MULT_IP) {
+ for (i = 0; i < priv->vec_count / VEC_COUNT_PER_REG; i++) {
+ priv->route_info[i].start = priv->parent_hwirq - INT_HWI0 + i;
+ priv->route_info[i].end = priv->route_info[i].start + 1;
+ priv->route_info[i].priv = priv;
+ parent_irq = get_percpu_irq(priv->parent_hwirq + i);
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+ &priv->route_info[i]);
+ }
+ } else {
+ priv->route_info[0].start = 0;
+ priv->route_info[0].end = priv->vec_count / VEC_COUNT_PER_REG;
+ priv->route_info[0].priv = priv;
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+ &priv->route_info[0]);
+ }
eiointc_router_init(0);
- irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
if (nr_pics == 1) {
register_syscore_ops(&eiointc_syscore_ops);
@@ -495,7 +569,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
priv->vec_count = VEC_COUNT;
priv->node = acpi_eiointc->node;
-
+ priv->parent_hwirq = acpi_eiointc->cascade;
parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
@@ -527,8 +601,9 @@ out_free_priv:
static int __init eiointc_of_init(struct device_node *of_node,
struct device_node *parent)
{
- int parent_irq, ret;
struct eiointc_priv *priv;
+ struct irq_data *irq_data;
+ int parent_irq, ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -544,6 +619,12 @@ static int __init eiointc_of_init(struct device_node *of_node,
if (ret < 0)
goto out_free_priv;
+ irq_data = irq_get_irq_data(parent_irq);
+ if (!irq_data) {
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
/*
* In particular, the number of devices supported by the LS2K0500
* extended I/O interrupt vector is 128.
@@ -552,7 +633,7 @@ static int __init eiointc_of_init(struct device_node *of_node,
priv->vec_count = 128;
else
priv->vec_count = VEC_COUNT;
-
+ priv->parent_hwirq = irqd_to_hwirq(irq_data);
priv->node = 0;
priv->domain_handle = of_fwnode_handle(of_node);
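Each 32-bit IPMAP word above carries four route bytes, one per group of 32 vectors; in multi-IP mode the driver points the first two bytes (vectors 0-63) at one pin and the next two (vectors 64-127) at the following pin. A standalone sketch of the byte packing — the INT_HWI0 value is an assumption for illustration:

    #include <stdio.h>

    #define INT_HWI0 0      /* assumed base hwirq of the CPU HWI pins */

    int main(void)
    {
            int hwirq = INT_HWI0;
            unsigned int bit, data;

            /* Multi-IP mode: bytes 0-1 (vectors 0-63) -> first pin,
             * bytes 2-3 (vectors 64-127) -> next pin.
             */
            bit = 1u << (hwirq++ - INT_HWI0);
            data = bit | (bit << 8);
            bit = 1u << (hwirq++ - INT_HWI0);
            data |= (bit << 16) | (bit << 24);
            printf("multi-IP IPMAP word:  %#010x\n", data);

            /* Single-IP mode: all four bytes route to the same pin */
            bit = 1u << (INT_HWI0 - INT_HWI0);
            data = bit | (bit << 8) | (bit << 16) | (bit << 24);
            printf("single-IP IPMAP word: %#010x\n", data);
            return 0;
    }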
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
index 2d4c3ec128b8..912bf50a5c7c 100644
--- a/drivers/irqchip/irq-loongson-pch-lpc.c
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -200,8 +200,13 @@ int __init pch_lpc_acpi_init(struct irq_domain *parent,
goto iounmap_base;
}
- priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
- &pch_lpc_domain_ops, priv);
+ /*
+ * The LPC interrupt controller is a legacy i8259-compatible device,
+ * which requires a static 1:1 mapping for IRQs 0-15.
+ * Use irq_domain_create_legacy to establish this static mapping early.
+ */
+ priv->lpc_domain = irq_domain_create_legacy(irq_handle, LPC_COUNT, 0, 0,
+ &pch_lpc_domain_ops, priv);
if (!priv->lpc_domain) {
pr_err("Failed to create IRQ domain\n");
goto free_irq_handle;
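A sketch of the distinction the new comment describes, assuming a hypothetical 16-line i8259-style controller: the legacy variant allocates and maps all virq<->hwirq pairs at registration time, whereas the linear variant defers each mapping to irq_create_mapping().

    #include <linux/irqdomain.h>

    /* Sketch only, not the driver: pre-map hwirqs 0-15 up front so
     * users can assume the mappings already exist.
     */
    static struct irq_domain *
    legacy_domain_sketch(struct fwnode_handle *fwnode,
                         const struct irq_domain_ops *ops, void *priv)
    {
            /* size = 16, first_irq = 0 (let the core assign virqs),
             * first_hwirq = 0; the linear variant would take only
             * (fwnode, 16, ops, priv) and map lazily.
             */
            return irq_domain_create_legacy(fwnode, 16, 0, 0, ops, priv);
    }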
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index 908944009c21..d5eefc3d7215 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -112,6 +112,20 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
*/
if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
chip->irq_set_affinity = msi_domain_set_affinity;
+
+ /*
+ * If the parent domain insists on being in charge of masking, obey
+ * blindly. The interrupt is un-masked at the PCI level on startup
+ * and masked on shutdown to prevent rogue interrupts after the
+ * driver freed the interrupt. Not masking it at the PCI level
+ * speeds up operation for disable/enable_irq() as it avoids
+ * getting all the way out to the PCI device.
+ */
+ if (info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT) {
+ chip->irq_mask = irq_chip_mask_parent;
+ chip->irq_unmask = irq_chip_unmask_parent;
+ }
+
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
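In chip-callback terms, honouring the flag means the per-device chip simply delegates masking one level up the domain hierarchy. A hypothetical middle-domain chip showing the result of the rewiring (example name only):

    #include <linux/irq.h>

    /* Hypothetical chip after msi_lib_init_dev_msi_info() has honoured
     * MSI_FLAG_PCI_MSI_MASK_PARENT: mask/unmask walk up to the parent
     * domain instead of touching the PCI MSI mask bit.
     */
    static struct irq_chip example_msi_chip = {
            .name           = "example-msi",
            .irq_mask       = irq_chip_mask_parent,
            .irq_unmask     = irq_chip_unmask_parent,
    };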
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 76e11cac9631..2191a2b79578 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -73,8 +73,9 @@ static int __init nvic_of_init(struct device_node *node,
struct device_node *parent)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
- unsigned int irqs, i, ret, numbanks;
+ unsigned int irqs, i, numbanks;
void __iomem *nvic_base;
+ int ret;
numbanks = (readl_relaxed(V7M_SCS_ICTR) &
V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index a697eb55ac90..6047a524ac77 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -142,11 +142,12 @@ static const struct irq_domain_ops rza1_irqc_domain_ops = {
static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv,
struct device_node *gic_node)
{
- unsigned int imaplen, i, j, ret;
struct device *dev = priv->dev;
+ unsigned int imaplen, i, j;
struct device_node *ipar;
const __be32 *imap;
u32 intsize;
+ int ret;
imap = of_get_property(dev->of_node, "interrupt-map", &imaplen);
if (!imap)
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 360d88687e4f..2a54adeb4cc7 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -578,7 +578,7 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
&rzg2l_irqc_domain_ops, rzg2l_irqc_data);
if (!irq_domain) {
pm_runtime_put(dev);
- return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
+ return -ENOMEM;
}
register_syscore_ops(&rzg2l_irqc_syscore_ops);
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index bcfddc51bc6a..f7cf0dc72eab 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -30,6 +30,7 @@ struct sg204x_msi_chip_info {
* @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET
* @irq_first: First vector number at which MSIs start
* @num_irqs: Number of vectors for MSIs
+ * @irq_type: IRQ type for MSIs
* @msi_map: mapping for allocated MSI vectors.
* @msi_map_lock: Lock for msi_map
* @chip_info: chip specific information
@@ -41,6 +42,7 @@ struct sg204x_msi_chipdata {
u32 irq_first;
u32 num_irqs;
+ unsigned int irq_type;
unsigned long *msi_map;
struct mutex msi_map_lock;
@@ -85,6 +87,8 @@ static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static const struct irq_chip sg2042_msi_middle_irq_chip = {
.name = "SG2042 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2042_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -114,6 +118,8 @@ static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static struct irq_chip sg2044_msi_middle_irq_chip = {
.name = "SG2044 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2044_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -133,14 +139,14 @@ static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in
fwspec.fwnode = domain->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = data->irq_first + hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param[1] = data->irq_type;
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
return ret;
d = irq_domain_get_irq_data(domain->parent, virq);
- return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+ return d->chip->irq_set_type(d, data->irq_type);
}
static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -185,8 +191,10 @@ static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
.select = msi_lib_irq_domain_select,
};
-#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
@@ -200,10 +208,13 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
-#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI | \
MSI_FLAG_PCI_MSIX)
static const struct msi_parent_ops sg2044_msi_parent_ops = {
@@ -289,6 +300,7 @@ static int sg2042_msi_probe(struct platform_device *pdev)
}
data->irq_first = (u32)args.args[0];
+ data->irq_type = (unsigned int)args.args[1];
data->num_irqs = (u32)args.args[args.nargs - 1];
mutex_init(&data->msi_map_lock);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index bf69a4802b71..559fda8fb3a8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -179,12 +179,14 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- plic_irq_disable(d);
+ /* Invalidate the original routing entry */
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ /* Set the new routing entry if the irq is enabled */
if (!irqd_irq_disabled(d))
- plic_irq_enable(d);
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
return IRQ_SET_MASK_OK_DONE;
}
@@ -257,7 +259,7 @@ static int plic_irq_suspend(void)
readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
}
- for_each_cpu(cpu, cpu_present_mask) {
+ for_each_present_cpu(cpu) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present)
@@ -289,7 +291,7 @@ static void plic_irq_resume(void)
priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
}
- for_each_cpu(cpu, cpu_present_mask) {
+ for_each_present_cpu(cpu) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present)
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index efeee0a873c0..ab96b692e5a3 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -133,7 +133,7 @@ struct journal_sector {
commit_id_t commit_id;
};
-#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
+#define MAX_TAG_SIZE 255
#define METADATA_PADDING_SECTORS 8
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 79ea85d18e24..f4b904e24328 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3813,8 +3813,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
struct raid_set *rs = ti->private;
unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
- limits->io_min = chunk_size_bytes;
- limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ if (chunk_size_bytes) {
+ limits->io_min = chunk_size_bytes;
+ limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ }
}
static void raid_presuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 58902091bf79..1461dc740dae 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -456,11 +456,15 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
- unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ unsigned int io_min, io_opt;
limits->chunk_sectors = sc->chunk_size;
- limits->io_min = chunk_size;
- limits->io_opt = chunk_size * sc->stripes;
+
+ if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
+ !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
+ limits->io_min = io_min;
+ limits->io_opt = io_opt;
+ }
}
static struct target_type stripe_target = {
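The overflow guards keep io_min/io_opt at their stacked defaults whenever the byte conversion or the multiply would wrap the 32-bit limits fields. A sketch of the same pattern in isolation, assuming the usual 512-byte sector shift of 9:

    #include <linux/overflow.h>

    /* Sketch: publish io hints only when both values fit in unsigned
     * int; a huge chunk size times many stripes would otherwise wrap.
     */
    static void stripe_hints_sketch(unsigned int chunk_sectors,
                                    unsigned int stripes,
                                    unsigned int *io_min, unsigned int *io_opt)
    {
            unsigned int min, opt;

            if (!check_shl_overflow(chunk_sectors, 9, &min) &&  /* sectors -> bytes */
                !check_mul_overflow(min, stripes, &opt)) {
                    *io_min = min;
                    *io_opt = opt;
            }
    }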
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 5497eaee96e7..6e9a0045f0ff 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -979,7 +979,7 @@ err:
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
if (cinfo->lockspace)
- dlm_release_lockspace(cinfo->lockspace, 2);
+ dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
mddev->cluster_info = NULL;
kfree(cinfo);
return ret;
@@ -1042,7 +1042,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
- dlm_release_lockspace(cinfo->lockspace, 2);
+ dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
kfree(cinfo);
return 0;
}
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 5d9b08115375..3e1f165c2d20 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f1d8811a542a..419139ad7663 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.chunk_sectors = mddev->chunk_sectors;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index bf44878ec640..d30b82beeb92 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b60c30bfb6c7..9832eefb2f15 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4008,6 +4008,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.chunk_sectors = mddev->chunk_sectors;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 023649fe2476..e385ef1355e8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7732,6 +7732,7 @@ static int raid5_set_limits(struct mddev *mddev)
lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
lim.discard_granularity = stripe;
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
mddev_stack_rdev_limits(mddev, &lim, 0);
rdev_for_each(rdev, mddev)
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
diff --git a/drivers/media/rc/pwm-ir-tx.c b/drivers/media/rc/pwm-ir-tx.c
index 84533fdd61aa..047472dc9244 100644
--- a/drivers/media/rc/pwm-ir-tx.c
+++ b/drivers/media/rc/pwm-ir-tx.c
@@ -117,7 +117,6 @@ static int pwm_ir_tx_atomic(struct rc_dev *dev, unsigned int *txbuf,
static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
{
struct pwm_ir *pwm_ir = container_of(timer, struct pwm_ir, timer);
- ktime_t now;
/*
* If we happen to hit an odd latency spike, loop through the
@@ -139,9 +138,7 @@ static enum hrtimer_restart pwm_ir_timer(struct hrtimer *timer)
hrtimer_add_expires_ns(timer, ns);
pwm_ir->txbuf_index++;
-
- now = timer->base->get_time();
- } while (hrtimer_get_expires_tv64(timer) < now);
+ } while (hrtimer_expires_remaining(timer) > 0);
return HRTIMER_RESTART;
}
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c44de892a61e..5372ed2a363e 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -94,7 +94,7 @@ static int ibmasmfs_init_fs_context(struct fs_context *fc)
static const struct super_operations ibmasmfs_s_ops = {
.statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static const struct file_operations *ibmasmfs_dir_ops = &simple_dir_operations;
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index 6a33889d0902..c3971f7caa65 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -43,7 +43,7 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
lkdtm_indirect_call((void *)lkdtm_increment_int);
pr_err("FAIL: survived mismatched prototype function call!\n");
- pr_expected_config(CONFIG_CFI_CLANG);
+ pr_expected_config(CONFIG_CFI);
}
/*
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index 015927665678..00ed2147113e 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -44,6 +44,9 @@ static void lkdtm_FORTIFY_STR_MEMBER(void)
char *src;
src = kmalloc(size, GFP_KERNEL);
+ if (!src)
+ return;
+
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
@@ -109,6 +112,9 @@ static void lkdtm_FORTIFY_MEM_MEMBER(void)
char *src;
src = kmalloc(size, GFP_KERNEL);
+ if (!src)
+ return;
+
strscpy(src, "over ten bytes", size);
size = strlen(src) + 1;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index a9e6277789ba..79df2fa89a3f 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
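The fix follows the DMA-API rule that dma_unmap_sg() must be passed the same nents that was given to dma_map_sg(), not the (possibly smaller) mapped count that dma_map_sg() returned. A minimal kernel-style sketch of the rule, not the mvsdio code:

    #include <linux/dma-mapping.h>

    static void map_unmap_sketch(struct device *dev, struct scatterlist *sg,
                                 int nents, enum dma_data_direction dir)
    {
            int mapped = dma_map_sg(dev, sg, nents, dir);

            if (!mapped)
                    return;
            /* ... program the controller with 'mapped' entries ... */
            dma_unmap_sg(dev, sg, nents, dir);  /* original nents, not 'mapped' */
    }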
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 3a1de477e9af..b0f91cc9e40e 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -283,6 +283,8 @@
#define PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE 0xb
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL BIT(6)
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE 0x1
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN BIT(13)
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE BIT(14)
#define GLI_MAX_TUNING_LOOP 40
@@ -1179,6 +1181,65 @@ static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable)
gl9767_vhs_read(pdev);
}
+static void sdhci_gl9767_uhs2_phy_reset(struct sdhci_host *host, bool assert)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value, set, clr;
+
+ if (assert) {
+ /* Assert reset, set RESETN and clear RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ } else {
+ /* De-assert reset, clear RESETN and set RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ }
+
+ gl9767_vhs_write(pdev);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value);
+ value |= set;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ value &= ~clr;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ gl9767_vhs_read(pdev);
+}
+
+static void __gl9767_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ u8 pwr = 0;
+
+ if (mode != MMC_POWER_OFF) {
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr)
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ pwr |= SDHCI_VDD2_POWER_180;
+ }
+
+ if (host->pwr == pwr)
+ return;
+
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ } else {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ pwr |= SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+
+ /* Assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, true);
+ pwr |= SDHCI_VDD2_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+ }
+}
+
static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -1205,6 +1266,11 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
}
sdhci_enable_clk(host, clk);
+
+ if (mmc_card_uhs2(host->mmc))
+ /* De-assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, false);
+
gl9767_set_low_power_negotiation(pdev, true);
}
@@ -1476,7 +1542,7 @@ static void sdhci_gl9767_set_power(struct sdhci_host *host, unsigned char mode,
gl9767_vhs_read(pdev);
sdhci_gli_overcurrent_event_enable(host, false);
- sdhci_uhs2_set_power(host, mode, vdd);
+ __gl9767_uhs2_set_power(host, mode, vdd);
sdhci_gli_overcurrent_event_enable(host, true);
} else {
gl9767_vhs_write(pdev);
diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c
index 0efeb9d0c376..c459a08d01da 100644
--- a/drivers/mmc/host/sdhci-uhs2.c
+++ b/drivers/mmc/host/sdhci-uhs2.c
@@ -295,7 +295,8 @@ static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);
- sdhci_set_clock(host, host->clock);
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
}
static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3a17821efa5c..ac7e11f37af7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2367,23 +2367,6 @@ void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios)
(ios->power_mode == MMC_POWER_UP) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
-
- if (!ios->clock || ios->clock != host->clock) {
- host->ops->set_clock(host, ios->clock);
- host->clock = ios->clock;
-
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
- host->clock) {
- host->timeout_clk = mmc->actual_clock ?
- mmc->actual_clock / 1000 :
- host->clock / 1000;
- mmc->max_busy_timeout =
- host->ops->get_max_timeout_count ?
- host->ops->get_max_timeout_count(host) :
- 1 << 27;
- mmc->max_busy_timeout /= host->timeout_clk;
- }
- }
}
EXPORT_SYMBOL_GPL(sdhci_set_ios_common);
@@ -2410,6 +2393,23 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_set_ios_common(mmc, ios);
+ if (!ios->clock || ios->clock != host->clock) {
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
+ host->clock) {
+ host->timeout_clk = mmc->actual_clock ?
+ mmc->actual_clock / 1000 :
+ host->clock / 1000;
+ mmc->max_busy_timeout =
+ host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) :
+ 1 << 27;
+ mmc->max_busy_timeout /= host->timeout_clk;
+ }
+ }
+
if (host->ops->set_power)
host->ops->set_power(host, ios->power_mode, ios->vdd);
else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b29628d46be9..ac12eaf11755 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -76,24 +76,11 @@ config WIREGUARD
tristate "WireGuard secure network tunnel"
depends on NET && INET
depends on IPV6 || !IPV6
- depends on !KMSAN # KMSAN doesn't support the crypto configs below
select NET_UDP_TUNNEL
select DST_CACHE
- select CRYPTO
select CRYPTO_LIB_CURVE25519
select CRYPTO_LIB_CHACHA20POLY1305
- select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT
- select CRYPTO_POLY1305_X86_64 if X86 && 64BIT
- select CRYPTO_BLAKE2S_X86 if X86 && 64BIT
- select CRYPTO_CURVE25519_X86 if X86 && 64BIT
- select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON)
- select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON
- select CRYPTO_POLY1305_ARM if ARM
- select CRYPTO_BLAKE2S_ARM if ARM
- select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON
- select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2
- select CRYPTO_POLY1305_MIPS if MIPS
- select CRYPTO_CHACHA_S390 if S390
+ select CRYPTO_LIB_UTILS
help
WireGuard is a secure, fast, and easy to use replacement for IPSec
that uses modern cryptography and clever networking tricks. It's
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 257333c88710..57be04f6cb11 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2132,6 +2132,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
} else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+ bond_has_slaves(bond) &&
memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
/* Set slave to random address to avoid duplicate mac
* address in later fail over.
@@ -3355,7 +3356,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
/* Find out through which dev should the packet go */
memset(&fl6, 0, sizeof(struct flowi6));
fl6.daddr = targets[i];
- fl6.flowi6_oif = bond->dev->ifindex;
dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
if (dst->error) {
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b3c8c592fb0e..7e8b1d2f1af6 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -823,9 +823,6 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
/* Reset Global error flags */
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);
- /* Set the controller into appropriate mode */
- rcar_canfd_set_mode(gpriv);
-
/* Transition all Channels to reset mode */
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_clear_bit(gpriv->base,
@@ -844,6 +841,10 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
return err;
}
}
+
+ /* Set the controller into appropriate mode */
+ rcar_canfd_set_mode(gpriv);
+
return 0;
}
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 09ae218315d7..963ea8510dd9 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -545,8 +545,6 @@ static int hi3110_stop(struct net_device *net)
priv->force_quit = 1;
free_irq(spi->irq, priv);
- destroy_workqueue(priv->wq);
- priv->wq = NULL;
mutex_lock(&priv->hi3110_lock);
@@ -770,34 +768,23 @@ static int hi3110_open(struct net_device *net)
goto out_close;
}
- priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
- 0);
- if (!priv->wq) {
- ret = -ENOMEM;
- goto out_free_irq;
- }
- INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
- INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
-
ret = hi3110_hw_reset(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_setup(net);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
ret = hi3110_set_normal_mode(spi);
if (ret)
- goto out_free_wq;
+ goto out_free_irq;
netif_wake_queue(net);
mutex_unlock(&priv->hi3110_lock);
return 0;
- out_free_wq:
- destroy_workqueue(priv->wq);
out_free_irq:
free_irq(spi->irq, priv);
hi3110_hw_sleep(spi);
@@ -812,6 +799,7 @@ static const struct net_device_ops hi3110_netdev_ops = {
.ndo_open = hi3110_open,
.ndo_stop = hi3110_stop,
.ndo_start_xmit = hi3110_hard_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops hi3110_ethtool_ops = {
@@ -908,6 +896,15 @@ static int hi3110_can_probe(struct spi_device *spi)
if (ret)
goto out_clk;
+ priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ 0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clk;
+ }
+ INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+ INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
+
priv->spi = spi;
mutex_init(&priv->hi3110_lock);
@@ -943,6 +940,8 @@ static int hi3110_can_probe(struct spi_device *spi)
return 0;
error_probe:
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
hi3110_power_enable(priv->power, 0);
out_clk:
@@ -963,6 +962,9 @@ static void hi3110_can_remove(struct spi_device *spi)
hi3110_power_enable(priv->power, 0);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
clk_disable_unprepare(priv->clk);
free_candev(net);
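The hi311x change moves the workqueue from the open/stop path to the probe/remove path, so a queued restart_work can no longer race with destroy_workqueue() around interface close. A sketch of that lifetime pattern with hypothetical names (example_priv, example_tx_handler):

    #include <linux/workqueue.h>

    struct example_priv {                   /* hypothetical driver state */
            struct workqueue_struct *wq;
            struct work_struct tx_work;
    };

    static void example_tx_handler(struct work_struct *w) { }

    /* Allocate long-lived resources once, in probe()... */
    static int example_probe(struct example_priv *priv)
    {
            priv->wq = alloc_workqueue("example_wq",
                                       WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
            if (!priv->wq)
                    return -ENOMEM;
            INIT_WORK(&priv->tx_work, example_tx_handler);
            return 0;
    }

    /* ...and tear them down only in remove() */
    static void example_remove(struct example_priv *priv)
    {
            destroy_workqueue(priv->wq);
            priv->wq = NULL;
    }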
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 6fcb301ef611..53bfd873de9b 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -768,6 +768,7 @@ static const struct net_device_ops sun4ican_netdev_ops = {
.ndo_open = sun4ican_open,
.ndo_stop = sun4ican_close,
.ndo_start_xmit = sun4ican_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops sun4ican_ethtool_ops = {
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c
index db1acf6d504c..adc91873c083 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.c
@@ -7,7 +7,7 @@
*
* Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved.
* Copyright (c) 2020 ETAS K.K.. All rights reserved.
- * Copyright (c) 2020-2022 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+ * Copyright (c) 2020-2025 Vincent Mailhol <mailhol@kernel.org>
*/
#include <linux/unaligned.h>
@@ -1977,6 +1977,7 @@ static const struct net_device_ops es58x_netdev_ops = {
.ndo_stop = es58x_stop,
.ndo_start_xmit = es58x_start_xmit,
.ndo_eth_ioctl = can_eth_ioctl_hwts,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops es58x_ethtool_ops = {
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 41c0a1c399bf..1f9b915094e6 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -761,6 +761,7 @@ static const struct net_device_ops mcba_netdev_ops = {
.ndo_open = mcba_usb_open,
.ndo_stop = mcba_usb_close,
.ndo_start_xmit = mcba_usb_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
};
static const struct ethtool_ops mcba_ethtool_ops = {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 117637b9b995..dd5caa1c302b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -111,7 +111,7 @@ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now)
u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1;
if (time_ref->ts_dev_2 < time_ref->ts_dev_1)
- delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1;
+ delta_ts &= (1ULL << time_ref->adapter->ts_used_bits) - 1;
time_ref->ts_total += delta_ts;
}
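With ts_used_bits as large as 32, the old expression evaluated 1 << 32 in 32-bit int, which is undefined; promoting to unsigned long long first yields the intended wrap mask. A standalone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            /* (1 << 32) - 1 is undefined for a 32-bit int; the 1ULL
             * form is well defined for every used-bits width.
             */
            for (unsigned int bits = 24; bits <= 32; bits += 8) {
                    unsigned long long mask = (1ULL << bits) - 1;
                    printf("%2u used bits -> wrap mask %#llx\n", bits, mask);
            }
            return 0;
    }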
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 6eb3140d4044..84dc6e517acf 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -685,18 +685,27 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
return 0;
}
-static int gswip_port_enable(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static int gswip_port_setup(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
int err;
if (!dsa_is_cpu_port(ds, port)) {
- u32 mdio_phy = 0;
-
err = gswip_add_single_port_br(priv, port, true);
if (err)
return err;
+ }
+
+ return 0;
+}
+
+static int gswip_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
if (phydev)
mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
@@ -1359,8 +1368,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
int i;
int err;
+ /* Operation not supported on the CPU port, don't throw errors */
if (!bridge)
- return -EINVAL;
+ return 0;
for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
if (priv->vlans[i].bridge == bridge) {
@@ -1829,6 +1839,7 @@ static const struct phylink_mac_ops gswip_phylink_mac_ops = {
static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
+ .port_setup = gswip_port_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
.port_bridge_join = gswip_port_bridge_join,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index d72fd248f3aa..2d66bf59cd64 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -244,7 +244,7 @@ bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
offset < offset_of_ip6_daddr + 16) {
actions->nat.src_xlate = false;
idx = (offset - offset_of_ip6_daddr) / 4;
- actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+ actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
} else {
netdev_err(bp->dev,
"%s: IPv6_hdr: Invalid pedit field\n",
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index a9040c42d2ff..6e97a5a7daaf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_bnx2x_delete_wait(dev, 0);
- cancel_delayed_work(&cp->delete_task);
- flush_workqueue(cnic_wq);
+ cancel_delayed_work_sync(&cp->delete_task);
if (atomic_read(&cp->iscsi_conn) != 0)
netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index de8a6ce86ad7..12105ffb5dac 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->io_qmask.iq |= BIT_ULL(iq_no);
/* Set the 32B/64B mode for each input queue */
- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
iq->iqcmd_64B = (conf->instr_type == 64);
oct->fn_list.setup_iq_regs(oct, iq_no);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 4643a3380618..b1e1ad9e4b48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -2736,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
goto err_get_attr;
}
- ethsw->bpid = dpbp_attrs.id;
+ ethsw->bpid = dpbp_attrs.bpid;
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 49aa4497efce..801a57a925da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1278,7 +1278,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
const u8 *macaddr);
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
-int i40e_count_filters(struct i40e_vsi *vsi);
+int i40e_count_all_filters(struct i40e_vsi *vsi);
+int i40e_count_active_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b14019d44b58..529d5501baac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1243,12 +1243,30 @@ void i40e_update_stats(struct i40e_vsi *vsi)
}
/**
- * i40e_count_filters - counts VSI mac filters
+ * i40e_count_all_filters - counts VSI MAC filters
* @vsi: the VSI to be searched
*
- * Returns count of mac filters
- **/
-int i40e_count_filters(struct i40e_vsi *vsi)
+ * Return: count of MAC filters in any state.
+ */
+int i40e_count_all_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ int bkt, cnt = 0;
+
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
+ cnt++;
+
+ return cnt;
+}
+
+/**
+ * i40e_count_active_filters - counts active VSI MAC filters
+ * @vsi: the VSI to be searched
+ *
+ * Return: count of active MAC filters.
+ */
+int i40e_count_active_filters(struct i40e_vsi *vsi)
{
struct i40e_mac_filter *f;
struct hlist_node *h;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 048c33039130..b194eae03208 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -948,9 +948,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
if (!eop_desc)
break;
- /* prevent any other reads prior to eop_desc */
- smp_rmb();
-
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
if (tx_head == tx_desc)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 9b8efdeafbcf..081a4526a2f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -448,7 +448,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
(qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
(pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
- (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ FIELD_PREP(I40E_QINT_RQCTL_ITR_INDX_MASK, itr_idx);
wr32(hw, reg_idx, reg);
}
@@ -653,6 +653,13 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
tx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be a multiple of 8 */
+ if (!IS_ALIGNED(info->ring_len, 8) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_context;
+ }
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
@@ -716,6 +723,13 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
/* only set the required fields */
rx_ctx.base = info->dma_ring_addr / 128;
+
+ /* ring_len has to be a multiple of 32 */
+ if (!IS_ALIGNED(info->ring_len, 32) ||
+ info->ring_len > I40E_MAX_NUM_DESCRIPTORS_XL710) {
+ ret = -EINVAL;
+ goto error_param;
+ }
rx_ctx.qlen = info->ring_len;
if (info->splithdr_enabled) {
@@ -1450,6 +1464,7 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* functions that may still be running at this point.
*/
clear_bit(I40E_VF_STATE_INIT, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -2116,7 +2131,10 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;
- if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
+ i40e_sync_vf_state(vf, I40E_VF_STATE_INIT);
+
+ if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states) ||
+ test_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states)) {
aq_ret = -EINVAL;
goto err;
}
@@ -2219,6 +2237,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vf->default_lan_addr.addr);
}
set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ set_bit(I40E_VF_STATE_RESOURCES_LOADED, &vf->vf_states);
err:
/* send the response back to the VF */
@@ -2381,7 +2400,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2402,7 +2421,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
* to its appropriate VSIs based on TC mapping
*/
if (vf->adq_enabled) {
- if (idx >= ARRAY_SIZE(vf->ch)) {
+ if (idx >= vf->num_tc) {
aq_ret = -ENODEV;
goto error_param;
}
@@ -2452,8 +2471,10 @@ static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
u16 vsi_queue_id, queue_id;
for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
- if (vf->adq_enabled) {
- vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+ u16 idx = vsi_queue_id / I40E_MAX_VF_VSI;
+
+ if (vf->adq_enabled && idx < vf->num_tc) {
+ vsi_id = vf->ch[idx].vsi_id;
queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
} else {
queue_id = vsi_queue_id;
@@ -2841,24 +2862,6 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
-/**
- * i40e_can_vf_change_mac
- * @vf: pointer to the VF info
- *
- * Return true if the VF is allowed to change its MAC filters, false otherwise
- */
-static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
-{
- /* If the VF MAC address has been set administratively (via the
- * ndo_set_vf_mac command), then deny permission to the VF to
- * add/delete unicast MAC addresses, unless the VF is trusted
- */
- if (vf->pf_set_mac && !vf->trusted)
- return false;
-
- return true;
-}
-
#define I40E_MAX_MACVLAN_PER_HW 3072
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
(num_ports))
@@ -2897,8 +2900,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
struct i40e_hw *hw = &pf->hw;
- int mac2add_cnt = 0;
- int i;
+ int i, mac_add_max, mac_add_cnt = 0;
+ bool vf_trusted;
+
+ vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
for (i = 0; i < al->num_elements; i++) {
struct i40e_mac_filter *f;
@@ -2918,9 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
- if (!i40e_can_vf_change_mac(vf) &&
- !is_multicast_ether_addr(addr) &&
- !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
+ if (!vf_trusted && !is_multicast_ether_addr(addr) &&
+ vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
dev_err(&pf->pdev->dev,
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
@@ -2929,29 +2933,33 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
/* count filters that really will be added */
f = i40e_find_mac(vsi, addr);
if (!f)
- ++mac2add_cnt;
+ ++mac_add_cnt;
}
/* If this VF is not privileged, then we can't add more than a limited
- * number of addresses. Check to make sure that the additions do not
- * push us over the limit.
- */
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MAC_ADDR_PER_VF) {
- dev_err(&pf->pdev->dev,
- "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
- return -EPERM;
- }
- /* If this VF is trusted, it can use more resources than untrusted.
+ * number of addresses.
+ *
+ * If this VF is trusted, it can use more resources than untrusted.
* However, to ensure that every trusted VF has an appropriate number of
* resources, divide the whole pool of resources per port and then across
* all VFs.
*/
- } else {
- if ((i40e_count_filters(vsi) + mac2add_cnt) >
- I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
- hw->num_ports)) {
+ if (!vf_trusted)
+ mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
+ else
+ mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
+
+ /* A VF can replace all its filters in one step; in this case mac_add_max
+ * will be added as active and another mac_add_max will be in
+ * a to-be-removed state. Account for that.
+ */
+ if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
+ (i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
+ if (!vf_trusted) {
+ dev_err(&pf->pdev->dev,
+ "Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
+ return -EPERM;
+ } else {
dev_err(&pf->pdev->dev,
"Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
return -EPERM;
@@ -3587,7 +3595,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
/* action_meta is TC number here to which the filter is applied */
if (!tc_filter->action_meta ||
- tc_filter->action_meta > vf->num_tc) {
+ tc_filter->action_meta >= vf->num_tc) {
dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
vf->vf_id, tc_filter->action_meta);
goto err;
@@ -3884,6 +3892,8 @@ err:
aq_ret);
}
+#define I40E_MAX_VF_CLOUD_FILTER 0xFF00
+
/**
* i40e_vc_add_cloud_filter
* @vf: pointer to the VF info
@@ -3923,6 +3933,14 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
goto err_out;
}
+ if (vf->num_cloud_filters >= I40E_MAX_VF_CLOUD_FILTER) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d: Max number of filters reached, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = -ENOSPC;
+ goto err_out;
+ }
+
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
if (!cfilter) {
aq_ret = -ENOMEM;
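The reworked limit check above allows up to mac_add_max active filters plus as many again in a to-be-removed state during a full replacement. A compilable sketch of that accounting rule, with illustrative names and numbers:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * During a full filter replacement, up to max filters may be active
     * while another max are still queued for removal, so the total may
     * legitimately reach 2 * max. Names here are illustrative.
     */
    static bool mac_add_allowed(int active, int all, int to_add, int max)
    {
        if (active + to_add > max)
            return false;      /* would exceed the active budget */
        if (all + to_add > 2 * max)
            return false;      /* would exceed active + to-be-removed */
        return true;
    }

    int main(void)
    {
        /* 5 active, 15 pending removal, budget 12: adding 4 still fits. */
        printf("%d\n", mac_add_allowed(5, 20, 4, 12));   /* 1 */
        /* 22 total: adding 4 would push past 2 * 12 = 24. */
        printf("%d\n", mac_add_allowed(5, 22, 4, 12));   /* 0 */
        return 0;
    }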
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5cf74f16f433..f558b45725c8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -41,7 +41,8 @@ enum i40e_vf_states {
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
I40E_VF_STATE_PRE_ENABLE,
- I40E_VF_STATE_RESETTING
+ I40E_VF_STATE_RESETTING,
+ I40E_VF_STATE_RESOURCES_LOADED,
};
/* VF capabilities */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index d2871757ec94..41e7e29879a3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -894,10 +894,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
sinfo->xdp_frags_size += size;
- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
- * can pop off frags but driver has to handle it on its own
- */
- rx_ring->nr_frags = sinfo->nr_frags;
if (page_is_pfmemalloc(rx_buf->page))
xdp_buff_set_frag_pfmemalloc(xdp);
@@ -968,20 +964,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
/**
* ice_get_pgcnts - grab page_count() for gathered fragments
* @rx_ring: Rx descriptor ring to store the page counts on
+ * @ntc: the next to clean element (not included in this frame!)
*
* This function is intended to be called right before running the XDP
* program so that the page recycling mechanism will be able to make
* a correct decision regarding the underlying pages; this is done this
* way because the XDP program can change the refcount of a page
*/
-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
{
- u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
struct ice_rx_buf *rx_buf;
u32 cnt = rx_ring->count;
- for (int i = 0; i < nr_frags; i++) {
+ while (idx != ntc) {
rx_buf = &rx_ring->rx_buf[idx];
rx_buf->pgcnt = page_count(rx_buf->page);
@@ -1154,62 +1150,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
* @rx_ring: Rx ring with all the auxiliary data
* @xdp: XDP buffer carrying linear + frags part
- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
- * @ntc: a current next_to_clean value to be stored at rx_ring
+ * @ntc: the next to clean element (not included in this frame!)
* @verdict: return code from XDP program execution
*
- * Walk through gathered fragments and satisfy internal page
- * recycle mechanism; we take here an action related to verdict
- * returned by XDP program;
+ * Called after XDP program is completed, or on error with verdict set to
+ * ICE_XDP_CONSUMED.
+ *
+ * Walk through buffers from first_desc to the end of the frame, releasing
+ * buffers and satisfying internal page recycle mechanism. The action depends
+ * on verdict from XDP program.
*/
static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- u32 *xdp_xmit, u32 ntc, u32 verdict)
+ u32 ntc, u32 verdict)
{
- u32 nr_frags = rx_ring->nr_frags + 1;
u32 idx = rx_ring->first_desc;
u32 cnt = rx_ring->count;
- u32 post_xdp_frags = 1;
struct ice_rx_buf *buf;
- int i;
+ u32 xdp_frags = 0;
+ int i = 0;
if (unlikely(xdp_buff_has_frags(xdp)))
- post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+ xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- for (i = 0; i < post_xdp_frags; i++) {
+ while (idx != ntc) {
buf = &rx_ring->rx_buf[idx];
+ if (++idx == cnt)
+ idx = 0;
- if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ /* An XDP program could release fragments from the end of the
+ * buffer. For these, we need to keep the pagecnt_bias as-is.
+ * To do this, only adjust pagecnt_bias for fragments up to
+ * the total remaining after the XDP program has run.
+ */
+ if (verdict != ICE_XDP_CONSUMED)
ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- *xdp_xmit |= verdict;
- } else if (verdict & ICE_XDP_CONSUMED) {
+ else if (i++ <= xdp_frags)
buf->pagecnt_bias++;
- } else if (verdict == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
ice_put_rx_buf(rx_ring, buf);
-
- if (++idx == cnt)
- idx = 0;
- }
- /* handle buffers that represented frags released by XDP prog;
- * for these we keep pagecnt_bias as-is; refcount from struct page
- * has been decremented within XDP prog and we do not have to increase
- * the biased refcnt
- */
- for (; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- ice_put_rx_buf(rx_ring, buf);
- if (++idx == cnt)
- idx = 0;
}
xdp->data = NULL;
rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
}
/**
@@ -1317,6 +1302,10 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
+ /* Increment ntc before calls to ice_put_rx_mbuf() */
+ if (++ntc == cnt)
+ ntc = 0;
+
if (!xdp->data) {
void *hard_start;
@@ -1325,24 +1314,23 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
- ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
break;
}
- if (++ntc == cnt)
- ntc = 0;
/* skip if it is NOP desc */
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_get_pgcnts(rx_ring);
+ ice_get_pgcnts(rx_ring, ntc);
xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+ xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
continue;
construct_skb:
@@ -1355,7 +1343,7 @@ construct_skb:
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
xdp_verdict = ICE_XDP_CONSUMED;
}
- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
if (!skb)
break;
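The ice changes above drop the stored frag count and instead walk the ring from first_desc up to (but not including) ntc, wrapping at the ring size. A minimal sketch of that traversal pattern, with an illustrative ring size:

    #include <stdio.h>

    #define RING_SIZE 8

    /* Visit every buffer of a frame in a circular ring, stopping at the
     * next-to-clean index, which is not part of this frame. */
    static void walk_frame(unsigned int first_desc, unsigned int ntc)
    {
        unsigned int idx = first_desc;

        while (idx != ntc) {
            printf("process buffer %u\n", idx);
            if (++idx == RING_SIZE)   /* wrap around the ring */
                idx = 0;
        }
    }

    int main(void)
    {
        walk_frame(6, 2);   /* visits 6, 7, 0, 1 */
        return 0;
    }

Deriving the frame extent from the indices themselves removes the need to keep nr_frags in sync with what the XDP program may have done to the buffer.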
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fef750c5f288..2fd8e78178a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -358,7 +358,6 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u32 nr_frags;
u16 max_frame;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 266bfcf2a28f..a427f05814c1 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -345,6 +345,7 @@ struct igc_adapter {
/* LEDs */
struct mutex led_mutex;
struct igc_led_classdev *leds;
+ bool leds_available;
};
void igc_up(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e79b14d50b24..728d7ca5338b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -7335,8 +7335,14 @@ static int igc_probe(struct pci_dev *pdev,
if (IS_ENABLED(CONFIG_IGC_LEDS)) {
err = igc_led_setup(adapter);
- if (err)
- goto err_register;
+ if (err) {
+ netdev_warn_once(netdev,
+ "LED init failed (%d); continuing without LED support\n",
+ err);
+ adapter->leds_available = false;
+ } else {
+ adapter->leds_available = true;
+ }
}
return 0;
@@ -7392,7 +7398,7 @@ static void igc_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
hrtimer_cancel(&adapter->hrtimer);
- if (IS_ENABLED(CONFIG_IGC_LEDS))
+ if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
igc_led_free(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
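The igc hunks above turn an LED setup failure from a fatal probe error into a degraded mode, recording availability so teardown only frees what was set up. A small userspace sketch of the pattern, where led_setup() and led_free() are stand-ins, not the driver's functions:

    #include <stdbool.h>
    #include <stdio.h>

    struct adapter {
        bool leds_available;
    };

    static int led_setup(void) { return -1; /* pretend init failed */ }
    static void led_free(void) { printf("freeing LEDs\n"); }

    static int adapter_probe(struct adapter *a)
    {
        if (led_setup()) {
            fprintf(stderr, "LED init failed; continuing without LEDs\n");
            a->leds_available = false;
        } else {
            a->leds_available = true;
        }
        return 0;                 /* probe still succeeds */
    }

    static void adapter_remove(struct adapter *a)
    {
        if (a->leds_available)    /* never free what was not set up */
            led_free();
    }

    int main(void)
    {
        struct adapter a = { 0 };

        adapter_probe(&a);
        adapter_remove(&a);
        return 0;
    }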
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 80e6a2ef1350..6218bdb7f941 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6973,6 +6973,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
break;
}
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_init(&hw->aci.lock);
+
#ifdef IXGBE_FCOE
/* FCoE support exists, always init the FCoE lock */
spin_lock_init(&adapter->fcoe.lock);
@@ -11643,10 +11650,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /* Make sure the SWFW semaphore is in a valid state */
- if (hw->mac.ops.init_swfw_sync)
- hw->mac.ops.init_swfw_sync(hw);
-
if (ixgbe_check_fw_error(adapter))
return ixgbe_recovery_probe(adapter);
@@ -11850,8 +11853,6 @@ skip_sriov:
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ixgbe_mac_set_default_filter(adapter);
- if (hw->mac.type == ixgbe_mac_e610)
- mutex_init(&hw->aci.lock);
timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
if (ixgbe_removed(hw->hw_addr)) {
@@ -12007,9 +12008,9 @@ err_register:
devl_unlock(adapter->devlink);
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
+err_sw_init:
if (hw->mac.type == ixgbe_mac_e610)
mutex_destroy(&adapter->hw.aci.lock);
-err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
@@ -12060,10 +12061,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
- if (adapter->hw.mac.type == ixgbe_mac_e610) {
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
ixgbe_disable_link_status_events(adapter);
- mutex_destroy(&adapter->hw.aci.lock);
- }
if (adapter->mii_bus)
mdiobus_unregister(adapter->mii_bus);
@@ -12123,6 +12122,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
+ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
+
if (disable_dev)
pci_disable_device(pdev);
}
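The ixgbe hunks above move mutex_init() ahead of the failure points and shift the err_sw_init label so the mutex is destroyed only on paths where it was initialized. A compilable sketch of that goto-unwinding discipline, under the usual assumption that resources are released in reverse order of acquisition:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock;

    static int step_a(void) { return 0; }
    static int step_b(void) { return -1; /* pretend this fails */ }

    static int probe(void)
    {
        int err;

        err = step_a();
        if (err)
            goto out;

        pthread_mutex_init(&lock, NULL);   /* acquired after step_a */

        err = step_b();
        if (err)
            goto err_destroy_lock;

        return 0;

    err_destroy_lock:
        pthread_mutex_destroy(&lock);      /* only if init already ran */
    out:
        return err;
    }

    int main(void)
    {
        printf("probe: %d\n", probe());
        return 0;
    }

Each error label sits just below the step it undoes, so a failure at step N unwinds exactly steps 1..N-1, which is what the relocated err_sw_init label restores.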
diff --git a/drivers/net/ethernet/intel/libie/adminq.c b/drivers/net/ethernet/intel/libie/adminq.c
index 55356548e3f0..7b4ff479e7e5 100644
--- a/drivers/net/ethernet/intel/libie/adminq.c
+++ b/drivers/net/ethernet/intel/libie/adminq.c
@@ -6,7 +6,7 @@
static const char * const libie_aq_str_arr[] = {
#define LIBIE_AQ_STR(x) \
- [LIBIE_AQ_RC_##x] = "LIBIE_AQ_RC" #x
+ [LIBIE_AQ_RC_##x] = "LIBIE_AQ_RC_" #x
LIBIE_AQ_STR(OK),
LIBIE_AQ_STR(EPERM),
LIBIE_AQ_STR(ENOENT),
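The libie one-character fix above restores the underscore that string-literal concatenation needs: with `"LIBIE_AQ_RC" #x` the table held names like "LIBIE_AQ_RCOK". A standalone sketch of the same token-pasting plus stringizing pattern, with illustrative names:

    #include <stdio.h>

    enum { AQ_RC_OK, AQ_RC_EPERM, AQ_RC_ENOENT };

    /* ## builds the array index from the token; #x turns the token into
     * a string literal, which the compiler concatenates with the prefix. */
    static const char * const aq_str[] = {
    #define AQ_STR(x) [AQ_RC_##x] = "AQ_RC_" #x
        AQ_STR(OK),
        AQ_STR(EPERM),
        AQ_STR(ENOENT),
    #undef AQ_STR
    };

    int main(void)
    {
        printf("%s\n", aq_str[AQ_RC_EPERM]);   /* AQ_RC_EPERM */
        return 0;
    }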
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 24499bb36c00..bcea3fc26a8c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1124,11 +1124,24 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
return err;
}
+static bool octep_is_vf_valid(struct octep_device *oct, int vf)
+{
+ if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) {
+ netdev_err(oct->netdev, "Invalid VF ID %d\n", vf);
+ return false;
+ }
+
+ return true;
+}
+
static int octep_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi)
{
struct octep_device *oct = netdev_priv(dev);
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
ivi->vf = vf;
ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
ivi->spoofchk = true;
@@ -1143,6 +1156,9 @@ static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
struct octep_device *oct = netdev_priv(dev);
int err;
+ if (!octep_is_vf_valid(oct, vf))
+ return -EINVAL;
+
if (!is_valid_ether_addr(mac)) {
dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
return -EADDRNOTAVAIL;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index ebecdd29f3bd..0867fab61b19 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -196,6 +196,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
vf_id);
return;
}
+ ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
}
@@ -205,6 +206,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
{
int err;
+ /* Reset VF-specific information maintained by the PF */
+ memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
err = octep_ctrl_net_dev_remove(oct, vf_id);
if (err) {
rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 0c46ba8a5adc..69324ae09397 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -21,8 +21,7 @@
#include "rvu.h"
#include "lmac_common.h"
-#define DRV_NAME "Marvell-CGX/RPM"
-#define DRV_STRING "Marvell CGX/RPM Driver"
+#define DRV_NAME "Marvell-CGX-RPM"
#define CGX_RX_STAT_GLOBAL_INDEX 9
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index e52cc6b1a26c..dedd586ed310 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
+ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
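Switching to cancel_delayed_work_sync() above guarantees the work item has finished before ptp is freed; the plain cancel_delayed_work() could return while the handler was still running. A userspace analogue using pthreads, with join standing in for the synchronous cancel (names illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct ctx {
        int value;
    };

    static void *worker(void *arg)
    {
        struct ctx *c = arg;

        usleep(1000);               /* pretend to do delayed work */
        printf("worker sees %d\n", c->value);
        return NULL;
    }

    int main(void)
    {
        struct ctx *c = malloc(sizeof(*c));
        pthread_t t;

        c->value = 42;
        pthread_create(&t, NULL, worker, c);

        /* Freeing c before the join would let the worker dereference
         * freed memory; joining first is the sync-cancel equivalent. */
        pthread_join(t, NULL);
        free(c);
        return 0;
    }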
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 5f80b23c5335..26a08d2cfbb1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -1326,7 +1326,6 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
free_leaf:
otx2_tc_del_from_flow_list(flow_cfg, new_node);
- kfree_rcu(new_node, rcu);
if (new_node->is_act_police) {
mutex_lock(&nic->mbox.lock);
@@ -1346,6 +1345,7 @@ free_leaf:
mutex_unlock(&nic->mbox.lock);
}
+ kfree_rcu(new_node, rcu);
return rc;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 9560fcba643f..ac65e3191480 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -92,6 +92,7 @@ enum {
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
MLX5E_ACCEL_FS_POL_FT_LEVEL,
+ MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index ffcd0cdeb775..23703f28386a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -185,6 +185,7 @@ struct mlx5e_ipsec_rx_create_attr {
u32 family;
int prio;
int pol_level;
+ int pol_miss_level;
int sa_level;
int status_level;
enum mlx5_flow_namespace_type chains_ns;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 98b6a3a623f9..65dc3529283b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -747,6 +747,7 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
attr->family = family;
attr->prio = MLX5E_NIC_PRIO;
attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
+ attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
@@ -833,7 +834,7 @@ static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
- ft_attr.level = attr->pol_level;
+ ft_attr.level = attr->pol_miss_level;
ft_attr.prio = attr->prio;
ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e680673ffb72..15eded36b872 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -139,8 +139,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
- NULL, NULL, NULL);
} else {
netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 63a7a788fb0d..cd0242eb008c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1506,12 +1506,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
static int
mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int err;
+
+ netdev = mlx5_uplink_netdev_get(dev);
+ if (!netdev)
+ return 0;
+ priv = netdev_priv(netdev);
rpriv->netdev = priv->netdev;
- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
- rpriv);
+ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+ rpriv);
+ mlx5_uplink_netdev_put(dev, netdev);
+ return err;
}
static void
@@ -1638,8 +1647,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
- struct mlx5e_priv *priv = netdev_priv(netdev);
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+ void *ppriv;
+
+ if (!netdev) {
+ ppriv = rpriv;
+ goto free_ppriv;
+ }
+
+ priv = netdev_priv(netdev);
+ ppriv = priv->ppriv;
if (rep->vport == MLX5_VPORT_UPLINK) {
mlx5e_vport_uplink_rep_unload(rpriv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 87536f158d07..c6185ddba04b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -1466,6 +1466,7 @@ static void fec_set_block_stats(struct mlx5e_priv *priv,
case MLX5E_FEC_RS_528_514:
case MLX5E_FEC_RS_544_514:
case MLX5E_FEC_LLRS_272_257_1:
+ case MLX5E_FEC_RS_544_514_INTERLEAVED_QUAD:
fec_set_rs_stats(fec_stats, out);
return;
case MLX5E_FEC_FIRECODE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 8b4977650183..5f2d6c35f1ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -1515,6 +1515,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
speed = lksettings.base.speed;
out:
+ mlx5_uplink_netdev_put(mdev, slave);
return speed;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index cb165085a4c1..80245c38dbad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -114,9 +114,9 @@
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
- * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
+ * IPsec policy miss, {IPsec RoCE MPV,Alias table},IPsec RoCE policy
*/
-#define KERNEL_NIC_PRIO_NUM_LEVELS 10
+#define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc, and one more for promisc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
@@ -663,7 +663,7 @@ static void del_sw_hw_rule(struct fs_node *node)
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
- mlx5_fc_local_destroy(rule->dest_attr.counter);
+ mlx5_fc_local_put(rule->dest_attr.counter);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 500826229b0b..e6a95b310b55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -343,6 +343,7 @@ struct mlx5_fc {
enum mlx5_fc_type type;
struct mlx5_fc_bulk *bulk;
struct mlx5_fc_cache cache;
+ refcount_t fc_local_refcount;
/* last{packets,bytes} are used for calculating deltas since last reading. */
u64 lastpackets;
u64 lastbytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 492775d3d193..83001eda3884 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -562,17 +562,36 @@ mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size)
counter->id = counter_id;
fc_bulk->base_id = counter_id - offset;
fc_bulk->fs_bulk.bulk_len = bulk_size;
+ refcount_set(&fc_bulk->hws_data.hws_action_refcount, 0);
+ mutex_init(&fc_bulk->hws_data.lock);
counter->bulk = fc_bulk;
+ refcount_set(&counter->fc_local_refcount, 1);
return counter;
}
EXPORT_SYMBOL(mlx5_fc_local_create);
void mlx5_fc_local_destroy(struct mlx5_fc *counter)
{
- if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
- return;
-
kfree(counter->bulk);
kfree(counter);
}
EXPORT_SYMBOL(mlx5_fc_local_destroy);
+
+void mlx5_fc_local_get(struct mlx5_fc *counter)
+{
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
+
+ refcount_inc(&counter->fc_local_refcount);
+}
+
+void mlx5_fc_local_put(struct mlx5_fc *counter)
+{
+ if (!counter || counter->type != MLX5_FC_TYPE_LOCAL)
+ return;
+
+ if (!refcount_dec_and_test(&counter->fc_local_refcount))
+ return;
+
+ mlx5_fc_local_destroy(counter);
+}
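The new mlx5_fc_local_get()/mlx5_fc_local_put() pair above is a standard refcount lifetime: the creator holds the initial reference and the final put destroys the object. A compilable userspace sketch with C11 atomics standing in for refcount_t (illustrative names):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct counter {
        atomic_int refcount;
    };

    static struct counter *counter_create(void)
    {
        struct counter *c = malloc(sizeof(*c));

        atomic_init(&c->refcount, 1);      /* creator's reference */
        return c;
    }

    static void counter_get(struct counter *c)
    {
        atomic_fetch_add(&c->refcount, 1);
    }

    static void counter_put(struct counter *c)
    {
        /* fetch_sub returns the old value; 1 means we dropped the last ref */
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
            printf("last reference dropped, freeing\n");
            free(c);
        }
    }

    int main(void)
    {
        struct counter *c = counter_create();

        counter_get(c);     /* a user takes a reference */
        counter_put(c);     /* ...and drops it */
        counter_put(c);     /* creator's put frees the object */
        return 0;
    }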
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index b111ccd03b02..74ea5da58b7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -47,7 +47,20 @@ int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data);
static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
- return mdev->mlx5e_res.uplink_netdev;
+ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
+ struct net_device *netdev;
+
+ mutex_lock(&mlx5e_res->uplink_netdev_lock);
+ netdev = mlx5e_res->uplink_netdev;
+ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
+ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
+ return netdev;
+}
+
+static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ netdev_put(netdev, &mdev->mlx5e_res.tracker);
}
struct mlx5_sd;
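mlx5_uplink_netdev_get() above now pins the netdev with netdev_hold() while still holding uplink_netdev_lock, so the pointer it returns cannot be torn down between lookup and use, and every caller must balance it with mlx5_uplink_netdev_put(). A sketch of that lookup-then-pin-under-lock pattern, with a pthread mutex and C11 atomics standing in for the kernel primitives (names illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct object {
        atomic_int refcount;
    };

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct object *registered;      /* may be replaced or cleared */

    static struct object *object_get(void)
    {
        struct object *obj;

        pthread_mutex_lock(&registry_lock);
        obj = registered;
        if (obj)
            atomic_fetch_add(&obj->refcount, 1);   /* pin under the lock */
        pthread_mutex_unlock(&registry_lock);
        return obj;
    }

    static void object_put(struct object *obj)
    {
        if (obj)
            atomic_fetch_sub(&obj->refcount, 1);
    }

    int main(void)
    {
        struct object o = { 1 };
        struct object *ref;

        registered = &o;
        ref = object_get();
        printf("got %p, refcount now %d\n", (void *)ref,
               atomic_load(&o.refcount));
        object_put(ref);
        return 0;
    }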
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 2d7adf7444ba..aa9f2b0a77d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -1170,7 +1170,11 @@ const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
force_legacy);
i = find_first_bit(&temp, max_size);
- if (i < max_size)
+
+ /* mlx5e_link_info has holes. Check speed
+ * is not zero as an indication of one.
+ */
+ if (i < max_size && table[i].speed)
return &table[i];
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index 6b36a4a7d895..fe56b59e24c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -1360,7 +1360,7 @@ free_action:
struct mlx5hws_action *
mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level, u32 flags)
+ u32 flags)
{
struct mlx5hws_cmd_set_fte_dest *dest_list = NULL;
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
@@ -1397,7 +1397,7 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- fte_attr.ignore_flow_level = ignore_flow_level;
+ fte_attr.ignore_flow_level = 1;
if (dests[i].is_wire_ft)
last_dest_idx = i;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 131e74b2b774..6a4c4cccd643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -572,12 +572,12 @@ static void mlx5_fs_put_dest_action_sampler(struct mlx5_fs_hws_context *fs_ctx,
static struct mlx5hws_action *
mlx5_fs_create_action_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_action_dest_attr *dests,
- u32 num_of_dests, bool ignore_flow_level)
+ u32 num_of_dests)
{
u32 flags = MLX5HWS_ACTION_FLAG_HWS_FDB | MLX5HWS_ACTION_FLAG_SHARED;
return mlx5hws_action_create_dest_array(ctx, num_of_dests, dests,
- ignore_flow_level, flags);
+ flags);
}
static struct mlx5hws_action *
@@ -1014,19 +1014,14 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
}
(*ractions)[num_actions++].action = dest_actions->dest;
} else if (num_dest_actions > 1) {
- bool ignore_flow_level;
-
if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
num_fs_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
err = -EOPNOTSUPP;
goto free_actions;
}
- ignore_flow_level =
- !!(fte_action->flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
tmp_action =
mlx5_fs_create_action_dest_array(ctx, dest_actions,
- num_dest_actions,
- ignore_flow_level);
+ num_dest_actions);
if (!tmp_action) {
err = -EOPNOTSUPP;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
index f1ecdba74e1f..839d71bd4216 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c
@@ -407,15 +407,21 @@ struct mlx5hws_action *mlx5_fc_get_hws_action(struct mlx5hws_context *ctx,
{
struct mlx5_fs_hws_create_action_ctx create_ctx;
struct mlx5_fc_bulk *fc_bulk = counter->bulk;
+ struct mlx5hws_action *hws_action;
create_ctx.hws_ctx = ctx;
create_ctx.id = fc_bulk->base_id;
create_ctx.actions_type = MLX5HWS_ACTION_TYP_CTR;
- return mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
+ mlx5_fc_local_get(counter);
+ hws_action = mlx5_fs_get_hws_action(&fc_bulk->hws_data, &create_ctx);
+ if (!hws_action)
+ mlx5_fc_local_put(counter);
+ return hws_action;
}
void mlx5_fc_put_hws_action(struct mlx5_fc *counter)
{
mlx5_fs_put_hws_action(&counter->bulk->hws_data);
+ mlx5_fc_local_put(counter);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 2498ceff2060..1ad7a50d938b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -735,7 +735,6 @@ mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
* @num_dest: The number of dests attributes.
* @dests: The destination array. Each contains a destination action and can
* have additional actions.
- * @ignore_flow_level: Whether to turn on 'ignore_flow_level' for this dest.
* @flags: Action creation flags (enum mlx5hws_action_flags).
*
* Return: pointer to mlx5hws_action on success, NULL otherwise.
@@ -743,7 +742,7 @@ mlx5hws_action_create_push_vlan(struct mlx5hws_context *ctx, u32 flags);
struct mlx5hws_action *
mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx, size_t num_dest,
struct mlx5hws_action_dest_attr *dests,
- bool ignore_flow_level, u32 flags);
+ u32 flags);
/**
* mlx5hws_action_create_insert_header - Create insert header action.
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 56d5464222d9..cdbf82affa7b 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
struct rx_info *info = &dev->rx_info;
unsigned next_rx;
- int rx_rc, len;
+ int len;
u32 cmdsts;
__le32 *desc;
unsigned long flags;
@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
if (likely(CMDSTS_OK & cmdsts)) {
#endif
skb_put(skb, len);
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ ndev->stats.rx_dropped++;
goto netdev_mangle_me_harder_failed;
+ }
if (cmdsts & CMDSTS_DEST_MULTI)
ndev->stats.multicast++;
ndev->stats.rx_packets++;
@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
}
#endif
- rx_rc = netif_rx(skb);
- if (NET_RX_DROP == rx_rc) {
-netdev_mangle_me_harder_failed:
- ndev->stats.rx_dropped++;
- }
+ netif_rx(skb);
} else {
dev_kfree_skb_irq(skb);
}
+netdev_mangle_me_harder_failed:
nr++;
next_rx = info->next_rx;
desc = info->descs + (DESC_SIZE * next_rx);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 9c3d3dd2f847..1f0cea3cae92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -4462,10 +4462,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
goto out;
}
- /* Add override window info to buffer */
+ /* Add override window info to buffer, preventing buffer overflow */
override_window_dwords =
- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
+ PROTECTION_OVERRIDE_DEPTH_DWORDS);
if (override_window_dwords) {
addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
offset += qed_grc_dump_addr_range(p_hwfn,
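The qed fix above treats a count read back from hardware as untrusted and clamps the derived length to the destination capacity before dumping. A minimal sketch of the clamp, with illustrative sizes:

    #include <stdio.h>

    #define ELEMENT_DWORDS       2
    #define WINDOW_DEPTH_DWORDS 16   /* capacity of the dump region */

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* A length derived from a hardware register must never be allowed
     * to exceed the buffer it is copied into. */
    static unsigned int override_window_dwords(unsigned int valid_from_hw)
    {
        return min_u32(valid_from_hw * ELEMENT_DWORDS,
                       WINDOW_DEPTH_DWORDS);
    }

    int main(void)
    {
        printf("%u\n", override_window_dwords(3));    /* 6: in bounds */
        printf("%u\n", override_window_dwords(100));  /* 16: clamped */
        return 0;
    }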
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index eba8b5fb1365..d3501f8487d9 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -597,10 +597,6 @@ static int bcm_ptp_perout_locked(struct bcm_ptp_private *priv,
period = BCM_MAX_PERIOD_8NS; /* write nonzero value */
- /* Reject unsupported flags */
- if (req->flags & ~PTP_PEROUT_DUTY_CYCLE)
- return -EOPNOTSUPP;
-
if (req->flags & PTP_PEROUT_DUTY_CYCLE)
pulse = ktime_to_ns(ktime_set(req->on.sec, req->on.nsec));
else
@@ -741,6 +737,8 @@ static const struct ptp_clock_info bcm_ptp_clock_info = {
.n_pins = 1,
.n_per_out = 1,
.n_ext_ts = 1,
+ .supported_perout_flags = PTP_PEROUT_DUTY_CYCLE,
+ .supported_extts_flags = PTP_STRICT_FLAGS | PTP_RISING_EDGE,
};
static void bcm_ptp_txtstamp(struct mii_timestamper *mii_ts,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index cc6c50180663..47ddcb4b9a78 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1875,6 +1875,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
local_bh_enable();
goto unlock_frags;
}
+
+ if (frags && skb != tfile->napi.skb)
+ tfile->napi.skb = skb;
}
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
index d912e709a92c..bb03dad4a300 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c
@@ -2092,7 +2092,7 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
break;
}
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_7000 &&
trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = DIV_ROUND_UP(len, 4);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 74aaea61de13..d2b690857e58 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -519,6 +519,7 @@ int of_irq_count(struct device_node *dev)
return nr;
}
+EXPORT_SYMBOL_GPL(of_irq_count);
/**
* of_irq_to_resource_table - Fill in resource table with node's IRQ info
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
index 0938ef7ebabf..dfb61f152702 100644
--- a/drivers/pci/msi/irqdomain.c
+++ b/drivers/pci/msi/irqdomain.c
@@ -148,20 +148,43 @@ static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *d
arg->hwirq = desc->msi_index;
}
-static __always_inline void cond_mask_parent(struct irq_data *data)
+static void cond_shutdown_parent(struct irq_data *data)
{
struct msi_domain_info *info = data->domain->host_data;
- if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ irq_chip_shutdown_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
irq_chip_mask_parent(data);
}
-static __always_inline void cond_unmask_parent(struct irq_data *data)
+static unsigned int cond_startup_parent(struct irq_data *data)
{
struct msi_domain_info *info = data->domain->host_data;
- if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
+ if (unlikely(info->flags & MSI_FLAG_PCI_MSI_STARTUP_PARENT))
+ return irq_chip_startup_parent(data);
+ else if (unlikely(info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT))
irq_chip_unmask_parent(data);
+
+ return 0;
+}
+
+static void pci_irq_shutdown_msi(struct irq_data *data)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+ pci_msi_mask(desc, BIT(data->irq - desc->irq));
+ cond_shutdown_parent(data);
+}
+
+static unsigned int pci_irq_startup_msi(struct irq_data *data)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msi_unmask(desc, BIT(data->irq - desc->irq));
+ return ret;
}
static void pci_irq_mask_msi(struct irq_data *data)
@@ -169,14 +192,12 @@ static void pci_irq_mask_msi(struct irq_data *data)
struct msi_desc *desc = irq_data_get_msi_desc(data);
pci_msi_mask(desc, BIT(data->irq - desc->irq));
- cond_mask_parent(data);
}
static void pci_irq_unmask_msi(struct irq_data *data)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
- cond_unmask_parent(data);
pci_msi_unmask(desc, BIT(data->irq - desc->irq));
}
@@ -194,6 +215,8 @@ static void pci_irq_unmask_msi(struct irq_data *data)
static const struct msi_domain_template pci_msi_template = {
.chip = {
.name = "PCI-MSI",
+ .irq_startup = pci_irq_startup_msi,
+ .irq_shutdown = pci_irq_shutdown_msi,
.irq_mask = pci_irq_mask_msi,
.irq_unmask = pci_irq_unmask_msi,
.irq_write_msi_msg = pci_msi_domain_write_msg,
@@ -210,15 +233,27 @@ static const struct msi_domain_template pci_msi_template = {
},
};
+static void pci_irq_shutdown_msix(struct irq_data *data)
+{
+ pci_msix_mask(irq_data_get_msi_desc(data));
+ cond_shutdown_parent(data);
+}
+
+static unsigned int pci_irq_startup_msix(struct irq_data *data)
+{
+ unsigned int ret = cond_startup_parent(data);
+
+ pci_msix_unmask(irq_data_get_msi_desc(data));
+ return ret;
+}
+
static void pci_irq_mask_msix(struct irq_data *data)
{
pci_msix_mask(irq_data_get_msi_desc(data));
- cond_mask_parent(data);
}
static void pci_irq_unmask_msix(struct irq_data *data)
{
- cond_unmask_parent(data);
pci_msix_unmask(irq_data_get_msi_desc(data));
}
@@ -234,6 +269,8 @@ EXPORT_SYMBOL_GPL(pci_msix_prepare_desc);
static const struct msi_domain_template pci_msix_template = {
.chip = {
.name = "PCI-MSIX",
+ .irq_startup = pci_irq_startup_msix,
+ .irq_shutdown = pci_irq_shutdown_msix,
.irq_mask = pci_irq_mask_msix,
.irq_unmask = pci_irq_unmask_msix,
.irq_write_msi_msg = pci_msi_domain_write_msg,
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index b0f4d98036cd..005b92e6585e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5932,6 +5932,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
int ret;
+ unsigned int firstbit;
struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
@@ -5949,7 +5950,10 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
rq = mps;
}
- v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, ffs(rq) - 8);
+ firstbit = ffs(rq);
+ if (firstbit < 8)
+ return -EINVAL;
+ v = FIELD_PREP(PCI_EXP_DEVCTL_READRQ, firstbit - 8);
if (bridge->no_inc_mrrs) {
int max_mrrs = pcie_get_readrq(dev);
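The pcie_set_readrq() hardening above validates ffs(rq) before subtracting 8, since the DEVCTL field encodes the read request size as a power-of-two exponent relative to 128 bytes (128 -> 0, 256 -> 1, ... 4096 -> 5). A standalone sketch of the encoding and the new guard, where readrq_to_field() is an illustrative name:

    #include <stdio.h>
    #include <strings.h>   /* ffs() */

    static int readrq_to_field(int rq)
    {
        int firstbit;

        if (rq < 128 || rq > 4096 || (rq & (rq - 1)))
            return -1;             /* not a power of two in range */

        firstbit = ffs(rq);        /* 1-based index of lowest set bit */
        if (firstbit < 8)
            return -1;             /* would underflow the encoding */
        return firstbit - 8;
    }

    int main(void)
    {
        printf("%d %d %d\n", readrq_to_field(128),   /* 0 */
               readrq_to_field(512),                 /* 2 */
               readrq_to_field(96));                 /* -1 */
        return 0;
    }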
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index a9188dec36fe..638321fc9800 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -178,6 +178,15 @@ config FSL_IMX9_DDR_PMU
can give information about memory throughput and other related
events.
+config FUJITSU_UNCORE_PMU
+ tristate "Fujitsu Uncore PMU"
+ depends on (ARM64 && ACPI) || (COMPILE_TEST && 64BIT)
+ help
+ Provides support for the Uncore performance monitor unit (PMU)
+ in Fujitsu processors.
+ Adds the Uncore PMU into the perf events subsystem for
+ monitoring Uncore events.
+
config QCOM_L2_PMU
bool "Qualcomm Technologies L2-cache PMU"
depends on ARCH_QCOM && ARM64 && ACPI
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index 192fc8b16204..ea52711a87e3 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_ARM_XSCALE_PMU) += arm_xscale_pmu.o
obj-$(CONFIG_ARM_SMMU_V3_PMU) += arm_smmuv3_pmu.o
obj-$(CONFIG_FSL_IMX8_DDR_PMU) += fsl_imx8_ddr_perf.o
obj-$(CONFIG_FSL_IMX9_DDR_PMU) += fsl_imx9_ddr_perf.o
+obj-$(CONFIG_FUJITSU_UNCORE_PMU) += fujitsu_uncore_pmu.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 1a0d0e1a2263..8af3563fdf60 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -565,7 +565,7 @@ module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
static ktime_t arm_ccn_pmu_timer_period(void)
{
- return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
+ return us_to_ktime((u64)arm_ccn_pmu_poll_period_us);
}
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 11fb2234b10f..23245352a3fc 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -65,7 +65,7 @@
/* PMU registers occupy the 3rd 4KB page of each node's region */
#define CMN_PMU_OFFSET 0x2000
/* ...except when they don't :( */
-#define CMN_S3_DTM_OFFSET 0xa000
+#define CMN_S3_R1_DTM_OFFSET 0xa000
#define CMN_S3_PMU_OFFSET 0xd900
/* For most nodes, this is all there is */
@@ -233,6 +233,9 @@ enum cmn_revision {
REV_CMN700_R1P0,
REV_CMN700_R2P0,
REV_CMN700_R3P0,
+ REV_CMNS3_R0P0 = 0,
+ REV_CMNS3_R0P1,
+ REV_CMNS3_R1P0,
REV_CI700_R0P0 = 0,
REV_CI700_R1P0,
REV_CI700_R2P0,
@@ -425,8 +428,8 @@ static enum cmn_model arm_cmn_model(const struct arm_cmn *cmn)
static int arm_cmn_pmu_offset(const struct arm_cmn *cmn, const struct arm_cmn_node *dn)
{
if (cmn->part == PART_CMN_S3) {
- if (dn->type == CMN_TYPE_XP)
- return CMN_S3_DTM_OFFSET;
+ if (cmn->rev >= REV_CMNS3_R1P0 && dn->type == CMN_TYPE_XP)
+ return CMN_S3_R1_DTM_OFFSET;
return CMN_S3_PMU_OFFSET;
}
return CMN_PMU_OFFSET;
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index f6d7bab5d555..69c5cc8f5606 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -978,6 +978,32 @@ static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
return -EAGAIN;
}
+static bool armv8pmu_can_use_pmccntr(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
+
+ if (evtype != ARMV8_PMUV3_PERFCTR_CPU_CYCLES)
+ return false;
+
+ /*
+ * A CPU_CYCLES event with threshold counting cannot use PMCCNTR_EL0
+ * since it lacks threshold support.
+ */
+ if (armv8pmu_event_get_threshold(&event->attr))
+ return false;
+
+ /*
+ * PMCCNTR_EL0 is not affected by BRBE controls like BRBCR_ELx.FZP.
+ * So don't use it for branch events.
+ */
+ if (has_branch_stack(event))
+ return false;
+
+ return true;
+}
+
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
struct perf_event *event)
{
@@ -986,8 +1012,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;
/* Always prefer to place a cycle counter into the cycle counter. */
- if ((evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
- !armv8pmu_event_get_threshold(&event->attr) && !has_branch_stack(event)) {
+ if (armv8pmu_can_use_pmccntr(cpuc, event)) {
if (!test_and_set_bit(ARMV8_PMU_CYCLE_IDX, cpuc->used_mask))
return ARMV8_PMU_CYCLE_IDX;
else if (armv8pmu_event_is_64bit(event) &&
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 369e77ad5f13..fa50645fedda 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -86,9 +86,11 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_ERND (1UL << 5)
#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
#define SPE_PMU_FEAT_DISCARD (1UL << 7)
+#define SPE_PMU_FEAT_EFT (1UL << 8)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
+ u64 pmsevfr_res0;
u16 max_record_sz;
u16 align;
struct perf_output_handle __percpu *handle;
@@ -97,7 +99,8 @@ struct arm_spe_pmu {
#define to_spe_pmu(p) (container_of(p, struct arm_spe_pmu, pmu))
/* Convert a free-running index from perf into an SPE buffer offset */
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
/* Keep track of our dynamic hotplug state */
static enum cpuhp_state arm_spe_pmu_online;
@@ -115,6 +118,7 @@ enum arm_spe_pmu_capabilities {
SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_CNT_SZ = SPE_PMU_CAP_FEAT_MAX,
SPE_PMU_CAP_MIN_IVAL,
+ SPE_PMU_CAP_EVENT_FILTER,
};
static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
@@ -122,7 +126,7 @@ static int arm_spe_pmu_feat_caps[SPE_PMU_CAP_FEAT_MAX] = {
[SPE_PMU_CAP_ERND] = SPE_PMU_FEAT_ERND,
};
-static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
+static u64 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
{
if (cap < SPE_PMU_CAP_FEAT_MAX)
return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]);
@@ -132,6 +136,8 @@ static u32 arm_spe_pmu_cap_get(struct arm_spe_pmu *spe_pmu, int cap)
return spe_pmu->counter_sz;
case SPE_PMU_CAP_MIN_IVAL:
return spe_pmu->min_period;
+ case SPE_PMU_CAP_EVENT_FILTER:
+ return ~spe_pmu->pmsevfr_res0;
default:
WARN(1, "unknown cap %d\n", cap);
}
@@ -148,7 +154,19 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
container_of(attr, struct dev_ext_attribute, attr);
int cap = (long)ea->var;
- return sysfs_emit(buf, "%u\n", arm_spe_pmu_cap_get(spe_pmu, cap));
+ return sysfs_emit(buf, "%llu\n", arm_spe_pmu_cap_get(spe_pmu, cap));
+}
+
+static ssize_t arm_spe_pmu_cap_show_hex(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+ struct dev_ext_attribute *ea =
+ container_of(attr, struct dev_ext_attribute, attr);
+ int cap = (long)ea->var;
+
+ return sysfs_emit(buf, "0x%llx\n", arm_spe_pmu_cap_get(spe_pmu, cap));
}
#define SPE_EXT_ATTR_ENTRY(_name, _func, _var) \
@@ -158,12 +176,15 @@ static ssize_t arm_spe_pmu_cap_show(struct device *dev,
#define SPE_CAP_EXT_ATTR_ENTRY(_name, _var) \
SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show, _var)
+#define SPE_CAP_EXT_ATTR_ENTRY_HEX(_name, _var) \
+ SPE_EXT_ATTR_ENTRY(_name, arm_spe_pmu_cap_show_hex, _var)
static struct attribute *arm_spe_pmu_cap_attr[] = {
SPE_CAP_EXT_ATTR_ENTRY(arch_inst, SPE_PMU_CAP_ARCH_INST),
SPE_CAP_EXT_ATTR_ENTRY(ernd, SPE_PMU_CAP_ERND),
SPE_CAP_EXT_ATTR_ENTRY(count_size, SPE_PMU_CAP_CNT_SZ),
SPE_CAP_EXT_ATTR_ENTRY(min_interval, SPE_PMU_CAP_MIN_IVAL),
+ SPE_CAP_EXT_ATTR_ENTRY_HEX(event_filter, SPE_PMU_CAP_EVENT_FILTER),
NULL,
};
@@ -197,6 +218,27 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_discard_CFG config /* PMBLIMITR_EL1.FM = DISCARD */
#define ATTR_CFG_FLD_discard_LO 35
#define ATTR_CFG_FLD_discard_HI 35
+#define ATTR_CFG_FLD_branch_filter_mask_CFG config /* PMSFCR_EL1.Bm */
+#define ATTR_CFG_FLD_branch_filter_mask_LO 36
+#define ATTR_CFG_FLD_branch_filter_mask_HI 36
+#define ATTR_CFG_FLD_load_filter_mask_CFG config /* PMSFCR_EL1.LDm */
+#define ATTR_CFG_FLD_load_filter_mask_LO 37
+#define ATTR_CFG_FLD_load_filter_mask_HI 37
+#define ATTR_CFG_FLD_store_filter_mask_CFG config /* PMSFCR_EL1.STm */
+#define ATTR_CFG_FLD_store_filter_mask_LO 38
+#define ATTR_CFG_FLD_store_filter_mask_HI 38
+#define ATTR_CFG_FLD_simd_filter_CFG config /* PMSFCR_EL1.SIMD */
+#define ATTR_CFG_FLD_simd_filter_LO 39
+#define ATTR_CFG_FLD_simd_filter_HI 39
+#define ATTR_CFG_FLD_simd_filter_mask_CFG config /* PMSFCR_EL1.SIMDm */
+#define ATTR_CFG_FLD_simd_filter_mask_LO 40
+#define ATTR_CFG_FLD_simd_filter_mask_HI 40
+#define ATTR_CFG_FLD_float_filter_CFG config /* PMSFCR_EL1.FP */
+#define ATTR_CFG_FLD_float_filter_LO 41
+#define ATTR_CFG_FLD_float_filter_HI 41
+#define ATTR_CFG_FLD_float_filter_mask_CFG config /* PMSFCR_EL1.FPm */
+#define ATTR_CFG_FLD_float_filter_mask_LO 42
+#define ATTR_CFG_FLD_float_filter_mask_HI 42
#define ATTR_CFG_FLD_event_filter_CFG config1 /* PMSEVFR_EL1 */
#define ATTR_CFG_FLD_event_filter_LO 0
@@ -215,8 +257,15 @@ GEN_PMU_FORMAT_ATTR(pa_enable);
GEN_PMU_FORMAT_ATTR(pct_enable);
GEN_PMU_FORMAT_ATTR(jitter);
GEN_PMU_FORMAT_ATTR(branch_filter);
+GEN_PMU_FORMAT_ATTR(branch_filter_mask);
GEN_PMU_FORMAT_ATTR(load_filter);
+GEN_PMU_FORMAT_ATTR(load_filter_mask);
GEN_PMU_FORMAT_ATTR(store_filter);
+GEN_PMU_FORMAT_ATTR(store_filter_mask);
+GEN_PMU_FORMAT_ATTR(simd_filter);
+GEN_PMU_FORMAT_ATTR(simd_filter_mask);
+GEN_PMU_FORMAT_ATTR(float_filter);
+GEN_PMU_FORMAT_ATTR(float_filter_mask);
GEN_PMU_FORMAT_ATTR(event_filter);
GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
@@ -228,8 +277,15 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_pct_enable.attr,
&format_attr_jitter.attr,
&format_attr_branch_filter.attr,
+ &format_attr_branch_filter_mask.attr,
&format_attr_load_filter.attr,
+ &format_attr_load_filter_mask.attr,
&format_attr_store_filter.attr,
+ &format_attr_store_filter_mask.attr,
+ &format_attr_simd_filter.attr,
+ &format_attr_simd_filter_mask.attr,
+ &format_attr_float_filter.attr,
+ &format_attr_float_filter_mask.attr,
&format_attr_event_filter.attr,
&format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
@@ -250,6 +306,16 @@ static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
return 0;
+ if ((attr == &format_attr_branch_filter_mask.attr ||
+ attr == &format_attr_load_filter_mask.attr ||
+ attr == &format_attr_store_filter_mask.attr ||
+ attr == &format_attr_simd_filter.attr ||
+ attr == &format_attr_simd_filter_mask.attr ||
+ attr == &format_attr_float_filter.attr ||
+ attr == &format_attr_float_filter_mask.attr) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_EFT))
+ return 0;
+
return attr->mode;
}
@@ -345,8 +411,15 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
u64 reg = 0;
reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_LDm, ATTR_CFG_GET_FLD(attr, load_filter_mask));
reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_STm, ATTR_CFG_GET_FLD(attr, store_filter_mask));
reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_Bm, ATTR_CFG_GET_FLD(attr, branch_filter_mask));
+ reg |= FIELD_PREP(PMSFCR_EL1_SIMD, ATTR_CFG_GET_FLD(attr, simd_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_SIMDm, ATTR_CFG_GET_FLD(attr, simd_filter_mask));
+ reg |= FIELD_PREP(PMSFCR_EL1_FP, ATTR_CFG_GET_FLD(attr, float_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_FPm, ATTR_CFG_GET_FLD(attr, float_filter_mask));
if (reg)
reg |= PMSFCR_EL1_FT;
@@ -697,20 +770,6 @@ static irqreturn_t arm_spe_pmu_irq_handler(int irq, void *dev)
return IRQ_HANDLED;
}
-static u64 arm_spe_pmsevfr_res0(u16 pmsver)
-{
- switch (pmsver) {
- case ID_AA64DFR0_EL1_PMSVer_IMP:
- return PMSEVFR_EL1_RES0_IMP;
- case ID_AA64DFR0_EL1_PMSVer_V1P1:
- return PMSEVFR_EL1_RES0_V1P1;
- case ID_AA64DFR0_EL1_PMSVer_V1P2:
- /* Return the highest version we support in default */
- default:
- return PMSEVFR_EL1_RES0_V1P2;
- }
-}
-
/* Perf callbacks */
static int arm_spe_pmu_event_init(struct perf_event *event)
{
@@ -726,10 +785,10 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus))
return -ENOENT;
- if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ if (arm_spe_event_to_pmsevfr(event) & spe_pmu->pmsevfr_res0)
return -EOPNOTSUPP;
- if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ if (arm_spe_event_to_pmsnevfr(event) & spe_pmu->pmsevfr_res0)
return -EOPNOTSUPP;
if (attr->exclude_idle)
@@ -762,6 +821,16 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
+ if ((FIELD_GET(PMSFCR_EL1_LDm, reg) ||
+ FIELD_GET(PMSFCR_EL1_STm, reg) ||
+ FIELD_GET(PMSFCR_EL1_Bm, reg) ||
+ FIELD_GET(PMSFCR_EL1_SIMD, reg) ||
+ FIELD_GET(PMSFCR_EL1_SIMDm, reg) ||
+ FIELD_GET(PMSFCR_EL1_FP, reg) ||
+ FIELD_GET(PMSFCR_EL1_FPm, reg)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_EFT))
+ return -EOPNOTSUPP;
+
if (ATTR_CFG_GET_FLD(&event->attr, discard) &&
!(spe_pmu->features & SPE_PMU_FEAT_DISCARD))
return -EOPNOTSUPP;
@@ -1053,6 +1122,9 @@ static void __arm_spe_pmu_dev_probe(void *info)
if (spe_pmu->pmsver >= ID_AA64DFR0_EL1_PMSVer_V1P2)
spe_pmu->features |= SPE_PMU_FEAT_DISCARD;
+ if (FIELD_GET(PMSIDR_EL1_EFT, reg))
+ spe_pmu->features |= SPE_PMU_FEAT_EFT;
+
/* This field has a spaced out encoding, so just use a look-up */
fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
@@ -1107,6 +1179,10 @@ static void __arm_spe_pmu_dev_probe(void *info)
spe_pmu->counter_sz = 16;
}
+ /* Write all 1s and then read back. Unsupported filter bits are RAZ/WI. */
+ write_sysreg_s(U64_MAX, SYS_PMSEVFR_EL1);
+ spe_pmu->pmsevfr_res0 = ~read_sysreg_s(SYS_PMSEVFR_EL1);
+
dev_info(dev,
"probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
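The probe above derives the PMSEVFR_EL1 RES0 mask from the hardware itself rather than from the per-version lookup table that the earlier hunk removes: unsupported event filter bits are RAZ/WI, so after writing all ones, any bit that reads back as zero is reserved. A minimal sketch of the idiom, with hypothetical reg_write()/reg_read() accessors standing in for the sysreg helpers:

	/*
	 * RAZ/WI discovery sketch: attempt to set every bit, then read
	 * back. Bits that stayed clear are unimplemented (RES0).
	 */
	static u64 probe_res0_mask(void)
	{
		reg_write(~0ULL);
		return ~reg_read();
	}
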
diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c
index 146ff57813fb..22f73ac894e9 100644
--- a/drivers/perf/dwc_pcie_pmu.c
+++ b/drivers/perf/dwc_pcie_pmu.c
@@ -39,6 +39,10 @@
#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0)
#define DWC_PCIE_EVENT_PER_CLEAR 0x1
+/* Event Selection Field has two subfields */
+#define DWC_PCIE_CNT_EVENT_SEL_GROUP GENMASK(11, 8)
+#define DWC_PCIE_CNT_EVENT_SEL_EVID GENMASK(7, 0)
+
#define DWC_PCIE_EVENT_CNT_DATA 0xC
#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10
@@ -73,6 +77,10 @@ enum dwc_pcie_event_type {
DWC_PCIE_EVENT_TYPE_MAX,
};
+#define DWC_PCIE_LANE_GROUP_6 6
+#define DWC_PCIE_LANE_GROUP_7 7
+#define DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP 256
+
#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0)
#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0)
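The two subfields make the group/event-id split explicit. A sketch of decoding a raw event select value with the masks above (decode_event_sel() is illustrative, not part of the driver):

	/* Split an event select value into its group and per-group event id. */
	static void decode_event_sel(u32 event_sel, u32 *group, u32 *evid)
	{
		*group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_sel);	/* bits 11:8 */
		*evid = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_sel);	/* bits 7:0 */
	}

For example, an event select of 0x601 decodes to group 6, event id 1.
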
@@ -82,8 +90,11 @@ struct dwc_pcie_pmu {
u16 ras_des_offset;
u32 nr_lanes;
+ /* Groups #6 and #7 */
+ DECLARE_BITMAP(lane_events, 2 * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP);
+ struct perf_event *time_based_event;
+
struct hlist_node cpuhp_node;
- struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX];
int on_cpu;
};
@@ -246,19 +257,26 @@ static const struct attribute_group *dwc_pcie_attr_groups[] = {
};
static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
+ struct perf_event *event,
bool enable)
{
struct pci_dev *pdev = pcie_pmu->pdev;
u16 ras_des_offset = pcie_pmu->ras_des_offset;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
+ u32 ctrl;
+
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
if (enable)
- pci_clear_and_set_config_dword(pdev,
- ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
- DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ ctrl |= FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
else
- pci_clear_and_set_config_dword(pdev,
- ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
- DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
+ ctrl |= FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
+
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
}
static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
@@ -276,11 +294,22 @@ static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
{
struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
struct pci_dev *pdev = pcie_pmu->pdev;
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int lane = DWC_PCIE_EVENT_LANE(event);
u16 ras_des_offset = pcie_pmu->ras_des_offset;
- u32 val;
+ u32 val, ctrl;
+ ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
+ FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
+ FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);
+ ctrl |= FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
+ pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
+ ctrl);
+
return val;
}
@@ -329,26 +358,77 @@ static void dwc_pcie_pmu_event_update(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
- u64 delta, prev, now = 0;
+ u64 delta, prev, now;
+
+ if (type == DWC_PCIE_LANE_EVENT) {
+ now = dwc_pcie_pmu_read_lane_event_counter(event) &
+ DWC_PCIE_LANE_EVENT_MAX_PERIOD;
+ local64_add(now, &event->count);
+ return;
+ }
do {
prev = local64_read(&hwc->prev_count);
-
- if (type == DWC_PCIE_LANE_EVENT)
- now = dwc_pcie_pmu_read_lane_event_counter(event);
- else if (type == DWC_PCIE_TIME_BASE_EVENT)
- now = dwc_pcie_pmu_read_time_based_counter(event);
+ now = dwc_pcie_pmu_read_time_based_counter(event);
} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);
delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
- /* 32-bit counter for Lane Event Counting */
- if (type == DWC_PCIE_LANE_EVENT)
- delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD;
-
local64_add(delta, &event->count);
}
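
Lane counters are now cleared on every read, so the value returned is already a delta and can be added to the event count directly; only the free-running 64-bit time-based counter still needs the prev_count/cmpxchg loop. The two accumulation patterns side by side, as a sketch with hypothetical read_and_clear_hw()/read_hw() accessors and a hypothetical COUNTER_MASK:

	/* Clear-on-read counter: each read is itself the delta. */
	static void update_clear_on_read(struct perf_event *event)
	{
		local64_add(read_and_clear_hw(event) & COUNTER_MASK, &event->count);
	}

	/* Free-running counter: compute the delta against the last snapshot. */
	static void update_free_running(struct perf_event *event)
	{
		u64 prev, now;

		do {
			prev = local64_read(&event->hw.prev_count);
			now = read_hw(event);
		} while (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev);

		local64_add((now - prev) & COUNTER_MASK, &event->count);
	}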
+static int dwc_pcie_pmu_validate_add_lane_event(struct perf_event *event,
+ unsigned long val_lane_events[])
+{
+ int event_id, event_nr, group;
+
+ event_id = DWC_PCIE_EVENT_ID(event);
+ event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id);
+
+ if (group != DWC_PCIE_LANE_GROUP_6 && group != DWC_PCIE_LANE_GROUP_7)
+ return -EINVAL;
+
+ group -= DWC_PCIE_LANE_GROUP_6;
+
+ if (test_and_set_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ val_lane_events))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dwc_pcie_pmu_validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ DECLARE_BITMAP(val_lane_events, 2 * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP);
+ bool time_event = false;
+ int type;
+
+ /* Zero the on-stack bitmap before the test_and_set_bit() calls below. */
+ bitmap_zero(val_lane_events, 2 * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP);
+
+ type = DWC_PCIE_EVENT_TYPE(leader);
+ if (type == DWC_PCIE_TIME_BASE_EVENT)
+ time_event = true;
+ else if (dwc_pcie_pmu_validate_add_lane_event(leader, val_lane_events))
+ return -ENOSPC;
+
+ for_each_sibling_event(sibling, leader) {
+ type = DWC_PCIE_EVENT_TYPE(sibling);
+ if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ if (time_event)
+ return -ENOSPC;
+
+ time_event = true;
+ continue;
+ }
+
+ if (dwc_pcie_pmu_validate_add_lane_event(sibling, val_lane_events))
+ return -ENOSPC;
+ }
+
+ return 0;
+}
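
With the per-event bitmap, a group may now carry many distinct lane events (one per event id in groups 6 and 7) plus at most one time-based event, where the old code allowed only one event of each type; scheduling the same lane event twice, or two time-based events, still fails validation with -ENOSPC. As an illustrative invocation (hypothetical instance and event names), perf stat -e '{dwc_rootport_0/lane_ev_a,lane=0/,dwc_rootport_0/lane_ev_b,lane=0/}' would now count both lane events in parallel.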
+
static int dwc_pcie_pmu_event_init(struct perf_event *event)
{
struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
@@ -367,10 +447,6 @@ static int dwc_pcie_pmu_event_init(struct perf_event *event)
if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
return -EINVAL;
- if (event->group_leader != event &&
- !is_software_event(event->group_leader))
- return -EINVAL;
-
for_each_sibling_event(sibling, event->group_leader) {
if (sibling->pmu != event->pmu && !is_software_event(sibling))
return -EINVAL;
@@ -385,6 +461,9 @@ static int dwc_pcie_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
+ if (dwc_pcie_pmu_validate_group(event))
+ return -ENOSPC;
+
event->cpu = pcie_pmu->on_cpu;
return 0;
@@ -400,7 +479,7 @@ static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
local64_set(&hwc->prev_count, 0);
if (type == DWC_PCIE_LANE_EVENT)
- dwc_pcie_pmu_lane_event_enable(pcie_pmu, true);
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, event, true);
else if (type == DWC_PCIE_TIME_BASE_EVENT)
dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
}
@@ -414,12 +493,13 @@ static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
if (event->hw.state & PERF_HES_STOPPED)
return;
+ dwc_pcie_pmu_event_update(event);
+
if (type == DWC_PCIE_LANE_EVENT)
- dwc_pcie_pmu_lane_event_enable(pcie_pmu, false);
+ dwc_pcie_pmu_lane_event_enable(pcie_pmu, event, false);
else if (type == DWC_PCIE_TIME_BASE_EVENT)
dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);
- dwc_pcie_pmu_event_update(event);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
@@ -434,14 +514,17 @@ static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
u16 ras_des_offset = pcie_pmu->ras_des_offset;
u32 ctrl;
- /* one counter for each type and it is in use */
- if (pcie_pmu->event[type])
- return -ENOSPC;
-
- pcie_pmu->event[type] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (type == DWC_PCIE_LANE_EVENT) {
+ int event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ int group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id) -
+ DWC_PCIE_LANE_GROUP_6;
+
+ if (test_and_set_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ pcie_pmu->lane_events))
+ return -ENOSPC;
+
/* EVENT_COUNTER_DATA_REG needs clear manually */
ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
@@ -450,6 +533,11 @@ static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
ctrl);
} else if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ if (pcie_pmu->time_based_event)
+ return -ENOSPC;
+
+ pcie_pmu->time_based_event = event;
+
/*
* TIME_BASED_ANAL_DATA_REG is a 64 bit register, we can safely
* use it with any manually controlled duration. And it is
@@ -478,7 +566,18 @@ static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
perf_event_update_userpage(event);
- pcie_pmu->event[type] = NULL;
+
+ if (type == DWC_PCIE_TIME_BASE_EVENT) {
+ pcie_pmu->time_based_event = NULL;
+ } else {
+ int event_id = DWC_PCIE_EVENT_ID(event);
+ int event_nr = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_EVID, event_id);
+ int group = FIELD_GET(DWC_PCIE_CNT_EVENT_SEL_GROUP, event_id) -
+ DWC_PCIE_LANE_GROUP_6;
+
+ clear_bit(group * DWC_PCIE_LANE_MAX_EVENTS_PER_GROUP + event_nr,
+ pcie_pmu->lane_events);
+ }
}
static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 267754fdf581..7050b48c0467 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -104,6 +104,11 @@ static const struct imx_ddr_devtype_data imx93_devtype_data = {
.filter_ver = DDR_PERF_AXI_FILTER_V1
};
+static const struct imx_ddr_devtype_data imx94_devtype_data = {
+ .identifier = "imx94",
+ .filter_ver = DDR_PERF_AXI_FILTER_V2
+};
+
static const struct imx_ddr_devtype_data imx95_devtype_data = {
.identifier = "imx95",
.filter_ver = DDR_PERF_AXI_FILTER_V2
@@ -122,6 +127,7 @@ static inline bool axi_filter_v2(struct ddr_pmu *pmu)
static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
{ .compatible = "fsl,imx91-ddr-pmu", .data = &imx91_devtype_data },
{ .compatible = "fsl,imx93-ddr-pmu", .data = &imx93_devtype_data },
+ { .compatible = "fsl,imx94-ddr-pmu", .data = &imx94_devtype_data },
{ .compatible = "fsl,imx95-ddr-pmu", .data = &imx95_devtype_data },
{ /* sentinel */ }
};
diff --git a/drivers/perf/fujitsu_uncore_pmu.c b/drivers/perf/fujitsu_uncore_pmu.c
new file mode 100644
index 000000000000..c3c6f56474ad
--- /dev/null
+++ b/drivers/perf/fujitsu_uncore_pmu.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Uncore PMUs in Fujitsu chips.
+ *
+ * See Documentation/admin-guide/perf/fujitsu_uncore_pmu.rst for more details.
+ *
+ * Copyright (c) 2025 Fujitsu. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/* Number of counters on each PMU */
+#define MAC_NUM_COUNTERS 8
+#define PCI_NUM_COUNTERS 8
+/* Mask for the event type field within perf_event_attr.config and EVTYPE reg */
+#define UNCORE_EVTYPE_MASK 0xFF
+
+/* Perfmon registers */
+#define PM_EVCNTR(__cntr) (0x000 + (__cntr) * 8)
+#define PM_CNTCTL(__cntr) (0x100 + (__cntr) * 8)
+#define PM_CNTCTL_RESET 0
+#define PM_EVTYPE(__cntr) (0x200 + (__cntr) * 8)
+#define PM_EVTYPE_EVSEL(__val) FIELD_GET(UNCORE_EVTYPE_MASK, __val)
+#define PM_CR 0x400
+#define PM_CR_RESET BIT(1)
+#define PM_CR_ENABLE BIT(0)
+#define PM_CNTENSET 0x410
+#define PM_CNTENSET_IDX(__cntr) BIT(__cntr)
+#define PM_CNTENCLR 0x418
+#define PM_CNTENCLR_IDX(__cntr) BIT(__cntr)
+#define PM_CNTENCLR_RESET 0xFF
+#define PM_INTENSET 0x420
+#define PM_INTENSET_IDX(__cntr) BIT(__cntr)
+#define PM_INTENCLR 0x428
+#define PM_INTENCLR_IDX(__cntr) BIT(__cntr)
+#define PM_INTENCLR_RESET 0xFF
+#define PM_OVSR 0x440
+#define PM_OVSR_OVSRCLR_RESET 0xFF
+
+enum fujitsu_uncore_pmu {
+ FUJITSU_UNCORE_PMU_MAC = 1,
+ FUJITSU_UNCORE_PMU_PCI = 2,
+};
+
+struct uncore_pmu {
+ int num_counters;
+ struct pmu pmu;
+ struct hlist_node node;
+ void __iomem *regs;
+ struct perf_event **events;
+ unsigned long *used_mask;
+ int cpu;
+ int irq;
+ struct device *dev;
+};
+
+#define to_uncore_pmu(p) (container_of(p, struct uncore_pmu, pmu))
+
+static int uncore_pmu_cpuhp_state;
+
+static void fujitsu_uncore_counter_start(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ /* Initialize the hardware counter and reset prev_count */
+ local64_set(&event->hw.prev_count, 0);
+ writeq_relaxed(0, uncorepmu->regs + PM_EVCNTR(idx));
+
+ /* Set the event type */
+ writeq_relaxed(PM_EVTYPE_EVSEL(event->attr.config), uncorepmu->regs + PM_EVTYPE(idx));
+
+ /* Enable interrupt generation by this counter */
+ writeq_relaxed(PM_INTENSET_IDX(idx), uncorepmu->regs + PM_INTENSET);
+
+ /* Finally, enable the counter */
+ writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(idx));
+ writeq_relaxed(PM_CNTENSET_IDX(idx), uncorepmu->regs + PM_CNTENSET);
+}
+
+static void fujitsu_uncore_counter_stop(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+
+ /* Disable the counter */
+ writeq_relaxed(PM_CNTENCLR_IDX(idx), uncorepmu->regs + PM_CNTENCLR);
+
+ /* Disable interrupt generation by this counter */
+ writeq_relaxed(PM_INTENCLR_IDX(idx), uncorepmu->regs + PM_INTENCLR);
+}
+
+static void fujitsu_uncore_counter_update(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ int idx = event->hw.idx;
+ u64 prev, new;
+
+ do {
+ prev = local64_read(&event->hw.prev_count);
+ new = readq_relaxed(uncorepmu->regs + PM_EVCNTR(idx));
+ } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);
+
+ local64_add(new - prev, &event->count);
+}
+
+static inline void fujitsu_uncore_init(struct uncore_pmu *uncorepmu)
+{
+ int i;
+
+ writeq_relaxed(PM_CR_RESET, uncorepmu->regs + PM_CR);
+
+ writeq_relaxed(PM_CNTENCLR_RESET, uncorepmu->regs + PM_CNTENCLR);
+ writeq_relaxed(PM_INTENCLR_RESET, uncorepmu->regs + PM_INTENCLR);
+ writeq_relaxed(PM_OVSR_OVSRCLR_RESET, uncorepmu->regs + PM_OVSR);
+
+ for (i = 0; i < uncorepmu->num_counters; ++i) {
+ writeq_relaxed(PM_CNTCTL_RESET, uncorepmu->regs + PM_CNTCTL(i));
+ writeq_relaxed(PM_EVTYPE_EVSEL(0), uncorepmu->regs + PM_EVTYPE(i));
+ }
+ writeq_relaxed(PM_CR_ENABLE, uncorepmu->regs + PM_CR);
+}
+
+static irqreturn_t fujitsu_uncore_handle_irq(int irq_num, void *data)
+{
+ struct uncore_pmu *uncorepmu = data;
+ /* Read the overflow status register */
+ unsigned long status = readq_relaxed(uncorepmu->regs + PM_OVSR);
+ int idx;
+
+ if (status == 0)
+ return IRQ_NONE;
+
+ /* Clear the bits we read on the overflow status register */
+ writeq_relaxed(status, uncorepmu->regs + PM_OVSR);
+
+ for_each_set_bit(idx, &status, uncorepmu->num_counters) {
+ struct perf_event *event;
+
+ event = uncorepmu->events[idx];
+ if (!event)
+ continue;
+
+ fujitsu_uncore_counter_update(event);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void fujitsu_uncore_pmu_enable(struct pmu *pmu)
+{
+ writeq_relaxed(PM_CR_ENABLE, to_uncore_pmu(pmu)->regs + PM_CR);
+}
+
+static void fujitsu_uncore_pmu_disable(struct pmu *pmu)
+{
+ writeq_relaxed(0, to_uncore_pmu(pmu)->regs + PM_CR);
+}
+
+static bool fujitsu_uncore_validate_event_group(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct perf_event *leader = event->group_leader;
+ struct perf_event *sibling;
+ int counters = 1;
+
+ if (leader == event)
+ return true;
+
+ if (leader->pmu == event->pmu)
+ counters++;
+
+ for_each_sibling_event(sibling, leader) {
+ if (sibling->pmu == event->pmu)
+ counters++;
+ }
+
+ /*
+ * If the group requires more counters than the HW has, it
+ * cannot ever be scheduled.
+ */
+ return counters <= uncorepmu->num_counters;
+}
+
+static int fujitsu_uncore_event_init(struct perf_event *event)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Is the event for this PMU? */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * Sampling not supported since these events are not
+ * core-attributable.
+ */
+ if (is_sampling_event(event))
+ return -EINVAL;
+
+ /*
+ * Task mode is not available: these are socket-wide counters, not
+ * attributable to any CPU, so per-task counting is impossible.
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ /* Validate the group */
+ if (!fujitsu_uncore_validate_event_group(event))
+ return -EINVAL;
+
+ hwc->idx = -1;
+
+ event->cpu = uncorepmu->cpu;
+
+ return 0;
+}
+
+static void fujitsu_uncore_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->state = 0;
+ fujitsu_uncore_counter_start(event);
+}
+
+static void fujitsu_uncore_event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->state & PERF_HES_STOPPED)
+ return;
+
+ fujitsu_uncore_counter_stop(event);
+ if (flags & PERF_EF_UPDATE)
+ fujitsu_uncore_counter_update(event);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+}
+
+static int fujitsu_uncore_event_add(struct perf_event *event, int flags)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ /* Try to allocate a counter. */
+ idx = bitmap_find_free_region(uncorepmu->used_mask, uncorepmu->num_counters, 0);
+ if (idx < 0)
+ /* The counters are all in use. */
+ return -EAGAIN;
+
+ hwc->idx = idx;
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ uncorepmu->events[idx] = event;
+
+ if (flags & PERF_EF_START)
+ fujitsu_uncore_event_start(event, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+ return 0;
+}
+
+static void fujitsu_uncore_event_del(struct perf_event *event, int flags)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Stop and clean up */
+ fujitsu_uncore_event_stop(event, flags | PERF_EF_UPDATE);
+ uncorepmu->events[hwc->idx] = NULL;
+ bitmap_release_region(uncorepmu->used_mask, hwc->idx, 0);
+
+ /* Propagate changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+}
+
+static void fujitsu_uncore_event_read(struct perf_event *event)
+{
+ fujitsu_uncore_counter_update(event);
+}
+
+#define UNCORE_PMU_FORMAT_ATTR(_name, _config) \
+ (&((struct dev_ext_attribute[]) { \
+ { .attr = __ATTR(_name, 0444, device_show_string, NULL), \
+ .var = (void *)_config, } \
+ })[0].attr.attr)
+
+static struct attribute *fujitsu_uncore_pmu_formats[] = {
+ UNCORE_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_pmu_format_group = {
+ .name = "format",
+ .attrs = fujitsu_uncore_pmu_formats,
+};
+
+static ssize_t fujitsu_uncore_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr;
+
+ pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+ return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
+}
+
+#define MAC_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)
+
+static struct attribute *fujitsu_uncore_mac_pmu_events[] = {
+ MAC_EVENT_ATTR(cycles, 0x00),
+ MAC_EVENT_ATTR(read-count, 0x10),
+ MAC_EVENT_ATTR(read-count-request, 0x11),
+ MAC_EVENT_ATTR(read-count-return, 0x12),
+ MAC_EVENT_ATTR(read-count-request-pftgt, 0x13),
+ MAC_EVENT_ATTR(read-count-request-normal, 0x14),
+ MAC_EVENT_ATTR(read-count-return-pftgt-hit, 0x15),
+ MAC_EVENT_ATTR(read-count-return-pftgt-miss, 0x16),
+ MAC_EVENT_ATTR(read-wait, 0x17),
+ MAC_EVENT_ATTR(write-count, 0x20),
+ MAC_EVENT_ATTR(write-count-write, 0x21),
+ MAC_EVENT_ATTR(write-count-pwrite, 0x22),
+ MAC_EVENT_ATTR(memory-read-count, 0x40),
+ MAC_EVENT_ATTR(memory-write-count, 0x50),
+ MAC_EVENT_ATTR(memory-pwrite-count, 0x60),
+ MAC_EVENT_ATTR(ea-mac, 0x80),
+ MAC_EVENT_ATTR(ea-memory, 0x90),
+ MAC_EVENT_ATTR(ea-memory-mac-write, 0x92),
+ MAC_EVENT_ATTR(ea-ha, 0xa0),
+ NULL
+};
+
+#define PCI_EVENT_ATTR(_name, _id) \
+ PMU_EVENT_ATTR_ID(_name, fujitsu_uncore_pmu_event_show, _id)
+
+static struct attribute *fujitsu_uncore_pci_pmu_events[] = {
+ PCI_EVENT_ATTR(pci-port0-cycles, 0x00),
+ PCI_EVENT_ATTR(pci-port0-read-count, 0x10),
+ PCI_EVENT_ATTR(pci-port0-read-count-bus, 0x14),
+ PCI_EVENT_ATTR(pci-port0-write-count, 0x20),
+ PCI_EVENT_ATTR(pci-port0-write-count-bus, 0x24),
+ PCI_EVENT_ATTR(pci-port1-cycles, 0x40),
+ PCI_EVENT_ATTR(pci-port1-read-count, 0x50),
+ PCI_EVENT_ATTR(pci-port1-read-count-bus, 0x54),
+ PCI_EVENT_ATTR(pci-port1-write-count, 0x60),
+ PCI_EVENT_ATTR(pci-port1-write-count-bus, 0x64),
+ PCI_EVENT_ATTR(ea-pci, 0x80),
+ NULL
+};
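
As a usage illustration (hypothetical instance names; the iod/mac/ch and iod/pci digits are derived from the ACPI _UID at probe time), counting MAC cycles and reads system-wide might look like perf stat -a -e mac_iod0_mac0_ch0/cycles/,mac_iod0_mac0_ch0/read-count/ sleep 1, and similarly pci_iod0_pci0/pci-port0-cycles/ for a PCI instance.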
+
+static const struct attribute_group fujitsu_uncore_mac_pmu_events_group = {
+ .name = "events",
+ .attrs = fujitsu_uncore_mac_pmu_events,
+};
+
+static const struct attribute_group fujitsu_uncore_pci_pmu_events_group = {
+ .name = "events",
+ .attrs = fujitsu_uncore_pci_pmu_events,
+};
+
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uncore_pmu *uncorepmu = to_uncore_pmu(dev_get_drvdata(dev));
+
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(uncorepmu->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *fujitsu_uncore_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static const struct attribute_group fujitsu_uncore_pmu_cpumask_attr_group = {
+ .attrs = fujitsu_uncore_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *fujitsu_uncore_mac_pmu_attr_grps[] = {
+ &fujitsu_uncore_pmu_format_group,
+ &fujitsu_uncore_mac_pmu_events_group,
+ &fujitsu_uncore_pmu_cpumask_attr_group,
+ NULL
+};
+
+static const struct attribute_group *fujitsu_uncore_pci_pmu_attr_grps[] = {
+ &fujitsu_uncore_pmu_format_group,
+ &fujitsu_uncore_pci_pmu_events_group,
+ &fujitsu_uncore_pmu_cpumask_attr_group,
+ NULL
+};
+
+static void fujitsu_uncore_pmu_migrate(struct uncore_pmu *uncorepmu, unsigned int cpu)
+{
+ perf_pmu_migrate_context(&uncorepmu->pmu, uncorepmu->cpu, cpu);
+ irq_set_affinity(uncorepmu->irq, cpumask_of(cpu));
+ uncorepmu->cpu = cpu;
+}
+
+static int fujitsu_uncore_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct uncore_pmu *uncorepmu;
+ int node;
+
+ uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
+ node = dev_to_node(uncorepmu->dev);
+ if (cpu_to_node(uncorepmu->cpu) != node && cpu_to_node(cpu) == node)
+ fujitsu_uncore_pmu_migrate(uncorepmu, cpu);
+
+ return 0;
+}
+
+static int fujitsu_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
+{
+ struct uncore_pmu *uncorepmu;
+ unsigned int target;
+ int node;
+
+ uncorepmu = hlist_entry_safe(cpuhp_node, struct uncore_pmu, node);
+ if (cpu != uncorepmu->cpu)
+ return 0;
+
+ node = dev_to_node(uncorepmu->dev);
+ target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target < nr_cpu_ids)
+ fujitsu_uncore_pmu_migrate(uncorepmu, target);
+
+ return 0;
+}
+
+static int fujitsu_uncore_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ unsigned long device_type = (unsigned long)device_get_match_data(dev);
+ const struct attribute_group **attr_groups;
+ struct uncore_pmu *uncorepmu;
+ struct resource *memrc;
+ size_t alloc_size;
+ char *name;
+ int ret;
+ int irq;
+ u64 uid;
+
+ ret = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &uid);
+ if (ret)
+ return dev_err_probe(dev, ret, "unable to read ACPI uid\n");
+
+ uncorepmu = devm_kzalloc(dev, sizeof(*uncorepmu), GFP_KERNEL);
+ if (!uncorepmu)
+ return -ENOMEM;
+ uncorepmu->dev = dev;
+ uncorepmu->cpu = cpumask_local_spread(0, dev_to_node(dev));
+ platform_set_drvdata(pdev, uncorepmu);
+
+ switch (device_type) {
+ case FUJITSU_UNCORE_PMU_MAC:
+ uncorepmu->num_counters = MAC_NUM_COUNTERS;
+ attr_groups = fujitsu_uncore_mac_pmu_attr_grps;
+ name = devm_kasprintf(dev, GFP_KERNEL, "mac_iod%llu_mac%llu_ch%llu",
+ (uid >> 8) & 0xF, (uid >> 4) & 0xF, uid & 0xF);
+ break;
+ case FUJITSU_UNCORE_PMU_PCI:
+ uncorepmu->num_counters = PCI_NUM_COUNTERS;
+ attr_groups = fujitsu_uncore_pci_pmu_attr_grps;
+ name = devm_kasprintf(dev, GFP_KERNEL, "pci_iod%llu_pci%llu",
+ (uid >> 4) & 0xF, uid & 0xF);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "illegal device type: %lu\n", device_type);
+ }
+ if (!name)
+ return -ENOMEM;
+
+ uncorepmu->pmu = (struct pmu) {
+ .parent = dev,
+ .task_ctx_nr = perf_invalid_context,
+
+ .attr_groups = attr_groups,
+
+ .pmu_enable = fujitsu_uncore_pmu_enable,
+ .pmu_disable = fujitsu_uncore_pmu_disable,
+ .event_init = fujitsu_uncore_event_init,
+ .add = fujitsu_uncore_event_add,
+ .del = fujitsu_uncore_event_del,
+ .start = fujitsu_uncore_event_start,
+ .stop = fujitsu_uncore_event_stop,
+ .read = fujitsu_uncore_event_read,
+
+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
+ };
+
+ alloc_size = sizeof(uncorepmu->events[0]) * uncorepmu->num_counters;
+ uncorepmu->events = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!uncorepmu->events)
+ return -ENOMEM;
+
+ alloc_size = sizeof(uncorepmu->used_mask[0]) * BITS_TO_LONGS(uncorepmu->num_counters);
+ uncorepmu->used_mask = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!uncorepmu->used_mask)
+ return -ENOMEM;
+
+ uncorepmu->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &memrc);
+ if (IS_ERR(uncorepmu->regs))
+ return PTR_ERR(uncorepmu->regs);
+
+ fujitsu_uncore_init(uncorepmu);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, fujitsu_uncore_handle_irq,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ name, uncorepmu);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request IRQ:%d\n", irq);
+
+ ret = irq_set_affinity(irq, cpumask_of(uncorepmu->cpu));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set irq affinity:%d\n", irq);
+
+ uncorepmu->irq = irq;
+
+ /* Add this instance to the list used by the offline callback */
+ ret = cpuhp_state_add_instance(uncore_pmu_cpuhp_state, &uncorepmu->node);
+ if (ret)
+ return dev_err_probe(dev, ret, "Error registering hotplug");
+
+ ret = perf_pmu_register(&uncorepmu->pmu, name, -1);
+ if (ret < 0) {
+ cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
+ return dev_err_probe(dev, ret, "Failed to register %s PMU\n", name);
+ }
+
+ dev_dbg(dev, "Registered %s, type: %d\n", name, uncorepmu->pmu.type);
+
+ return 0;
+}
+
+static void fujitsu_uncore_pmu_remove(struct platform_device *pdev)
+{
+ struct uncore_pmu *uncorepmu = platform_get_drvdata(pdev);
+
+ writeq_relaxed(0, uncorepmu->regs + PM_CR);
+
+ perf_pmu_unregister(&uncorepmu->pmu);
+ cpuhp_state_remove_instance_nocalls(uncore_pmu_cpuhp_state, &uncorepmu->node);
+}
+
+static const struct acpi_device_id fujitsu_uncore_pmu_acpi_match[] = {
+ { "FUJI200C", FUJITSU_UNCORE_PMU_MAC },
+ { "FUJI200D", FUJITSU_UNCORE_PMU_PCI },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, fujitsu_uncore_pmu_acpi_match);
+
+static struct platform_driver fujitsu_uncore_pmu_driver = {
+ .driver = {
+ .name = "fujitsu-uncore-pmu",
+ .acpi_match_table = fujitsu_uncore_pmu_acpi_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = fujitsu_uncore_pmu_probe,
+ .remove = fujitsu_uncore_pmu_remove,
+};
+
+static int __init fujitsu_uncore_pmu_init(void)
+{
+ int ret;
+
+ /* Install a hook to update the reader CPU in case it goes offline */
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
+ "perf/fujitsu/uncore:online",
+ fujitsu_uncore_pmu_online_cpu,
+ fujitsu_uncore_pmu_offline_cpu);
+ if (ret < 0)
+ return ret;
+
+ uncore_pmu_cpuhp_state = ret;
+
+ ret = platform_driver_register(&fujitsu_uncore_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
+
+ return ret;
+}
+
+static void __exit fujitsu_uncore_pmu_exit(void)
+{
+ platform_driver_unregister(&fujitsu_uncore_pmu_driver);
+ cpuhp_remove_multi_state(uncore_pmu_cpuhp_state);
+}
+
+module_init(fujitsu_uncore_pmu_init);
+module_exit(fujitsu_uncore_pmu_exit);
+
+MODULE_AUTHOR("Koichi Okuno <fj2767dz@fujitsu.com>");
+MODULE_DESCRIPTION("Fujitsu Uncore PMU driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/perf/hisilicon/Makefile b/drivers/perf/hisilicon/Makefile
index 48dcc8381ea7..186be3d02238 100644
--- a/drivers/perf/hisilicon/Makefile
+++ b/drivers/perf/hisilicon/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HISI_PMU) += hisi_uncore_pmu.o hisi_uncore_l3c_pmu.o \
hisi_uncore_hha_pmu.o hisi_uncore_ddrc_pmu.o hisi_uncore_sllc_pmu.o \
- hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o
+ hisi_uncore_pa_pmu.o hisi_uncore_cpa_pmu.o hisi_uncore_uc_pmu.o \
+ hisi_uncore_noc_pmu.o hisi_uncore_mn_pmu.o
obj-$(CONFIG_HISI_PCIE_PMU) += hisi_pcie_pmu.o
obj-$(CONFIG_HNS3_PMU) += hns3_pmu.o
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 412fc3a97963..bbd81a43047d 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -39,6 +39,7 @@
/* L3C has 8-counters */
#define L3C_NR_COUNTERS 0x8
+#define L3C_MAX_EXT 2
#define L3C_PERF_CTRL_EN 0x10000
#define L3C_TRACETAG_EN BIT(31)
@@ -55,59 +56,152 @@
#define L3C_V1_NR_EVENTS 0x59
#define L3C_V2_NR_EVENTS 0xFF
-HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config1, 7, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(ext, config, 17, 16);
HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_req, config1, 10, 8);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_cfg, config1, 15, 11);
HISI_PMU_EVENT_ATTR_EXTRACTOR(datasrc_skt, config1, 16, 16);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_core, config2, 15, 0);
-static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+struct hisi_l3c_pmu {
+ struct hisi_pmu l3c_pmu;
+
+ /* MMIO and IRQ resources for extension events */
+ void __iomem *ext_base[L3C_MAX_EXT];
+ int ext_irq[L3C_MAX_EXT];
+ int ext_num;
+};
+
+#define to_hisi_l3c_pmu(_l3c_pmu) \
+ container_of(_l3c_pmu, struct hisi_l3c_pmu, l3c_pmu)
+
+/*
+ * The hardware counter index used for counter enable/disable,
+ * interrupt enable/disable, status checks, etc.
+ */
+#define L3C_HW_IDX(_cntr_idx) ((_cntr_idx) % L3C_NR_COUNTERS)
+
+/* Range of ext counters in used mask. */
+#define L3C_CNTR_EXT_L(_ext) (((_ext) + 1) * L3C_NR_COUNTERS)
+#define L3C_CNTR_EXT_H(_ext) (((_ext) + 2) * L3C_NR_COUNTERS)
+
+struct hisi_l3c_pmu_ext {
+ bool support_ext;
+};
+
+static bool support_ext(struct hisi_l3c_pmu *pmu)
+{
+ struct hisi_l3c_pmu_ext *l3c_pmu_ext = pmu->l3c_pmu.dev_info->private;
+
+ return l3c_pmu_ext->support_ext;
+}
+
+static int hisi_l3c_pmu_get_event_idx(struct perf_event *event)
{
struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ int ext = hisi_get_ext(event);
+ int idx;
+
+ /*
+ * An L3C PMU that supports extension events can monitor between
+ * 2 * num_counters and 3 * num_counters events, depending on how many
+ * ext regions the hardware provides. Use bits [0, num_counters - 1]
+ * for normal events and bits
+ * [ext * num_counters, (ext + 1) * num_counters - 1] for extension
+ * events. The idx allocation stays unchanged for normal events, and
+ * the idx also tells us whether an event is an extension event.
+ *
+ * Since normal and extension events live in different address spaces,
+ * save the base address in event->hw.event_base.
+ */
+ if (ext && !support_ext(hisi_l3c_pmu))
+ return -EOPNOTSUPP;
+
+ if (ext)
+ event->hw.event_base = (unsigned long)hisi_l3c_pmu->ext_base[ext - 1];
+ else
+ event->hw.event_base = (unsigned long)l3c_pmu->base;
+
+ ext -= 1;
+ idx = find_next_zero_bit(used_mask, L3C_CNTR_EXT_H(ext), L3C_CNTR_EXT_L(ext));
+
+ if (idx >= L3C_CNTR_EXT_H(ext))
+ return -EAGAIN;
+
+ set_bit(idx, used_mask);
+
+ return idx;
+}
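
For a part with two ext regions, the allocation above produces the following index layout (a worked example of the macros, not code from the driver):

	/*
	 * idx  0..7  -> l3c_pmu->base, L3C_HW_IDX(idx) == idx
	 * idx  8..15 -> ext_base[0],   L3C_HW_IDX(idx) == idx - 8
	 * idx 16..23 -> ext_base[1],   L3C_HW_IDX(idx) == idx - 16
	 *
	 * e.g. idx 13 selects hardware counter 5 of the first ext region.
	 */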
+
+static u32 hisi_l3c_pmu_event_readl(struct hw_perf_event *hwc, u32 reg)
+{
+ return readl((void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_event_writel(struct hw_perf_event *hwc, u32 reg, u32 val)
+{
+ writel(val, (void __iomem *)hwc->event_base + reg);
+}
+
+static u64 hisi_l3c_pmu_event_readq(struct hw_perf_event *hwc, u32 reg)
+{
+ return readq((void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_event_writeq(struct hw_perf_event *hwc, u32 reg, u64 val)
+{
+ writeq(val, (void __iomem *)hwc->event_base + reg);
+}
+
+static void hisi_l3c_pmu_config_req_tracetag(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Set request-type for tracetag */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val |= tt_req << L3C_TRACETAG_REQ_SHIFT;
val |= L3C_TRACETAG_REQ_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
/* Enable request-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val |= L3C_TRACETAG_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_req_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 tt_req = hisi_get_tt_req(event);
if (tt_req) {
u32 val;
/* Clear request-type */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val &= ~(tt_req << L3C_TRACETAG_REQ_SHIFT);
val &= ~L3C_TRACETAG_REQ_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
/* Disable request-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val &= ~L3C_TRACETAG_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
}
}
static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
u32 reg, reg_idx, shift, val;
- int idx = hwc->idx;
+ int idx = L3C_HW_IDX(hwc->idx);
/*
* Select the appropriate datasource register(L3C_DATSRC_TYPE0/1).
@@ -120,15 +214,15 @@ static void hisi_l3c_pmu_write_ds(struct perf_event *event, u32 ds_cfg)
reg_idx = idx % 4;
shift = 8 * reg_idx;
- val = readl(l3c_pmu->base + reg);
+ val = hisi_l3c_pmu_event_readl(hwc, reg);
val &= ~(L3C_DATSRC_MASK << shift);
val |= ds_cfg << shift;
- writel(val, l3c_pmu->base + reg);
+ hisi_l3c_pmu_event_writel(hwc, reg, val);
}
static void hisi_l3c_pmu_config_ds(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
@@ -138,15 +232,15 @@ static void hisi_l3c_pmu_config_ds(struct perf_event *event)
if (ds_skt) {
u32 val;
- val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
val |= L3C_DATSRC_SKT_EN;
- writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 ds_cfg = hisi_get_datasrc_cfg(event);
u32 ds_skt = hisi_get_datasrc_skt(event);
@@ -156,57 +250,63 @@ static void hisi_l3c_pmu_clear_ds(struct perf_event *event)
if (ds_skt) {
u32 val;
- val = readl(l3c_pmu->base + L3C_DATSRC_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_DATSRC_CTRL);
val &= ~L3C_DATSRC_SKT_EN;
- writel(val, l3c_pmu->base + L3C_DATSRC_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_DATSRC_CTRL, val);
}
}
static void hisi_l3c_pmu_config_core_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 core = hisi_get_tt_core(event);
if (core) {
u32 val;
/* Config and enable core information */
- writel(core, l3c_pmu->base + L3C_CORE_CTRL);
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, core);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val |= L3C_CORE_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
/* Enable core-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val |= L3C_TRACETAG_CORE_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
}
}
static void hisi_l3c_pmu_clear_core_tracetag(struct perf_event *event)
{
- struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
u32 core = hisi_get_tt_core(event);
if (core) {
u32 val;
/* Clear core information */
- writel(L3C_COER_NONE, l3c_pmu->base + L3C_CORE_CTRL);
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_CORE_CTRL, L3C_COER_NONE);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_PERF_CTRL);
val &= ~L3C_CORE_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_PERF_CTRL, val);
/* Disable core-tracetag statistics */
- val = readl(l3c_pmu->base + L3C_TRACETAG_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_TRACETAG_CTRL);
val &= ~L3C_TRACETAG_CORE_EN;
- writel(val, l3c_pmu->base + L3C_TRACETAG_CTRL);
+ hisi_l3c_pmu_event_writel(hwc, L3C_TRACETAG_CTRL, val);
}
}
+static bool hisi_l3c_pmu_have_filter(struct perf_event *event)
+{
+ return hisi_get_tt_req(event) || hisi_get_tt_core(event) ||
+ hisi_get_datasrc_cfg(event) || hisi_get_datasrc_skt(event);
+}
+
static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
{
- if (event->attr.config1 != 0x0) {
+ if (hisi_l3c_pmu_have_filter(event)) {
hisi_l3c_pmu_config_req_tracetag(event);
hisi_l3c_pmu_config_core_tracetag(event);
hisi_l3c_pmu_config_ds(event);
@@ -215,38 +315,53 @@ static void hisi_l3c_pmu_enable_filter(struct perf_event *event)
static void hisi_l3c_pmu_disable_filter(struct perf_event *event)
{
- if (event->attr.config1 != 0x0) {
+ if (hisi_l3c_pmu_have_filter(event)) {
hisi_l3c_pmu_clear_ds(event);
hisi_l3c_pmu_clear_core_tracetag(event);
hisi_l3c_pmu_clear_req_tracetag(event);
}
}
+static int hisi_l3c_pmu_check_filter(struct perf_event *event)
+{
+ struct hisi_pmu *l3c_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ext = hisi_get_ext(event);
+
+ if (ext < 0 || ext > hisi_l3c_pmu->ext_num)
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* Select the counter register offset using the counter index
*/
static u32 hisi_l3c_pmu_get_counter_offset(int cntr_idx)
{
- return (L3C_CNTR0_LOWER + (cntr_idx * 8));
+ return L3C_CNTR0_LOWER + L3C_HW_IDX(cntr_idx) * 8;
}
static u64 hisi_l3c_pmu_read_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc)
{
- return readq(l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
+ return hisi_l3c_pmu_event_readq(hwc, hisi_l3c_pmu_get_counter_offset(hwc->idx));
}
static void hisi_l3c_pmu_write_counter(struct hisi_pmu *l3c_pmu,
struct hw_perf_event *hwc, u64 val)
{
- writeq(val, l3c_pmu->base + hisi_l3c_pmu_get_counter_offset(hwc->idx));
+ hisi_l3c_pmu_event_writeq(hwc, hisi_l3c_pmu_get_counter_offset(hwc->idx), val);
}
static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
u32 type)
{
+ struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
u32 reg, reg_idx, shift, val;
+ idx = L3C_HW_IDX(idx);
+
/*
* Select the appropriate event select register(L3C_EVENT_TYPE0/1).
* There are 2 event select registers for the 8 hardware counters.
@@ -259,36 +374,72 @@ static void hisi_l3c_pmu_write_evtype(struct hisi_pmu *l3c_pmu, int idx,
shift = 8 * reg_idx;
/* Write event code to L3C_EVENT_TYPEx Register */
- val = readl(l3c_pmu->base + reg);
+ val = hisi_l3c_pmu_event_readl(hwc, reg);
val &= ~(L3C_EVTYPE_NONE << shift);
- val |= (type << shift);
- writel(val, l3c_pmu->base + reg);
+ val |= type << shift;
+ hisi_l3c_pmu_event_writel(hwc, reg, val);
}
static void hisi_l3c_pmu_start_counters(struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
u32 val;
+ int i;
/*
- * Set perf_enable bit in L3C_PERF_CTRL register to start counting
- * for all enabled counters.
+ * Check if any counter belongs to the normal range (instead of ext
+ * range). If so, enable it.
*/
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
- val |= L3C_PERF_CTRL_EN;
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ if (used_cntr < L3C_NR_COUNTERS) {
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+
+ /* Also enable any ext ranges that have counters in use. */
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ /* Skip this ext range if none of its counters are in use. */
+ used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
+ if (used_cntr >= L3C_CNTR_EXT_H(i))
+ continue;
+
+ val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ val |= L3C_PERF_CTRL_EN;
+ writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ }
}
static void hisi_l3c_pmu_stop_counters(struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ unsigned long *used_mask = l3c_pmu->pmu_events.used_mask;
+ unsigned long used_cntr = find_first_bit(used_mask, l3c_pmu->num_counters);
u32 val;
+ int i;
/*
- * Clear perf_enable bit in L3C_PERF_CTRL register to stop counting
- * for all enabled counters.
+ * Check if any counter belongs to the normal range (instead of ext
+ * range). If so, stop it.
*/
- val = readl(l3c_pmu->base + L3C_PERF_CTRL);
- val &= ~(L3C_PERF_CTRL_EN);
- writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ if (used_cntr < L3C_NR_COUNTERS) {
+ val = readl(l3c_pmu->base + L3C_PERF_CTRL);
+ val &= ~L3C_PERF_CTRL_EN;
+ writel(val, l3c_pmu->base + L3C_PERF_CTRL);
+ }
+
+ /* Also stop any ext ranges that have counters in use. */
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ /* Skip this ext range if none of its counters are in use. */
+ used_cntr = find_next_bit(used_mask, L3C_CNTR_EXT_H(i), L3C_CNTR_EXT_L(i));
+ if (used_cntr >= L3C_CNTR_EXT_H(i))
+ continue;
+
+ val = readl(hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ val &= ~L3C_PERF_CTRL_EN;
+ writel(val, hisi_l3c_pmu->ext_base[i] + L3C_PERF_CTRL);
+ }
}
static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
@@ -297,9 +448,9 @@ static void hisi_l3c_pmu_enable_counter(struct hisi_pmu *l3c_pmu,
u32 val;
/* Enable counter index in L3C_EVENT_CTRL register */
- val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
- val |= (1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
+ val |= 1 << L3C_HW_IDX(hwc->idx);
+ hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
}
static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
@@ -308,9 +459,9 @@ static void hisi_l3c_pmu_disable_counter(struct hisi_pmu *l3c_pmu,
u32 val;
/* Clear counter index in L3C_EVENT_CTRL register */
- val = readl(l3c_pmu->base + L3C_EVENT_CTRL);
- val &= ~(1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_EVENT_CTRL);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_EVENT_CTRL);
+ val &= ~(1 << L3C_HW_IDX(hwc->idx));
+ hisi_l3c_pmu_event_writel(hwc, L3C_EVENT_CTRL, val);
}
static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
@@ -318,10 +469,10 @@ static void hisi_l3c_pmu_enable_counter_int(struct hisi_pmu *l3c_pmu,
{
u32 val;
- val = readl(l3c_pmu->base + L3C_INT_MASK);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
/* Write 0 to enable interrupt */
- val &= ~(1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_INT_MASK);
+ val &= ~(1 << L3C_HW_IDX(hwc->idx));
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
}
static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
@@ -329,28 +480,37 @@ static void hisi_l3c_pmu_disable_counter_int(struct hisi_pmu *l3c_pmu,
{
u32 val;
- val = readl(l3c_pmu->base + L3C_INT_MASK);
+ val = hisi_l3c_pmu_event_readl(hwc, L3C_INT_MASK);
/* Write 1 to mask interrupt */
- val |= (1 << hwc->idx);
- writel(val, l3c_pmu->base + L3C_INT_MASK);
+ val |= 1 << L3C_HW_IDX(hwc->idx);
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_MASK, val);
}
static u32 hisi_l3c_pmu_get_int_status(struct hisi_pmu *l3c_pmu)
{
- return readl(l3c_pmu->base + L3C_INT_STATUS);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ u32 ext_int, status, status_ext = 0;
+ int i;
+
+ status = readl(l3c_pmu->base + L3C_INT_STATUS);
+
+ if (!support_ext(hisi_l3c_pmu))
+ return status;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ ext_int = readl(hisi_l3c_pmu->ext_base[i] + L3C_INT_STATUS);
+ status_ext |= ext_int << (L3C_NR_COUNTERS * i);
+ }
+
+ return status | (status_ext << L3C_NR_COUNTERS);
}
static void hisi_l3c_pmu_clear_int_status(struct hisi_pmu *l3c_pmu, int idx)
{
- writel(1 << idx, l3c_pmu->base + L3C_INT_CLEAR);
-}
+ struct hw_perf_event *hwc = &l3c_pmu->pmu_events.hw_events[idx]->hw;
-static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
- { "HISI0213", },
- { "HISI0214", },
- {}
-};
-MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+ hisi_l3c_pmu_event_writel(hwc, L3C_INT_CLEAR, 1 << L3C_HW_IDX(idx));
+}
static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
@@ -371,6 +531,10 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return -EINVAL;
}
+ l3c_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!l3c_pmu->dev_info)
+ return -ENODEV;
+
l3c_pmu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(l3c_pmu->base)) {
dev_err(&pdev->dev, "ioremap failed for l3c_pmu resource\n");
@@ -382,6 +546,50 @@ static int hisi_l3c_pmu_init_data(struct platform_device *pdev,
return 0;
}
+static int hisi_l3c_pmu_init_ext(struct hisi_pmu *l3c_pmu, struct platform_device *pdev)
+{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, irq, ext_num, i;
+ char *irqname;
+
+ /* A HiSilicon L3C PMU that supports ext has more than one IRQ resource. */
+ ext_num = platform_irq_count(pdev);
+ if (ext_num < L3C_MAX_EXT)
+ return -ENODEV;
+
+ /*
+ * The number of supported ext regions equals the number of IRQs minus
+ * one, since one IRQ belongs to the normal part of the PMU.
+ */
+ hisi_l3c_pmu->ext_num = ext_num - 1;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++) {
+ hisi_l3c_pmu->ext_base[i] = devm_platform_ioremap_resource(pdev, i + 1);
+ if (IS_ERR(hisi_l3c_pmu->ext_base[i]))
+ return PTR_ERR(hisi_l3c_pmu->ext_base[i]);
+
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0)
+ return irq;
+
+ irqname = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s ext%d",
+ dev_name(&pdev->dev), i + 1);
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(&pdev->dev, irq, hisi_uncore_pmu_isr,
+ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ irqname, l3c_pmu);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Fail to request EXT IRQ: %d.\n", irq);
+
+ hisi_l3c_pmu->ext_irq[i] = irq;
+ }
+
+ return 0;
+}
+
static struct attribute *hisi_l3c_pmu_v1_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
NULL,
@@ -394,7 +602,7 @@ static const struct attribute_group hisi_l3c_pmu_v1_format_group = {
static struct attribute *hisi_l3c_pmu_v2_format_attr[] = {
HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
- HISI_PMU_FORMAT_ATTR(tt_core, "config1:0-7"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
HISI_PMU_FORMAT_ATTR(datasrc_cfg, "config1:11-15"),
HISI_PMU_FORMAT_ATTR(datasrc_skt, "config1:16"),
@@ -406,6 +614,19 @@ static const struct attribute_group hisi_l3c_pmu_v2_format_group = {
.attrs = hisi_l3c_pmu_v2_format_attr,
};
+static struct attribute *hisi_l3c_pmu_v3_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(ext, "config:16-17"),
+ HISI_PMU_FORMAT_ATTR(tt_req, "config1:8-10"),
+ HISI_PMU_FORMAT_ATTR(tt_core, "config2:0-15"),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v3_format_group = {
+ .name = "format",
+ .attrs = hisi_l3c_pmu_v3_format_attr,
+};
+
static struct attribute *hisi_l3c_pmu_v1_events_attr[] = {
HISI_PMU_EVENT_ATTR(rd_cpipe, 0x00),
HISI_PMU_EVENT_ATTR(wr_cpipe, 0x01),
@@ -441,6 +662,26 @@ static const struct attribute_group hisi_l3c_pmu_v2_events_group = {
.attrs = hisi_l3c_pmu_v2_events_attr,
};
+static struct attribute *hisi_l3c_pmu_v3_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(rd_spipe, 0x18),
+ HISI_PMU_EVENT_ATTR(rd_hit_spipe, 0x19),
+ HISI_PMU_EVENT_ATTR(wr_spipe, 0x1a),
+ HISI_PMU_EVENT_ATTR(wr_hit_spipe, 0x1b),
+ HISI_PMU_EVENT_ATTR(io_rd_spipe, 0x1c),
+ HISI_PMU_EVENT_ATTR(io_rd_hit_spipe, 0x1d),
+ HISI_PMU_EVENT_ATTR(io_wr_spipe, 0x1e),
+ HISI_PMU_EVENT_ATTR(io_wr_hit_spipe, 0x1f),
+ HISI_PMU_EVENT_ATTR(cycles, 0x7f),
+ HISI_PMU_EVENT_ATTR(l3c_ref, 0xbc),
+ HISI_PMU_EVENT_ATTR(l3c2ring, 0xbd),
+ NULL
+};
+
+static const struct attribute_group hisi_l3c_pmu_v3_events_group = {
+ .name = "events",
+ .attrs = hisi_l3c_pmu_v3_events_attr,
+};
+
static const struct attribute_group *hisi_l3c_pmu_v1_attr_groups[] = {
&hisi_l3c_pmu_v1_format_group,
&hisi_l3c_pmu_v1_events_group,
@@ -457,9 +698,46 @@ static const struct attribute_group *hisi_l3c_pmu_v2_attr_groups[] = {
NULL
};
+static const struct attribute_group *hisi_l3c_pmu_v3_attr_groups[] = {
+ &hisi_l3c_pmu_v3_format_group,
+ &hisi_l3c_pmu_v3_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
+static struct hisi_l3c_pmu_ext hisi_l3c_pmu_support_ext = {
+ .support_ext = true,
+};
+
+static struct hisi_l3c_pmu_ext hisi_l3c_pmu_not_support_ext = {
+ .support_ext = false,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v1 = {
+ .attr_groups = hisi_l3c_pmu_v1_attr_groups,
+ .counter_bits = 48,
+ .check_event = L3C_V1_NR_EVENTS,
+ .private = &hisi_l3c_pmu_not_support_ext,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v2 = {
+ .attr_groups = hisi_l3c_pmu_v2_attr_groups,
+ .counter_bits = 64,
+ .check_event = L3C_V2_NR_EVENTS,
+ .private = &hisi_l3c_pmu_not_support_ext,
+};
+
+static const struct hisi_pmu_dev_info hisi_l3c_pmu_v3 = {
+ .attr_groups = hisi_l3c_pmu_v3_attr_groups,
+ .counter_bits = 64,
+ .check_event = L3C_V2_NR_EVENTS,
+ .private = &hisi_l3c_pmu_support_ext,
+};
+
static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.write_evtype = hisi_l3c_pmu_write_evtype,
- .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .get_event_idx = hisi_l3c_pmu_get_event_idx,
.start_counters = hisi_l3c_pmu_start_counters,
.stop_counters = hisi_l3c_pmu_stop_counters,
.enable_counter = hisi_l3c_pmu_enable_counter,
@@ -472,11 +750,14 @@ static const struct hisi_uncore_ops hisi_uncore_l3c_ops = {
.clear_int_status = hisi_l3c_pmu_clear_int_status,
.enable_filter = hisi_l3c_pmu_enable_filter,
.disable_filter = hisi_l3c_pmu_disable_filter,
+ .check_filter = hisi_l3c_pmu_check_filter,
};
static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
struct hisi_pmu *l3c_pmu)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ struct hisi_l3c_pmu_ext *l3c_pmu_dev_ext;
int ret;
ret = hisi_l3c_pmu_init_data(pdev, l3c_pmu);
@@ -487,42 +768,55 @@ static int hisi_l3c_pmu_dev_probe(struct platform_device *pdev,
if (ret)
return ret;
- if (l3c_pmu->identifier >= HISI_PMU_V2) {
- l3c_pmu->counter_bits = 64;
- l3c_pmu->check_event = L3C_V2_NR_EVENTS;
- l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v2_attr_groups;
- } else {
- l3c_pmu->counter_bits = 48;
- l3c_pmu->check_event = L3C_V1_NR_EVENTS;
- l3c_pmu->pmu_events.attr_groups = hisi_l3c_pmu_v1_attr_groups;
- }
-
+ l3c_pmu->pmu_events.attr_groups = l3c_pmu->dev_info->attr_groups;
+ l3c_pmu->counter_bits = l3c_pmu->dev_info->counter_bits;
+ l3c_pmu->check_event = l3c_pmu->dev_info->check_event;
l3c_pmu->num_counters = L3C_NR_COUNTERS;
l3c_pmu->ops = &hisi_uncore_l3c_ops;
l3c_pmu->dev = &pdev->dev;
l3c_pmu->on_cpu = -1;
+ l3c_pmu_dev_ext = l3c_pmu->dev_info->private;
+ if (l3c_pmu_dev_ext->support_ext) {
+ ret = hisi_l3c_pmu_init_ext(l3c_pmu, pdev);
+ if (ret)
+ return ret;
+ /*
+ * Each extension region has its own set of counters, the same
+ * size as the normal counter set, so in total up to
+ * (ext_num + 1) * L3C_NR_COUNTERS events can be monitored.
+ */
+ l3c_pmu->num_counters += hisi_l3c_pmu->ext_num * L3C_NR_COUNTERS;
+ }
+
return 0;
}
static int hisi_l3c_pmu_probe(struct platform_device *pdev)
{
+ struct hisi_l3c_pmu *hisi_l3c_pmu;
struct hisi_pmu *l3c_pmu;
char *name;
int ret;
- l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*l3c_pmu), GFP_KERNEL);
- if (!l3c_pmu)
+ hisi_l3c_pmu = devm_kzalloc(&pdev->dev, sizeof(*hisi_l3c_pmu), GFP_KERNEL);
+ if (!hisi_l3c_pmu)
return -ENOMEM;
+ l3c_pmu = &hisi_l3c_pmu->l3c_pmu;
platform_set_drvdata(pdev, l3c_pmu);
ret = hisi_l3c_pmu_dev_probe(pdev, l3c_pmu);
if (ret)
return ret;
- name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
- l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
+ if (l3c_pmu->topo.sub_id >= 0)
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d_%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id,
+ l3c_pmu->topo.sub_id);
+ else
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_l3c%d",
+ l3c_pmu->topo.sccl_id, l3c_pmu->topo.ccl_id);
if (!name)
return -ENOMEM;
@@ -554,6 +848,14 @@ static void hisi_l3c_pmu_remove(struct platform_device *pdev)
&l3c_pmu->node);
}
+static const struct acpi_device_id hisi_l3c_pmu_acpi_match[] = {
+ { "HISI0213", (kernel_ulong_t)&hisi_l3c_pmu_v1 },
+ { "HISI0214", (kernel_ulong_t)&hisi_l3c_pmu_v2 },
+ { "HISI0215", (kernel_ulong_t)&hisi_l3c_pmu_v3 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_l3c_pmu_acpi_match);
+
static struct platform_driver hisi_l3c_pmu_driver = {
.driver = {
.name = "hisi_l3c_pmu",
@@ -564,14 +866,60 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.remove = hisi_l3c_pmu_remove,
};
+static int hisi_l3c_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, i;
+
+ ret = hisi_uncore_pmu_online_cpu(cpu, node);
+ if (ret)
+ return ret;
+
+ /* Skip ext IRQ migration for an L3C PMU that does not support ext. */
+ if (!support_ext(hisi_l3c_pmu))
+ return 0;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
+ WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
+ cpumask_of(l3c_pmu->on_cpu)));
+
+ return 0;
+}
+
+static int hisi_l3c_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct hisi_pmu *l3c_pmu = hlist_entry_safe(node, struct hisi_pmu, node);
+ struct hisi_l3c_pmu *hisi_l3c_pmu = to_hisi_l3c_pmu(l3c_pmu);
+ int ret, i;
+
+ ret = hisi_uncore_pmu_offline_cpu(cpu, node);
+ if (ret)
+ return ret;
+
+ /* If no available CPU was found, skip IRQ migration. */
+ if (l3c_pmu->on_cpu < 0)
+ return 0;
+
+ /* Skip ext IRQ migration for an L3C PMU that does not support ext. */
+ if (!support_ext(hisi_l3c_pmu))
+ return 0;
+
+ for (i = 0; i < hisi_l3c_pmu->ext_num; i++)
+ WARN_ON(irq_set_affinity(hisi_l3c_pmu->ext_irq[i],
+ cpumask_of(l3c_pmu->on_cpu)));
+
+ return 0;
+}
+
static int __init hisi_l3c_pmu_module_init(void)
{
int ret;
ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_L3_ONLINE,
"AP_PERF_ARM_HISI_L3_ONLINE",
- hisi_uncore_pmu_online_cpu,
- hisi_uncore_pmu_offline_cpu);
+ hisi_l3c_pmu_online_cpu,
+ hisi_l3c_pmu_offline_cpu);
if (ret) {
pr_err("L3C PMU: Error setup hotplug, ret = %d\n", ret);
return ret;
diff --git a/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c b/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
new file mode 100644
index 000000000000..4df4eebe243e
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_mn_pmu.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon SoC MN uncore Hardware event counters support
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ */
+#include <linux/cpuhotplug.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+
+#include "hisi_uncore_pmu.h"
+
+/* Dynamic CPU hotplug state used by MN PMU */
+static enum cpuhp_state hisi_mn_pmu_online;
+
+/* MN register definition */
+#define HISI_MN_DYNAMIC_CTRL_REG 0x400
+#define HISI_MN_DYNAMIC_CTRL_EN BIT(0)
+#define HISI_MN_PERF_CTRL_REG 0x408
+#define HISI_MN_PERF_CTRL_EN BIT(6)
+#define HISI_MN_INT_MASK_REG 0x800
+#define HISI_MN_INT_STATUS_REG 0x808
+#define HISI_MN_INT_CLEAR_REG 0x80C
+#define HISI_MN_EVENT_CTRL_REG 0x1C00
+#define HISI_MN_VERSION_REG 0x1C04
+#define HISI_MN_EVTYPE0_REG 0x1d00
+#define HISI_MN_EVTYPE_MASK GENMASK(7, 0)
+#define HISI_MN_CNTR0_REG 0x1e00
+#define HISI_MN_EVTYPE_REGn(evtype0, n) ((evtype0) + (n) * 4)
+#define HISI_MN_CNTR_REGn(cntr0, n) ((cntr0) + (n) * 8)
+
+#define HISI_MN_NR_COUNTERS 4
+#define HISI_MN_TIMEOUT_US 500U
+
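
As a quick, illustration-only sanity check on the offset macros above: the MN counters are 64-bit registers at an 8-byte stride from CNTR0, while four 8-bit event codes pack into each 32-bit evtype register. A userspace mirror of the arithmetic (the macro names are copies for demonstration, not the kernel's):

#include <stdio.h>

/* Userspace mirror of the MN offset macros above, illustration only. */
#define EVTYPE_REGn(evtype0, n)	((evtype0) + (n) * 4)
#define CNTR_REGn(cntr0, n)	((cntr0) + (n) * 8)

int main(void)
{
	int idx = 3;

	/* Counter 3 sits at 0x1e00 + 3 * 8 = 0x1e18; its event code
	 * occupies bits [31:24] of the first evtype register (0x1d00). */
	printf("cntr @ 0x%x, evtype reg @ 0x%x, shift %d\n",
	       CNTR_REGn(0x1e00, idx), EVTYPE_REGn(0x1d00, idx / 4),
	       (idx % 4) * 8);
	return 0;
}
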
+struct hisi_mn_pmu_regs {
+ u32 version;
+ u32 dyn_ctrl;
+ u32 perf_ctrl;
+ u32 int_mask;
+ u32 int_clear;
+ u32 int_status;
+ u32 event_ctrl;
+ u32 event_type0;
+ u32 event_cntr0;
+};
+
+/*
+ * Each event request takes a certain amount of time to complete. If
+ * we are counting a latency-related event, we need to wait for all
+ * outstanding requests to complete; otherwise the counter value comes
+ * out slightly too large.
+ */
+static void hisi_mn_pmu_counter_flush(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ int ret;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->dyn_ctrl);
+ val |= HISI_MN_DYNAMIC_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->dyn_ctrl);
+
+ ret = readl_poll_timeout_atomic(mn_pmu->base + reg_info->dyn_ctrl,
+ val, !(val & HISI_MN_DYNAMIC_CTRL_EN),
+ 1, HISI_MN_TIMEOUT_US);
+ if (ret)
+ dev_warn(mn_pmu->dev, "Counter flush timeout\n");
+}
+
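
The flush above is the readl_poll_timeout_atomic() idiom: set the trigger bit, then busy-poll until the hardware clears it or the timeout expires. A minimal userspace sketch of the polling phase, where reg_read() is a stand-in for the MMIO accessor:

#include <stdio.h>

/* Stand-in for readl() on the dynamic-control register: pretend the
 * hardware clears the enable bit after a few polls. */
static unsigned int reg_read(void)
{
	static int polls;
	return polls++ < 3 ? 1 : 0;
}

int main(void)
{
	int timeout_us = 500, waited = 0;

	while (reg_read() & 1) {	/* HISI_MN_DYNAMIC_CTRL_EN still set */
		if (++waited > timeout_us) {
			fprintf(stderr, "Counter flush timeout\n");
			return 1;
		}
		/* the kernel version delays ~1us per iteration here */
	}
	printf("flush complete after %d polls\n", waited);
	return 0;
}
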
+static u64 hisi_mn_pmu_read_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ return readq(mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_mn_pmu_write_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ writeq(val, mn_pmu->base + HISI_MN_CNTR_REGn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_mn_pmu_write_evtype(struct hisi_pmu *mn_pmu, int idx, u32 type)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ /*
+ * Select the appropriate event select register.
+ * There are 2 32-bit event select registers for the
+	 * 8 hardware counters; each event code is 8 bits wide.
+ */
+ val = readl(mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
+ val &= ~(HISI_MN_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
+ val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
+ writel(val, mn_pmu->base + HISI_MN_EVTYPE_REGn(reg_info->event_type0, idx / 4));
+}
+
+static void hisi_mn_pmu_start_counters(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->perf_ctrl);
+ val |= HISI_MN_PERF_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->perf_ctrl);
+}
+
+static void hisi_mn_pmu_stop_counters(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->perf_ctrl);
+ val &= ~HISI_MN_PERF_CTRL_EN;
+ writel(val, mn_pmu->base + reg_info->perf_ctrl);
+
+ hisi_mn_pmu_counter_flush(mn_pmu);
+}
+
+static void hisi_mn_pmu_enable_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->event_ctrl);
+ val |= BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->event_ctrl);
+}
+
+static void hisi_mn_pmu_disable_counter(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->event_ctrl);
+ val &= ~BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->event_ctrl);
+}
+
+static void hisi_mn_pmu_enable_counter_int(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->int_mask);
+ val &= ~BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->int_mask);
+}
+
+static void hisi_mn_pmu_disable_counter_int(struct hisi_pmu *mn_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+ u32 val;
+
+ val = readl(mn_pmu->base + reg_info->int_mask);
+ val |= BIT(hwc->idx);
+ writel(val, mn_pmu->base + reg_info->int_mask);
+}
+
+static u32 hisi_mn_pmu_get_int_status(struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ return readl(mn_pmu->base + reg_info->int_status);
+}
+
+static void hisi_mn_pmu_clear_int_status(struct hisi_pmu *mn_pmu, int idx)
+{
+ struct hisi_mn_pmu_regs *reg_info = mn_pmu->dev_info->private;
+
+ writel(BIT(idx), mn_pmu->base + reg_info->int_clear);
+}
+
+static struct attribute *hisi_mn_pmu_format_attr[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ NULL
+};
+
+static const struct attribute_group hisi_mn_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_mn_pmu_format_attr,
+};
+
+static struct attribute *hisi_mn_pmu_events_attr[] = {
+ HISI_PMU_EVENT_ATTR(req_eobarrier_num, 0x00),
+ HISI_PMU_EVENT_ATTR(req_ecbarrier_num, 0x01),
+ HISI_PMU_EVENT_ATTR(req_dvmop_num, 0x02),
+ HISI_PMU_EVENT_ATTR(req_dvmsync_num, 0x03),
+ HISI_PMU_EVENT_ATTR(req_retry_num, 0x04),
+ HISI_PMU_EVENT_ATTR(req_writenosnp_num, 0x05),
+ HISI_PMU_EVENT_ATTR(req_readnosnp_num, 0x06),
+ HISI_PMU_EVENT_ATTR(snp_dvm_num, 0x07),
+ HISI_PMU_EVENT_ATTR(snp_dvmsync_num, 0x08),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvm_num, 0x09),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_num, 0x0A),
+ HISI_PMU_EVENT_ATTR(mn_req_dvm_num, 0x0B),
+ HISI_PMU_EVENT_ATTR(mn_req_dvmsync_num, 0x0C),
+ HISI_PMU_EVENT_ATTR(pa_req_dvm_num, 0x0D),
+ HISI_PMU_EVENT_ATTR(pa_req_dvmsync_num, 0x0E),
+ HISI_PMU_EVENT_ATTR(snp_dvm_latency, 0x80),
+ HISI_PMU_EVENT_ATTR(snp_dvmsync_latency, 0x81),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvm_latency, 0x82),
+ HISI_PMU_EVENT_ATTR(l3t_req_dvmsync_latency, 0x83),
+ HISI_PMU_EVENT_ATTR(mn_req_dvm_latency, 0x84),
+ HISI_PMU_EVENT_ATTR(mn_req_dvmsync_latency, 0x85),
+ HISI_PMU_EVENT_ATTR(pa_req_dvm_latency, 0x86),
+ HISI_PMU_EVENT_ATTR(pa_req_dvmsync_latency, 0x87),
+ NULL
+};
+
+static const struct attribute_group hisi_mn_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_mn_pmu_events_attr,
+};
+
+static const struct attribute_group *hisi_mn_pmu_attr_groups[] = {
+ &hisi_mn_pmu_format_group,
+ &hisi_mn_pmu_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
+static const struct hisi_uncore_ops hisi_uncore_mn_ops = {
+ .write_evtype = hisi_mn_pmu_write_evtype,
+ .get_event_idx = hisi_uncore_pmu_get_event_idx,
+ .start_counters = hisi_mn_pmu_start_counters,
+ .stop_counters = hisi_mn_pmu_stop_counters,
+ .enable_counter = hisi_mn_pmu_enable_counter,
+ .disable_counter = hisi_mn_pmu_disable_counter,
+ .enable_counter_int = hisi_mn_pmu_enable_counter_int,
+ .disable_counter_int = hisi_mn_pmu_disable_counter_int,
+ .write_counter = hisi_mn_pmu_write_counter,
+ .read_counter = hisi_mn_pmu_read_counter,
+ .get_int_status = hisi_mn_pmu_get_int_status,
+ .clear_int_status = hisi_mn_pmu_clear_int_status,
+};
+
+static int hisi_mn_pmu_dev_init(struct platform_device *pdev,
+ struct hisi_pmu *mn_pmu)
+{
+ struct hisi_mn_pmu_regs *reg_info;
+ int ret;
+
+ hisi_uncore_pmu_init_topology(mn_pmu, &pdev->dev);
+
+ if (mn_pmu->topo.scl_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to read MN scl id\n");
+
+ if (mn_pmu->topo.index_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Failed to read MN index id\n");
+
+ mn_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mn_pmu->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(mn_pmu->base),
+ "Failed to ioremap resource\n");
+
+ ret = hisi_uncore_pmu_init_irq(mn_pmu, pdev);
+ if (ret)
+ return ret;
+
+ mn_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!mn_pmu->dev_info)
+ return -ENODEV;
+
+ mn_pmu->pmu_events.attr_groups = mn_pmu->dev_info->attr_groups;
+ mn_pmu->counter_bits = mn_pmu->dev_info->counter_bits;
+ mn_pmu->check_event = mn_pmu->dev_info->check_event;
+ mn_pmu->num_counters = HISI_MN_NR_COUNTERS;
+ mn_pmu->ops = &hisi_uncore_mn_ops;
+ mn_pmu->dev = &pdev->dev;
+ mn_pmu->on_cpu = -1;
+
+ reg_info = mn_pmu->dev_info->private;
+ mn_pmu->identifier = readl(mn_pmu->base + reg_info->version);
+
+ return 0;
+}
+
+static void hisi_mn_pmu_remove_cpuhp(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_mn_pmu_online, hotplug_node);
+}
+
+static void hisi_mn_pmu_unregister(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_mn_pmu_probe(struct platform_device *pdev)
+{
+ struct hisi_pmu *mn_pmu;
+ char *name;
+ int ret;
+
+ mn_pmu = devm_kzalloc(&pdev->dev, sizeof(*mn_pmu), GFP_KERNEL);
+ if (!mn_pmu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, mn_pmu);
+
+ ret = hisi_mn_pmu_dev_init(pdev, mn_pmu);
+ if (ret)
+ return ret;
+
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_scl%d_mn%d",
+ mn_pmu->topo.scl_id, mn_pmu->topo.index_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = cpuhp_state_add_instance(hisi_mn_pmu_online, &mn_pmu->node);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register cpu hotplug\n");
+
+ ret = devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_remove_cpuhp, &mn_pmu->node);
+ if (ret)
+ return ret;
+
+ hisi_pmu_init(mn_pmu, THIS_MODULE);
+
+ ret = perf_pmu_register(&mn_pmu->pmu, name, -1);
+ if (ret)
+ return dev_err_probe(mn_pmu->dev, ret, "Failed to register MN PMU\n");
+
+ return devm_add_action_or_reset(&pdev->dev, hisi_mn_pmu_unregister, &mn_pmu->pmu);
+}
+
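
The probe above leans on devm's LIFO unwind: because the cpuhp-removal action is registered before the PMU-unregister action, detach runs them in reverse, so perf_pmu_unregister() is called while the hotplug instance still exists. A toy illustration of that ordering:

#include <stdio.h>

/* Toy model of devm's LIFO unwind: cleanup actions registered during
 * probe run in reverse order on detach. */
int main(void)
{
	const char *registered[] = {	/* probe-time registration order */
		"cpuhp_state_remove_instance_nocalls",
		"perf_pmu_unregister",
	};

	for (int i = 1; i >= 0; i--)	/* detach order: last in, first out */
		printf("detach: %s\n", registered[i]);
	return 0;
}
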
+static struct hisi_mn_pmu_regs hisi_mn_v1_pmu_regs = {
+ .version = HISI_MN_VERSION_REG,
+ .dyn_ctrl = HISI_MN_DYNAMIC_CTRL_REG,
+ .perf_ctrl = HISI_MN_PERF_CTRL_REG,
+ .int_mask = HISI_MN_INT_MASK_REG,
+ .int_clear = HISI_MN_INT_CLEAR_REG,
+ .int_status = HISI_MN_INT_STATUS_REG,
+ .event_ctrl = HISI_MN_EVENT_CTRL_REG,
+ .event_type0 = HISI_MN_EVTYPE0_REG,
+ .event_cntr0 = HISI_MN_CNTR0_REG,
+};
+
+static const struct hisi_pmu_dev_info hisi_mn_v1 = {
+ .attr_groups = hisi_mn_pmu_attr_groups,
+ .counter_bits = 48,
+ .check_event = HISI_MN_EVTYPE_MASK,
+ .private = &hisi_mn_v1_pmu_regs,
+};
+
+static const struct acpi_device_id hisi_mn_pmu_acpi_match[] = {
+ { "HISI0222", (kernel_ulong_t) &hisi_mn_v1 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_mn_pmu_acpi_match);
+
+static struct platform_driver hisi_mn_pmu_driver = {
+ .driver = {
+ .name = "hisi_mn_pmu",
+ .acpi_match_table = hisi_mn_pmu_acpi_match,
+ /*
+		 * We have not worked out a safe bind/unbind process.
+ * Forcefully unbinding during sampling will lead to a
+ * kernel panic, so this is not supported yet.
+ */
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_mn_pmu_probe,
+};
+
+static int __init hisi_mn_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/hisi/mn:online",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret < 0) {
+ pr_err("hisi_mn_pmu: Failed to setup MN PMU hotplug: %d\n", ret);
+ return ret;
+ }
+ hisi_mn_pmu_online = ret;
+
+ ret = platform_driver_register(&hisi_mn_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_mn_pmu_online);
+
+ return ret;
+}
+module_init(hisi_mn_pmu_module_init);
+
+static void __exit hisi_mn_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_mn_pmu_driver);
+ cpuhp_remove_multi_state(hisi_mn_pmu_online);
+}
+module_exit(hisi_mn_pmu_module_exit);
+
+MODULE_IMPORT_NS("HISI_PMU");
+MODULE_DESCRIPTION("HiSilicon SoC MN uncore PMU driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Junhao He <hejunhao3@huawei.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
new file mode 100644
index 000000000000..de3b9cc7aada
--- /dev/null
+++ b/drivers/perf/hisilicon/hisi_uncore_noc_pmu.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Uncore NoC (Network on Chip) PMU device
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ */
+#include <linux/bitops.h>
+#include <linux/cpuhotplug.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/sysfs.h>
+
+#include "hisi_uncore_pmu.h"
+
+#define NOC_PMU_VERSION 0x1e00
+#define NOC_PMU_GLOBAL_CTRL 0x1e04
+#define NOC_PMU_GLOBAL_CTRL_PMU_EN BIT(0)
+#define NOC_PMU_GLOBAL_CTRL_TT_EN BIT(1)
+#define NOC_PMU_CNT_INFO 0x1e08
+#define NOC_PMU_CNT_INFO_OVERFLOW(n) BIT(n)
+#define NOC_PMU_EVENT_CTRL0 0x1e20
+#define NOC_PMU_EVENT_CTRL_TYPE GENMASK(4, 0)
+/*
+ * Note: a channel value of 0x0 will reset the counter, so don't write
+ * it before we have read out the counter.
+ */
+#define NOC_PMU_EVENT_CTRL_CHANNEL GENMASK(10, 8)
+#define NOC_PMU_EVENT_CTRL_EN BIT(11)
+#define NOC_PMU_EVENT_COUNTER0 0x1e80
+
+#define NOC_PMU_NR_COUNTERS 4
+#define NOC_PMU_CH_DEFAULT 0x7
+
+#define NOC_PMU_EVENT_CTRLn(ctrl0, n) ((ctrl0) + 4 * (n))
+#define NOC_PMU_EVENT_CNTRn(cntr0, n) ((cntr0) + 8 * (n))
+
+HISI_PMU_EVENT_ATTR_EXTRACTOR(ch, config1, 2, 0);
+HISI_PMU_EVENT_ATTR_EXTRACTOR(tt_en, config1, 3, 3);
+
+/* Dynamic CPU hotplug state used by this PMU driver */
+static enum cpuhp_state hisi_noc_pmu_cpuhp_state;
+
+struct hisi_noc_pmu_regs {
+ u32 version;
+ u32 pmu_ctrl;
+ u32 event_ctrl0;
+ u32 event_cntr0;
+ u32 overflow_status;
+};
+
+/*
+ * Tracetag filtering is not per-event, so all scheduled events must
+ * share the same configuration. Return true if the newcomer matches
+ * the tracetag filtering configuration of the currently scheduled
+ * events.
+ */
+static bool hisi_noc_pmu_check_global_filter(struct perf_event *curr,
+ struct perf_event *new)
+{
+ return hisi_get_tt_en(curr) == hisi_get_tt_en(new);
+}
+
+static void hisi_noc_pmu_write_evtype(struct hisi_pmu *noc_pmu, int idx, u32 type)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_TYPE;
+ reg |= FIELD_PREP(NOC_PMU_EVENT_CTRL_TYPE, type);
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, idx));
+}
+
+static int hisi_noc_pmu_get_event_idx(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_pmu_hwevents *pmu_events = &noc_pmu->pmu_events;
+ int cur_idx;
+
+ cur_idx = find_first_bit(pmu_events->used_mask, noc_pmu->num_counters);
+ if (cur_idx != noc_pmu->num_counters &&
+ !hisi_noc_pmu_check_global_filter(pmu_events->hw_events[cur_idx], event))
+ return -EAGAIN;
+
+ return hisi_uncore_pmu_get_event_idx(event);
+}
+
+static u64 hisi_noc_pmu_read_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ return readq(noc_pmu->base + NOC_PMU_EVENT_CNTRn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_noc_pmu_write_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc, u64 val)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ writeq(val, noc_pmu->base + NOC_PMU_EVENT_CNTRn(reg_info->event_cntr0, hwc->idx));
+}
+
+static void hisi_noc_pmu_enable_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg |= NOC_PMU_EVENT_CTRL_EN;
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+}
+
+static void hisi_noc_pmu_disable_counter(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_EN;
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+}
+
+static void hisi_noc_pmu_enable_counter_int(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+	/* Interrupts are not supported, so this is a stub. */
+}
+
+static void hisi_noc_pmu_disable_counter_int(struct hisi_pmu *noc_pmu,
+ struct hw_perf_event *hwc)
+{
+}
+
+static void hisi_noc_pmu_start_counters(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg |= NOC_PMU_GLOBAL_CTRL_PMU_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+}
+
+static void hisi_noc_pmu_stop_counters(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg &= ~NOC_PMU_GLOBAL_CTRL_PMU_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+}
+
+static u32 hisi_noc_pmu_get_int_status(struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+
+ return readl(noc_pmu->base + reg_info->overflow_status);
+}
+
+static void hisi_noc_pmu_clear_int_status(struct hisi_pmu *noc_pmu, int idx)
+{
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 reg;
+
+ reg = readl(noc_pmu->base + reg_info->overflow_status);
+ reg &= ~NOC_PMU_CNT_INFO_OVERFLOW(idx);
+ writel(reg, noc_pmu->base + reg_info->overflow_status);
+}
+
+static void hisi_noc_pmu_enable_filter(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ struct hw_perf_event *hwc = &event->hw;
+ u32 tt_en = hisi_get_tt_en(event);
+ u32 ch = hisi_get_ch(event);
+ u32 reg;
+
+ if (!ch)
+ ch = NOC_PMU_CH_DEFAULT;
+
+ reg = readl(noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+ reg &= ~NOC_PMU_EVENT_CTRL_CHANNEL;
+ reg |= FIELD_PREP(NOC_PMU_EVENT_CTRL_CHANNEL, ch);
+ writel(reg, noc_pmu->base + NOC_PMU_EVENT_CTRLn(reg_info->event_ctrl0, hwc->idx));
+
+ /*
+	 * Since the tracetag filter applies to all the counters, don't
+	 * touch it unless the user specifies it explicitly.
+ */
+ if (tt_en) {
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg |= NOC_PMU_GLOBAL_CTRL_TT_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+ }
+}
+
+static void hisi_noc_pmu_disable_filter(struct perf_event *event)
+{
+ struct hisi_pmu *noc_pmu = to_hisi_pmu(event->pmu);
+ struct hisi_noc_pmu_regs *reg_info = noc_pmu->dev_info->private;
+ u32 tt_en = hisi_get_tt_en(event);
+ u32 reg;
+
+ /*
+	 * If this is not the last active counter, don't touch the global
+	 * tracetag configuration.
+ */
+ if (bitmap_weight(noc_pmu->pmu_events.used_mask, noc_pmu->num_counters) > 1)
+ return;
+
+ if (tt_en) {
+ reg = readl(noc_pmu->base + reg_info->pmu_ctrl);
+ reg &= ~NOC_PMU_GLOBAL_CTRL_TT_EN;
+ writel(reg, noc_pmu->base + reg_info->pmu_ctrl);
+ }
+}
+
+static const struct hisi_uncore_ops hisi_uncore_noc_ops = {
+ .write_evtype = hisi_noc_pmu_write_evtype,
+ .get_event_idx = hisi_noc_pmu_get_event_idx,
+ .read_counter = hisi_noc_pmu_read_counter,
+ .write_counter = hisi_noc_pmu_write_counter,
+ .enable_counter = hisi_noc_pmu_enable_counter,
+ .disable_counter = hisi_noc_pmu_disable_counter,
+ .enable_counter_int = hisi_noc_pmu_enable_counter_int,
+ .disable_counter_int = hisi_noc_pmu_disable_counter_int,
+ .start_counters = hisi_noc_pmu_start_counters,
+ .stop_counters = hisi_noc_pmu_stop_counters,
+ .get_int_status = hisi_noc_pmu_get_int_status,
+ .clear_int_status = hisi_noc_pmu_clear_int_status,
+ .enable_filter = hisi_noc_pmu_enable_filter,
+ .disable_filter = hisi_noc_pmu_disable_filter,
+};
+
+static struct attribute *hisi_noc_pmu_format_attrs[] = {
+ HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
+ HISI_PMU_FORMAT_ATTR(ch, "config1:0-2"),
+ HISI_PMU_FORMAT_ATTR(tt_en, "config1:3"),
+ NULL
+};
+
+static const struct attribute_group hisi_noc_pmu_format_group = {
+ .name = "format",
+ .attrs = hisi_noc_pmu_format_attrs,
+};
+
+static struct attribute *hisi_noc_pmu_events_attrs[] = {
+ HISI_PMU_EVENT_ATTR(cycles, 0x0e),
+ /* Flux on/off the ring */
+ HISI_PMU_EVENT_ATTR(ingress_flow_sum, 0x1a),
+ HISI_PMU_EVENT_ATTR(egress_flow_sum, 0x17),
+ /* Buffer full duration on/off the ring */
+ HISI_PMU_EVENT_ATTR(ingress_buf_full, 0x19),
+ HISI_PMU_EVENT_ATTR(egress_buf_full, 0x12),
+ /* Failure packets count on/off the ring */
+ HISI_PMU_EVENT_ATTR(cw_ingress_fail, 0x01),
+ HISI_PMU_EVENT_ATTR(cc_ingress_fail, 0x09),
+ HISI_PMU_EVENT_ATTR(cw_egress_fail, 0x03),
+ HISI_PMU_EVENT_ATTR(cc_egress_fail, 0x0b),
+ /* Flux of the ring */
+ HISI_PMU_EVENT_ATTR(cw_main_flow_sum, 0x05),
+ HISI_PMU_EVENT_ATTR(cc_main_flow_sum, 0x0d),
+ NULL
+};
+
+static const struct attribute_group hisi_noc_pmu_events_group = {
+ .name = "events",
+ .attrs = hisi_noc_pmu_events_attrs,
+};
+
+static const struct attribute_group *hisi_noc_pmu_attr_groups[] = {
+ &hisi_noc_pmu_format_group,
+ &hisi_noc_pmu_events_group,
+ &hisi_pmu_cpumask_attr_group,
+ &hisi_pmu_identifier_group,
+ NULL
+};
+
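
For reference, one way to consume the format bits defined above from userspace is a raw perf_event_open() call: the event code goes in config:0-7 and the ch/tt_en filters in config1, per the attributes in this file. A hedged sketch — the PMU type must be read from sysfs at runtime, the instance name hisi_scl3_noc0_0 below is hypothetical, and the call needs CAP_PERFMON or root:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr = { 0 };
	long long count;
	FILE *f;
	int fd, type;

	/* Hypothetical instance name; the real one comes from probe(). */
	f = fopen("/sys/bus/event_source/devices/hisi_scl3_noc0_0/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x0e;		/* cycles, config:0-7 */
	attr.config1 = 0x3 | (1 << 3);	/* ch=0x3, tt_en=1 (config1:0-2, :3) */

	/* Uncore PMU: count system-wide on one CPU, not per-task. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;
	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}

Note that because tt_en drives a global filter (see hisi_noc_pmu_check_global_filter() above), concurrently opened events must agree on it or scheduling the newcomer fails with -EAGAIN.
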
+static int hisi_noc_pmu_dev_init(struct platform_device *pdev, struct hisi_pmu *noc_pmu)
+{
+ struct hisi_noc_pmu_regs *reg_info;
+
+ hisi_uncore_pmu_init_topology(noc_pmu, &pdev->dev);
+
+ if (noc_pmu->topo.scl_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get scl-id\n");
+
+ if (noc_pmu->topo.index_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get idx-id\n");
+
+ if (noc_pmu->topo.sub_id < 0)
+ return dev_err_probe(&pdev->dev, -EINVAL, "failed to get sub-id\n");
+
+ noc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(noc_pmu->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(noc_pmu->base),
+ "fail to remap io memory\n");
+
+ noc_pmu->dev_info = device_get_match_data(&pdev->dev);
+ if (!noc_pmu->dev_info)
+ return -ENODEV;
+
+ noc_pmu->pmu_events.attr_groups = noc_pmu->dev_info->attr_groups;
+ noc_pmu->counter_bits = noc_pmu->dev_info->counter_bits;
+ noc_pmu->check_event = noc_pmu->dev_info->check_event;
+ noc_pmu->num_counters = NOC_PMU_NR_COUNTERS;
+ noc_pmu->ops = &hisi_uncore_noc_ops;
+ noc_pmu->dev = &pdev->dev;
+ noc_pmu->on_cpu = -1;
+
+ reg_info = noc_pmu->dev_info->private;
+ noc_pmu->identifier = readl(noc_pmu->base + reg_info->version);
+
+ return 0;
+}
+
+static void hisi_noc_pmu_remove_cpuhp_instance(void *hotplug_node)
+{
+ cpuhp_state_remove_instance_nocalls(hisi_noc_pmu_cpuhp_state, hotplug_node);
+}
+
+static void hisi_noc_pmu_unregister_pmu(void *pmu)
+{
+ perf_pmu_unregister(pmu);
+}
+
+static int hisi_noc_pmu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_pmu *noc_pmu;
+ char *name;
+ int ret;
+
+ noc_pmu = devm_kzalloc(dev, sizeof(*noc_pmu), GFP_KERNEL);
+ if (!noc_pmu)
+ return -ENOMEM;
+
+ /*
+	 * The HiSilicon uncore PMU framework needs to get the common
+	 * hisi_pmu device from the device's drvdata.
+ */
+ platform_set_drvdata(pdev, noc_pmu);
+
+ ret = hisi_noc_pmu_dev_init(pdev, noc_pmu);
+ if (ret)
+ return ret;
+
+ ret = cpuhp_state_add_instance(hisi_noc_pmu_cpuhp_state, &noc_pmu->node);
+ if (ret)
+ return dev_err_probe(dev, ret, "Fail to register cpuhp instance\n");
+
+ ret = devm_add_action_or_reset(dev, hisi_noc_pmu_remove_cpuhp_instance,
+ &noc_pmu->node);
+ if (ret)
+ return ret;
+
+ hisi_pmu_init(noc_pmu, THIS_MODULE);
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "hisi_scl%d_noc%d_%d",
+ noc_pmu->topo.scl_id, noc_pmu->topo.index_id,
+ noc_pmu->topo.sub_id);
+ if (!name)
+ return -ENOMEM;
+
+ ret = perf_pmu_register(&noc_pmu->pmu, name, -1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Fail to register PMU\n");
+
+ return devm_add_action_or_reset(dev, hisi_noc_pmu_unregister_pmu,
+ &noc_pmu->pmu);
+}
+
+static struct hisi_noc_pmu_regs hisi_noc_v1_pmu_regs = {
+ .version = NOC_PMU_VERSION,
+ .pmu_ctrl = NOC_PMU_GLOBAL_CTRL,
+ .event_ctrl0 = NOC_PMU_EVENT_CTRL0,
+ .event_cntr0 = NOC_PMU_EVENT_COUNTER0,
+ .overflow_status = NOC_PMU_CNT_INFO,
+};
+
+static const struct hisi_pmu_dev_info hisi_noc_v1 = {
+ .attr_groups = hisi_noc_pmu_attr_groups,
+ .counter_bits = 64,
+ .check_event = NOC_PMU_EVENT_CTRL_TYPE,
+ .private = &hisi_noc_v1_pmu_regs,
+};
+
+static const struct acpi_device_id hisi_noc_pmu_ids[] = {
+ { "HISI04E0", (kernel_ulong_t) &hisi_noc_v1 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_noc_pmu_ids);
+
+static struct platform_driver hisi_noc_pmu_driver = {
+ .driver = {
+ .name = "hisi_noc_pmu",
+ .acpi_match_table = hisi_noc_pmu_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = hisi_noc_pmu_probe,
+};
+
+static int __init hisi_noc_pmu_module_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "perf/hisi/noc:online",
+ hisi_uncore_pmu_online_cpu,
+ hisi_uncore_pmu_offline_cpu);
+ if (ret < 0) {
+ pr_err("hisi_noc_pmu: Fail to setup cpuhp callbacks, ret = %d\n", ret);
+ return ret;
+ }
+ hisi_noc_pmu_cpuhp_state = ret;
+
+ ret = platform_driver_register(&hisi_noc_pmu_driver);
+ if (ret)
+ cpuhp_remove_multi_state(hisi_noc_pmu_cpuhp_state);
+
+ return ret;
+}
+module_init(hisi_noc_pmu_module_init);
+
+static void __exit hisi_noc_pmu_module_exit(void)
+{
+ platform_driver_unregister(&hisi_noc_pmu_driver);
+ cpuhp_remove_multi_state(hisi_noc_pmu_cpuhp_state);
+}
+module_exit(hisi_noc_pmu_module_exit);
+
+MODULE_IMPORT_NS("HISI_PMU");
+MODULE_DESCRIPTION("HiSilicon SoC Uncore NoC PMU driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index a449651f79c9..de71dcf11653 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -149,7 +149,7 @@ static void hisi_uncore_pmu_clear_event_idx(struct hisi_pmu *hisi_pmu, int idx)
clear_bit(idx, hisi_pmu->pmu_events.used_mask);
}
-static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
+irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
{
struct hisi_pmu *hisi_pmu = data;
struct perf_event *event;
@@ -178,6 +178,7 @@ static irqreturn_t hisi_uncore_pmu_isr(int irq, void *data)
return IRQ_HANDLED;
}
+EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_isr, "HISI_PMU");
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev)
@@ -234,7 +235,7 @@ int hisi_uncore_pmu_event_init(struct perf_event *event)
return -EINVAL;
hisi_pmu = to_hisi_pmu(event->pmu);
- if (event->attr.config > hisi_pmu->check_event)
+ if ((event->attr.config & HISI_EVENTID_MASK) > hisi_pmu->check_event)
return -EINVAL;
if (hisi_pmu->on_cpu == -1)
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index 777675838b80..3ffe6acda653 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -24,7 +24,7 @@
#define pr_fmt(fmt) "hisi_pmu: " fmt
#define HISI_PMU_V2 0x30
-#define HISI_MAX_COUNTERS 0x10
+#define HISI_MAX_COUNTERS 0x18
#define to_hisi_pmu(p) (container_of(p, struct hisi_pmu, pmu))
#define HISI_PMU_ATTR(_name, _func, _config) \
@@ -43,7 +43,8 @@
return FIELD_GET(GENMASK_ULL(hi, lo), event->attr.config); \
}
-#define HISI_GET_EVENTID(ev) (ev->hw.config_base & 0xff)
+#define HISI_EVENTID_MASK GENMASK(7, 0)
+#define HISI_GET_EVENTID(ev) ((ev)->hw.config_base & HISI_EVENTID_MASK)
#define HISI_PMU_EVTYPE_BITS 8
#define HISI_PMU_EVTYPE_SHIFT(idx) ((idx) % 4 * HISI_PMU_EVTYPE_BITS)
@@ -164,6 +165,7 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);
ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
struct device_attribute *attr,
char *page);
+irqreturn_t hisi_uncore_pmu_isr(int irq, void *data);
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev);
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 698de8ddf895..3fc16bbab025 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -339,7 +339,7 @@ static bool pmu_sbi_ctr_is_fw(int cidx)
if (!info)
return false;
- return (info->type == SBI_PMU_CTR_TYPE_FW) ? true : false;
+ return info->type == SBI_PMU_CTR_TYPE_FW;
}
/*
@@ -877,8 +877,10 @@ static inline void pmu_sbi_start_ovf_ctrs_sbi(struct cpu_hw_events *cpu_hw_evt,
for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) {
ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask;
/* Start all the counters that did not overflow in a single shot */
- sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG, ctr_start_mask,
- 0, 0, 0, 0);
+ if (ctr_start_mask) {
+ sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, i * BITS_PER_LONG,
+ ctr_start_mask, 0, 0, 0, 0);
+ }
}
/* Reinitialize and start all the counter that overflowed */
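
The guard above avoids issuing an SBI COUNTER_START ecall with an empty mask for words of used_hw_ctrs[] that have no counters to restart — each such call is a wasted trap into the SBI firmware. A compressed sketch of the fixed loop, with hypothetical mask values:

#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
	/* Hypothetical state: counters 1, 2 and 4 in use; counter 2 overflowed. */
	unsigned long used_hw_ctrs[2] = { 0x16UL, 0x0UL };
	unsigned long ctr_ovf_mask = 0x4UL;

	for (int i = 0; i < 2; i++) {
		unsigned long ctr_start_mask = used_hw_ctrs[i] & ~ctr_ovf_mask;

		if (ctr_start_mask)	/* word 1 is empty: no ecall issued */
			printf("SBI START base=%d mask=0x%lx\n",
			       i * BITS_PER_LONG, ctr_start_mask);
	}
	return 0;
}
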
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 1b2f132d76f0..b405dfa20891 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -108,6 +108,9 @@
#define JTAG_UDI_EN_MASK BIT(4)
#define JTAG_DFD_EN_MASK BIT(3)
+#define REG_FORCE_GPIO_EN 0x0228
+#define FORCE_GPIO_EN(n) BIT(n)
+
/* LED MAP */
#define REG_LAN_LED0_MAPPING 0x027c
#define REG_LAN_LED1_MAPPING 0x0280
@@ -719,16 +722,16 @@ static const struct airoha_pinctrl_func_group mdio_func_group[] = {
.name = "mdio",
.regmap[0] = {
AIROHA_FUNC_MUX,
- REG_GPIO_PON_MODE,
- GPIO_SGMII_MDIO_MODE_MASK,
- GPIO_SGMII_MDIO_MODE_MASK
- },
- .regmap[1] = {
- AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
GPIO_MDC_IO_MASTER_MODE_MODE,
GPIO_MDC_IO_MASTER_MODE_MODE
},
+ .regmap[1] = {
+ AIROHA_FUNC_MUX,
+ REG_FORCE_GPIO_EN,
+ FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2),
+ FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2)
+ },
.regmap_size = 2,
},
};
@@ -1752,8 +1755,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1816,8 +1819,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1880,8 +1883,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1944,8 +1947,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index 18fb44139de2..d63aaad7ef59 100644
--- a/drivers/platform/x86/amd/pmc/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
@@ -239,6 +239,14 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
}
},
+ {
+ .ident = "MECHREVO Yilong15Pro Series GM5HG7A",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MECHREVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Yilong15Pro Series GM5HG7A"),
+ }
+ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */
{
.ident = "PCSpecialist Lafite Pro V 14M",
@@ -249,6 +257,13 @@ static const struct dmi_system_id fwbug_list[] = {
}
},
{
+ .ident = "TUXEDO Stellaris Slim 15 AMD Gen6",
+ .driver_data = &quirk_spurious_8042,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ }
+ },
+ {
.ident = "TUXEDO InfinityBook Pro 14/15 AMD Gen10",
.driver_data = &quirk_spurious_8042,
.matches = {
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index ef988605c4da..bc544a4a5266 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -403,6 +403,7 @@ static const struct acpi_device_id amd_pmf_acpi_ids[] = {
{"AMDI0103", 0},
{"AMDI0105", 0},
{"AMDI0107", 0},
+ {"AMDI0108", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 3a488cf9ca06..6a62bc5b02fd 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -673,6 +673,8 @@ static void asus_nb_wmi_key_filter(struct asus_wmi_driver *asus_wmi, int *code,
if (atkbd_reports_vol_keys)
*code = ASUS_WMI_KEY_IGNORE;
break;
+ case 0x5D: /* Wireless console Toggle */
+ case 0x5E: /* Wireless console Enable / Keyboard Attach, Detach */
case 0x5F: /* Wireless console Disable / Special Key */
if (quirks->key_wlan_event)
*code = quirks->key_wlan_event;
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
index 732de5f556f8..77905a9ddde9 100644
--- a/drivers/platform/x86/dell/dell-lis3lv02d.c
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -48,6 +48,7 @@ static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
DELL_LIS3LV02D_DMI_ENTRY("Latitude 5500", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Latitude E6530", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision 3551", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29),
diff --git a/drivers/platform/x86/dell/dell-pc.c b/drivers/platform/x86/dell/dell-pc.c
index 48cc7511905a..becdd9aaef29 100644
--- a/drivers/platform/x86/dell/dell-pc.c
+++ b/drivers/platform/x86/dell/dell-pc.c
@@ -228,6 +228,8 @@ static int thermal_platform_profile_get(struct device *dev,
static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
{
+ int current_mode;
+
if (supported_modes & DELL_QUIET)
__set_bit(PLATFORM_PROFILE_QUIET, choices);
if (supported_modes & DELL_COOL_BOTTOM)
@@ -237,6 +239,13 @@ static int thermal_platform_profile_probe(void *drvdata, unsigned long *choices)
if (supported_modes & DELL_PERFORMANCE)
__set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
+ /* Make sure that ACPI is in sync with the profile set by USTT */
+ current_mode = thermal_get_mode();
+ if (current_mode < 0)
+ return current_mode;
+
+ thermal_set_mode(current_mode);
+
return 0;
}
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index 71e104a068e9..7449873c3d40 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -790,7 +790,7 @@ static const struct x86_cpu_id isst_cpu_ids[] = {
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_ICELAKE_D, 0),
X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
- X86_MATCH_VFM(INTEL_PANTHERCOVE_X, SST_HPM_SUPPORTED),
+ X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, SST_HPM_SUPPORTED),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
X86_MATCH_VFM(INTEL_SKYLAKE_X, SST_MBOX_SUPPORTED),
{}
diff --git a/drivers/platform/x86/intel/tpmi_power_domains.c b/drivers/platform/x86/intel/tpmi_power_domains.c
index 8641353b2e06..7d93119a4c30 100644
--- a/drivers/platform/x86/intel/tpmi_power_domains.c
+++ b/drivers/platform/x86/intel/tpmi_power_domains.c
@@ -85,7 +85,7 @@ static const struct x86_cpu_id tpmi_cpu_ids[] = {
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, NULL),
X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, NULL),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_D, NULL),
- X86_MATCH_VFM(INTEL_PANTHERCOVE_X, NULL),
+ X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, tpmi_cpu_ids);
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 4b57102c7f62..6af6cf477c5b 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
@@ -75,6 +76,9 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages");
#define WMBB_USB_CHARGE 0x10B
#define WMBB_BATT_LIMIT 0x10C
+#define FAN_MODE_LOWER GENMASK(1, 0)
+#define FAN_MODE_UPPER GENMASK(5, 4)
+
#define PLATFORM_NAME "lg-laptop"
MODULE_ALIAS("wmi:" WMI_EVENT_GUID0);
@@ -274,29 +278,19 @@ static ssize_t fan_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- bool value;
+ unsigned long value;
union acpi_object *r;
- u32 m;
int ret;
- ret = kstrtobool(buffer, &value);
+ ret = kstrtoul(buffer, 10, &value);
if (ret)
return ret;
+ if (value >= 3)
+ return -EINVAL;
- r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
- if (!r)
- return -EIO;
-
- if (r->type != ACPI_TYPE_INTEGER) {
- kfree(r);
- return -EIO;
- }
-
- m = r->integer.value;
- kfree(r);
- r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xffffff0f) | (value << 4));
- kfree(r);
- r = lg_wmab(dev, WM_FAN_MODE, WM_SET, (m & 0xfffffff0) | value);
+ r = lg_wmab(dev, WM_FAN_MODE, WM_SET,
+ FIELD_PREP(FAN_MODE_LOWER, value) |
+ FIELD_PREP(FAN_MODE_UPPER, value));
kfree(r);
return count;
@@ -305,7 +299,7 @@ static ssize_t fan_mode_store(struct device *dev,
static ssize_t fan_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- unsigned int status;
+ unsigned int mode;
union acpi_object *r;
r = lg_wmab(dev, WM_FAN_MODE, WM_GET, 0);
@@ -317,10 +311,10 @@ static ssize_t fan_mode_show(struct device *dev,
return -EIO;
}
- status = r->integer.value & 0x01;
+ mode = FIELD_GET(FAN_MODE_LOWER, r->integer.value);
kfree(r);
- return sysfs_emit(buffer, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", mode);
}
static ssize_t usb_charge_store(struct device *dev,
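
The rewrite above replaces a read-modify-write pair with a single composed write, relying on FIELD_PREP()/FIELD_GET() to place the mode in both nibbles at once. A userspace mirror of the bit arithmetic — illustration only, and it assumes a 64-bit unsigned long:

#include <stdio.h>

/* Userspace stand-ins for the kernel helpers, illustration only. */
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))
#define FIELD_PREP(m, v)	(((unsigned long)(v) << __builtin_ctzl(m)) & (m))
#define FIELD_GET(m, r)	(((r) & (m)) >> __builtin_ctzl(m))

int main(void)
{
	unsigned long lower = GENMASK(1, 0), upper = GENMASK(5, 4);
	unsigned long reg = FIELD_PREP(lower, 2) | FIELD_PREP(upper, 2);

	printf("reg = 0x%02lx, mode = %lu\n", reg, FIELD_GET(lower, reg));
	/* prints: reg = 0x22, mode = 2 */
	return 0;
}
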
diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c
index eb076bb4099b..54377b282ff8 100644
--- a/drivers/platform/x86/oxpec.c
+++ b/drivers/platform/x86/oxpec.c
@@ -126,6 +126,13 @@ static const struct dmi_system_id dmi_table[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOKZOE"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AOKZOE A1X"),
+ },
+ .driver_data = (void *)oxp_fly,
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
},
@@ -306,6 +313,13 @@ static const struct dmi_system_id dmi_table[] = {
},
.driver_data = (void *)oxp_x1,
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONEXPLAYER X1Pro EVA-02"),
+ },
+ .driver_data = (void *)oxp_x1,
+ },
{},
};
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 0006ab3d0789..61c2277c9ce3 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -187,6 +187,7 @@ static const struct genpd_lock_ops genpd_raw_spin_ops = {
#define genpd_is_opp_table_fw(genpd) (genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd) (genpd->flags & GENPD_FLAG_DEV_NAME_FW)
#define genpd_is_no_sync_state(genpd) (genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
+#define genpd_is_no_stay_on(genpd) (genpd->flags & GENPD_FLAG_NO_STAY_ON)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -1357,7 +1358,6 @@ err_poweroff:
return ret;
}
-#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
@@ -1382,9 +1382,6 @@ static int __init genpd_power_off_unused(void)
mutex_lock(&gpd_list_lock);
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- genpd_lock(genpd);
- genpd->stay_on = false;
- genpd_unlock(genpd);
genpd_queue_power_off_work(genpd);
}
@@ -1393,7 +1390,6 @@ static int __init genpd_power_off_unused(void)
return 0;
}
late_initcall_sync(genpd_power_off_unused);
-#endif
#ifdef CONFIG_PM_SLEEP
@@ -2367,6 +2363,18 @@ static void genpd_lock_init(struct generic_pm_domain *genpd)
}
}
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
+{
+ genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off;
+}
+#else
+static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
+{
+ genpd->stay_on = false;
+}
+#endif
+
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -2392,7 +2400,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
- genpd->stay_on = !is_off;
+ genpd_set_stay_on(genpd, is_off);
genpd->sync_state = GENPD_SYNC_STATE_OFF;
genpd->device_count = 0;
genpd->provider = NULL;
diff --git a/drivers/pmdomain/mediatek/mt8195-pm-domains.h b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
index 59aa031ae632..414dfe82361f 100644
--- a/drivers/pmdomain/mediatek/mt8195-pm-domains.h
+++ b/drivers/pmdomain/mediatek/mt8195-pm-domains.h
@@ -123,6 +123,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = {
MT8195_TOP_AXI_PROT_EN_2_CLR,
MT8195_TOP_AXI_PROT_EN_2_STA1),
},
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP,
},
[MT8195_POWER_DOMAIN_MFG0] = {
.name = "mfg0",
diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 5aa7fa1df8fe..7434bf42d215 100644
--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
@@ -251,6 +251,7 @@ static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
genpd->detach_dev = cpg_mssr_detach_dev;
}
+ genpd->flags |= GENPD_FLAG_NO_STAY_ON;
genpd->power_off = rcar_gen4_sysc_pd_power_off;
genpd->power_on = rcar_gen4_sysc_pd_power_on;
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index 4b310c1d35fa..d8a8ffcde38d 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -241,6 +241,7 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
}
}
+ genpd->flags |= GENPD_FLAG_NO_STAY_ON;
genpd->power_off = rcar_sysc_pd_power_off;
genpd->power_on = rcar_sysc_pd_power_on;
@@ -342,7 +343,7 @@ struct rcar_pm_domains {
};
static struct genpd_onecell_data *rcar_sysc_onecell_data;
-static struct device_node *rcar_sysc_onecell_np;
+static struct device_node *rcar_sysc_onecell_np __initdata = NULL;
static int __init rcar_sysc_pd_init(void)
{
diff --git a/drivers/pmdomain/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c
index 8eedc9a1d825..a6bf7295e909 100644
--- a/drivers/pmdomain/renesas/rmobile-sysc.c
+++ b/drivers/pmdomain/renesas/rmobile-sysc.c
@@ -100,7 +100,8 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
struct generic_pm_domain *genpd = &rmobile_pd->genpd;
struct dev_power_governor *gov = rmobile_pd->gov;
- genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP |
+ GENPD_FLAG_NO_STAY_ON;
genpd->attach_dev = cpg_mstp_attach_dev;
genpd->detach_dev = cpg_mstp_detach_dev;
diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index 242570c505fb..1955c6d453e4 100644
--- a/drivers/pmdomain/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
@@ -865,7 +865,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
- pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ pd->genpd.flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_NO_STAY_ON;
if (pd_info->active_wakeup)
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&pd->genpd, NULL,
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 93dcebbe1141..ad2d9ecf32a5 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1919,8 +1919,8 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
- if ((cache.flags & 0xff) == 0xff)
- cache.flags = -1; /* read error */
+ if (di->chip == BQ27000 && (cache.flags & 0xff) == 0xff)
+ cache.flags = -ENODEV; /* bq27000 hdq read error */
if (cache.flags >= 0) {
cache.capacity = bq27xxx_battery_read_soc(di);
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index a12a1ad9b5fe..3d4d343ee0c8 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/string_choices.h>
#include <asm/lv1call.h>
#include <asm/ps3stor.h>
@@ -265,7 +266,7 @@ u64 ps3stor_read_write_sectors(struct ps3_storage_device *dev, u64 lpar,
u64 start_sector, u64 sectors, int write)
{
unsigned int region_id = dev->regions[dev->region_idx].id;
- const char *op = write ? "write" : "read";
+ const char *op = str_write_read(write);
int res;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
diff --git a/drivers/ras/ras.c b/drivers/ras/ras.c
index a6e4792a1b2e..ac0e132ccc3e 100644
--- a/drivers/ras/ras.c
+++ b/drivers/ras/ras.c
@@ -51,6 +51,7 @@ void log_non_standard_event(const guid_t *sec_type, const guid_t *fru_id,
{
trace_non_standard_event(sec_type, fru_id, fru_text, sev, err, len);
}
+EXPORT_SYMBOL_GPL(log_non_standard_event);
void log_arm_hw_error(struct cper_sec_proc_arm *err)
{
diff --git a/drivers/reset/reset-eyeq.c b/drivers/reset/reset-eyeq.c
index 02d50041048b..2d3998368a1c 100644
--- a/drivers/reset/reset-eyeq.c
+++ b/drivers/reset/reset-eyeq.c
@@ -410,6 +410,13 @@ static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev,
return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]);
}
+static void eqr_of_node_put(void *_dev)
+{
+ struct device *dev = _dev;
+
+ of_node_put(dev->of_node);
+}
+
static int eqr_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -428,6 +435,10 @@ static int eqr_probe(struct auxiliary_device *adev,
if (!dev->of_node)
return -ENODEV;
+ ret = devm_add_action_or_reset(dev, eqr_of_node_put, dev);
+ if (ret)
+ return ret;
+
/*
* Using our newfound OF node, we can get match data. We cannot use
* device_get_match_data() because it does not match reused OF nodes.
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index 8c1c908d2c6e..877a9bc7f04b 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -5,19 +5,11 @@ comment "S/390 block device drivers"
config DCSSBLK
def_tristate m
prompt "DCSSBLK support"
- depends on S390 && BLOCK && (DAX || DAX=n)
+ depends on S390 && BLOCK && ZONE_DEVICE
+ select FS_DAX
help
Support for dcss block device
-config DCSSBLK_DAX
- def_bool y
- depends on DCSSBLK
- # requires S390 ZONE_DEVICE support
- depends on BROKEN
- prompt "DCSSBLK DAX support"
- help
- Enable DAX operation for the dcss block device
-
config DASD
def_tristate y
prompt "Support for DASD devices"
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 94fa5edecadd..86fef4b15015 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -79,6 +79,8 @@ struct dcssblk_dev_info {
int num_of_segments;
struct list_head seg_list;
struct dax_device *dax_dev;
+ struct dev_pagemap pgmap;
+ void *pgmap_addr;
};
struct segment_info {
@@ -415,6 +417,8 @@ removeseg:
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
@@ -537,9 +541,6 @@ static int dcssblk_setup_dax(struct dcssblk_dev_info *dev_info)
{
struct dax_device *dax_dev;
- if (!IS_ENABLED(CONFIG_DCSSBLK_DAX))
- return 0;
-
dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
if (IS_ERR(dax_dev))
return PTR_ERR(dax_dev);
@@ -562,6 +563,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
char *local_buf;
+ void *addr;
unsigned long seg_byte_size;
dev_info = NULL;
@@ -687,9 +689,26 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- rc = dcssblk_setup_dax(dev_info);
- if (rc)
- goto out_dax;
+ if (!IS_ALIGNED(dev_info->start, SUBSECTION_SIZE) ||
+ !IS_ALIGNED(dev_info->end + 1, SUBSECTION_SIZE)) {
+ pr_info("DCSS %s is not aligned to %lu bytes, DAX support disabled\n",
+ local_buf, SUBSECTION_SIZE);
+ } else {
+ dev_info->pgmap.type = MEMORY_DEVICE_FS_DAX;
+ dev_info->pgmap.range.start = dev_info->start;
+ dev_info->pgmap.range.end = dev_info->end;
+ dev_info->pgmap.nr_range = 1;
+ addr = devm_memremap_pages(&dev_info->dev, &dev_info->pgmap);
+ if (IS_ERR(addr)) {
+ rc = PTR_ERR(addr);
+ goto put_dev;
+ }
+ dev_info->pgmap_addr = addr;
+ rc = dcssblk_setup_dax(dev_info);
+ if (rc)
+ goto out_dax;
+ pr_info("DAX support enabled for DCSS %s\n", local_buf);
+ }
get_device(&dev_info->dev);
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
@@ -716,6 +735,8 @@ out_dax_host:
out_dax:
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
put_dev:
list_del(&dev_info->lh);
put_disk(dev_info->gd);
@@ -801,6 +822,8 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
+ if (dev_info->pgmap_addr)
+ devm_memunmap_pages(&dev_info->dev, &dev_info->pgmap);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
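
The DAX gate above requires the segment to be subsection-aligned because devm_memremap_pages() manages page metadata at subsection granularity (SUBSECTION_SHIFT is 21, i.e. 2 MiB). A quick userspace check of the same predicate, using a hypothetical segment range:

#include <stdio.h>

#define SUBSECTION_SIZE	(1UL << 21)	/* SUBSECTION_SHIFT = 21 -> 2 MiB */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	/* Hypothetical DCSS spanning [512 MiB, 768 MiB) */
	unsigned long start = 0x20000000UL, end = 0x2fffffffUL;

	if (IS_ALIGNED(start, SUBSECTION_SIZE) &&
	    IS_ALIGNED(end + 1, SUBSECTION_SIZE))
		printf("DAX eligible\n");
	else
		printf("DAX disabled: not 2 MiB aligned\n");
	return 0;
}
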
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 81d6744e1861..dcbd51152ee3 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -21,6 +21,7 @@ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o sclp_sd.o
+obj-$(CONFIG_MEMORY_HOTPLUG) += sclp_mem.o
obj-$(CONFIG_TN3270) += raw3270.o con3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o
diff --git a/drivers/s390/char/hmcdrv_dev.c b/drivers/s390/char/hmcdrv_dev.c
index e069dd685899..b26fcf6849f2 100644
--- a/drivers/s390/char/hmcdrv_dev.c
+++ b/drivers/s390/char/hmcdrv_dev.c
@@ -244,24 +244,17 @@ static ssize_t hmcdrv_dev_write(struct file *fp, const char __user *ubuf,
size_t len, loff_t *pos)
{
ssize_t retlen;
+ void *pdata;
pr_debug("writing file '/dev/%pD' at pos. %lld with length %zd\n",
fp, (long long) *pos, len);
if (!fp->private_data) { /* first expect a cmd write */
- fp->private_data = kmalloc(len + 1, GFP_KERNEL);
-
- if (!fp->private_data)
- return -ENOMEM;
-
- if (!copy_from_user(fp->private_data, ubuf, len)) {
- ((char *)fp->private_data)[len] = '\0';
- return len;
- }
-
- kfree(fp->private_data);
- fp->private_data = NULL;
- return -EFAULT;
+ pdata = memdup_user_nul(ubuf, len);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ fp->private_data = pdata;
+ return len;
}
retlen = hmcdrv_dev_transfer((char *) fp->private_data,
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 16469678548f..3480198eac02 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -8,31 +8,46 @@
#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/cpufeature.h>
#include <linux/completion.h>
-#include <linux/init.h>
-#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <linux/memory.h>
-#include <linux/memory_hotplug.h>
-#include <linux/module.h>
-#include <asm/ctlreg.h>
#include <asm/chpid.h>
-#include <asm/setup.h>
-#include <asm/page.h>
+#include <asm/ctlreg.h>
#include <asm/sclp.h>
-#include <asm/numa.h>
-#include <asm/facility.h>
-#include <asm/page-states.h>
#include "sclp.h"
-#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
-#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+/* CPU configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
+/* Channel path configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
+#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
+
+struct cpu_configure_sccb {
+ struct sccb_header header;
+} __packed __aligned(8);
+
+struct chp_cfg_sccb {
+ struct sccb_header header;
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __packed;
+
+struct chp_info_sccb {
+ struct sccb_header header;
+ u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
+ u8 standby[SCLP_CHP_INFO_MASK_SIZE];
+ u8 configured[SCLP_CHP_INFO_MASK_SIZE];
+ u8 ccm;
+ u8 reserved[6];
+ u8 cssid;
+} __packed;
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
@@ -64,13 +79,11 @@ int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
request->callback_data = &completion;
init_completion(&completion);
- /* Perform sclp request. */
rc = sclp_add_request(request);
if (rc)
goto out;
wait_for_completion(&completion);
- /* Check response. */
if (request->status != SCLP_REQ_DONE) {
pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
cmd, request->status);
@@ -81,22 +94,15 @@ out:
return rc;
}
-/*
- * CPU configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
-#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
-
int _sclp_get_core_info(struct sclp_core_info *info)
{
- int rc;
- int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
struct read_cpu_info_sccb *sccb;
+ int rc, length;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
+ length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
if (!sccb)
return -ENOMEM;
@@ -114,14 +120,10 @@ int _sclp_get_core_info(struct sclp_core_info *info)
}
sclp_fill_core_info(info, sccb);
out:
- free_pages((unsigned long) sccb, get_order(length));
+ free_pages((unsigned long)sccb, get_order(length));
return rc;
}
-struct cpu_configure_sccb {
- struct sccb_header header;
-} __attribute__((packed, aligned(8)));
-
static int do_core_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
@@ -130,8 +132,8 @@ static int do_core_configure(sclp_cmdw_t cmd)
if (!SCLP_HAS_CPU_RECONFIG)
return -EOPNOTSUPP;
/*
- * This is not going to cross a page boundary since we force
- * kmalloc to have a minimum alignment of 8 bytes on s390.
+ * Use kmalloc to have a minimum alignment of 8 bytes and ensure sccb
+ * is not going to cross a page boundary.
*/
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
if (!sccb)
@@ -165,394 +167,6 @@ int sclp_core_deconfigure(u8 core)
return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}
-#ifdef CONFIG_MEMORY_HOTPLUG
-
-static DEFINE_MUTEX(sclp_mem_mutex);
-static LIST_HEAD(sclp_mem_list);
-static u8 sclp_max_storage_id;
-static DECLARE_BITMAP(sclp_storage_ids, 256);
-
-struct memory_increment {
- struct list_head list;
- u16 rn;
- int standby;
-};
-
-struct assign_storage_sccb {
- struct sccb_header header;
- u16 rn;
-} __packed;
-
-int arch_get_memory_phys_device(unsigned long start_pfn)
-{
- if (!sclp.rzm)
- return 0;
- return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
-}
-
-static unsigned long long rn2addr(u16 rn)
-{
- return (unsigned long long) (rn - 1) * sclp.rzm;
-}
-
-static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
-{
- struct assign_storage_sccb *sccb;
- int rc;
-
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- return -ENOMEM;
- sccb->header.length = PAGE_SIZE;
- sccb->rn = rn;
- rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0020:
- case 0x0120:
- break;
- default:
- pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
- cmd, sccb->header.response_code, rn);
- rc = -EIO;
- break;
- }
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-
-static int sclp_assign_storage(u16 rn)
-{
- unsigned long long start;
- int rc;
-
- rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
- if (rc)
- return rc;
- start = rn2addr(rn);
- storage_key_init_range(start, start + sclp.rzm);
- return 0;
-}
-
-static int sclp_unassign_storage(u16 rn)
-{
- return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
-}
-
-struct attach_storage_sccb {
- struct sccb_header header;
- u16 :16;
- u16 assigned;
- u32 :32;
- u32 entries[];
-} __packed;
-
-static int sclp_attach_storage(u8 id)
-{
- struct attach_storage_sccb *sccb;
- int rc;
- int i;
-
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- return -ENOMEM;
- sccb->header.length = PAGE_SIZE;
- sccb->header.function_code = 0x40;
- rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
- SCLP_QUEUE_INTERVAL);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0020:
- set_bit(id, sclp_storage_ids);
- for (i = 0; i < sccb->assigned; i++) {
- if (sccb->entries[i])
- sclp_unassign_storage(sccb->entries[i] >> 16);
- }
- break;
- default:
- rc = -EIO;
- break;
- }
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-
-static int sclp_mem_change_state(unsigned long start, unsigned long size,
- int online)
-{
- struct memory_increment *incr;
- unsigned long long istart;
- int rc = 0;
-
- list_for_each_entry(incr, &sclp_mem_list, list) {
- istart = rn2addr(incr->rn);
- if (start + size - 1 < istart)
- break;
- if (start > istart + sclp.rzm - 1)
- continue;
- if (online)
- rc |= sclp_assign_storage(incr->rn);
- else
- sclp_unassign_storage(incr->rn);
- if (rc == 0)
- incr->standby = online ? 0 : 1;
- }
- return rc ? -EIO : 0;
-}
-
-static bool contains_standby_increment(unsigned long start, unsigned long end)
-{
- struct memory_increment *incr;
- unsigned long istart;
-
- list_for_each_entry(incr, &sclp_mem_list, list) {
- istart = rn2addr(incr->rn);
- if (end - 1 < istart)
- continue;
- if (start > istart + sclp.rzm - 1)
- continue;
- if (incr->standby)
- return true;
- }
- return false;
-}
-
-static int sclp_mem_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- unsigned long start, size;
- struct memory_notify *arg;
- unsigned char id;
- int rc = 0;
-
- arg = data;
- start = arg->start_pfn << PAGE_SHIFT;
- size = arg->nr_pages << PAGE_SHIFT;
- mutex_lock(&sclp_mem_mutex);
- for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
- sclp_attach_storage(id);
- switch (action) {
- case MEM_GOING_OFFLINE:
- /*
- * We do not allow to set memory blocks offline that contain
- * standby memory. This is done to simplify the "memory online"
- * case.
- */
- if (contains_standby_increment(start, start + size))
- rc = -EPERM;
- break;
- case MEM_PREPARE_ONLINE:
- /*
- * Access the altmap_start_pfn and altmap_nr_pages fields
- * within the struct memory_notify specifically when dealing
- * with only MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
- *
- * When altmap is in use, take the specified memory range
- * online, which includes the altmap.
- */
- if (arg->altmap_nr_pages) {
- start = PFN_PHYS(arg->altmap_start_pfn);
- size += PFN_PHYS(arg->altmap_nr_pages);
- }
- rc = sclp_mem_change_state(start, size, 1);
- if (rc || !arg->altmap_nr_pages)
- break;
- /*
- * Set CMMA state to nodat here, since the struct page memory
- * at the beginning of the memory block will not go through the
- * buddy allocator later.
- */
- __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
- break;
- case MEM_FINISH_OFFLINE:
- /*
- * When altmap is in use, take the specified memory range
- * offline, which includes the altmap.
- */
- if (arg->altmap_nr_pages) {
- start = PFN_PHYS(arg->altmap_start_pfn);
- size += PFN_PHYS(arg->altmap_nr_pages);
- }
- sclp_mem_change_state(start, size, 0);
- break;
- default:
- break;
- }
- mutex_unlock(&sclp_mem_mutex);
- return rc ? NOTIFY_BAD : NOTIFY_OK;
-}
-
-static struct notifier_block sclp_mem_nb = {
- .notifier_call = sclp_mem_notifier,
-};
-
-static void __init align_to_block_size(unsigned long long *start,
- unsigned long long *size,
- unsigned long long alignment)
-{
- unsigned long long start_align, size_align;
-
- start_align = roundup(*start, alignment);
- size_align = rounddown(*start + *size, alignment) - start_align;
-
- pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
- *start, size_align >> 20, *size >> 20);
- *start = start_align;
- *size = size_align;
-}
-
-static void __init add_memory_merged(u16 rn)
-{
- unsigned long long start, size, addr, block_size;
- static u16 first_rn, num;
-
- if (rn && first_rn && (first_rn + num == rn)) {
- num++;
- return;
- }
- if (!first_rn)
- goto skip_add;
- start = rn2addr(first_rn);
- size = (unsigned long long) num * sclp.rzm;
- if (start >= ident_map_size)
- goto skip_add;
- if (start + size > ident_map_size)
- size = ident_map_size - start;
- block_size = memory_block_size_bytes();
- align_to_block_size(&start, &size, block_size);
- if (!size)
- goto skip_add;
- for (addr = start; addr < start + size; addr += block_size)
- add_memory(0, addr, block_size,
- cpu_has_edat1() ?
- MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
-skip_add:
- first_rn = rn;
- num = 1;
-}
-
-static void __init sclp_add_standby_memory(void)
-{
- struct memory_increment *incr;
-
- list_for_each_entry(incr, &sclp_mem_list, list)
- if (incr->standby)
- add_memory_merged(incr->rn);
- add_memory_merged(0);
-}
-
-static void __init insert_increment(u16 rn, int standby, int assigned)
-{
- struct memory_increment *incr, *new_incr;
- struct list_head *prev;
- u16 last_rn;
-
- new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
- if (!new_incr)
- return;
- new_incr->rn = rn;
- new_incr->standby = standby;
- last_rn = 0;
- prev = &sclp_mem_list;
- list_for_each_entry(incr, &sclp_mem_list, list) {
- if (assigned && incr->rn > rn)
- break;
- if (!assigned && incr->rn - last_rn > 1)
- break;
- last_rn = incr->rn;
- prev = &incr->list;
- }
- if (!assigned)
- new_incr->rn = last_rn + 1;
- if (new_incr->rn > sclp.rnmax) {
- kfree(new_incr);
- return;
- }
- list_add(&new_incr->list, prev);
-}
-
-static int __init sclp_detect_standby_memory(void)
-{
- struct read_storage_sccb *sccb;
- int i, id, assigned, rc;
-
- if (oldmem_data.start) /* No standby memory in kdump mode */
- return 0;
- if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
- return 0;
- rc = -ENOMEM;
- sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
- if (!sccb)
- goto out;
- assigned = 0;
- for (id = 0; id <= sclp_max_storage_id; id++) {
- memset(sccb, 0, PAGE_SIZE);
- sccb->header.length = PAGE_SIZE;
- rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
- if (rc)
- goto out;
- switch (sccb->header.response_code) {
- case 0x0010:
- set_bit(id, sclp_storage_ids);
- for (i = 0; i < sccb->assigned; i++) {
- if (!sccb->entries[i])
- continue;
- assigned++;
- insert_increment(sccb->entries[i] >> 16, 0, 1);
- }
- break;
- case 0x0310:
- break;
- case 0x0410:
- for (i = 0; i < sccb->assigned; i++) {
- if (!sccb->entries[i])
- continue;
- assigned++;
- insert_increment(sccb->entries[i] >> 16, 1, 1);
- }
- break;
- default:
- rc = -EIO;
- break;
- }
- if (!rc)
- sclp_max_storage_id = sccb->max_id;
- }
- if (rc || list_empty(&sclp_mem_list))
- goto out;
- for (i = 1; i <= sclp.rnmax - assigned; i++)
- insert_increment(0, 1, 0);
- rc = register_memory_notifier(&sclp_mem_nb);
- if (rc)
- goto out;
- sclp_add_standby_memory();
-out:
- free_page((unsigned long) sccb);
- return rc;
-}
-__initcall(sclp_detect_standby_memory);
-
-#endif /* CONFIG_MEMORY_HOTPLUG */
-
-/*
- * Channel path configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
-#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
-#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
-
-struct chp_cfg_sccb {
- struct sccb_header header;
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
static int do_chp_configure(sclp_cmdw_t cmd)
{
struct chp_cfg_sccb *sccb;
@@ -560,8 +174,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
if (!SCLP_HAS_CHP_RECONFIG)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_cfg_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
@@ -581,7 +194,7 @@ static int do_chp_configure(sclp_cmdw_t cmd)
break;
}
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
@@ -609,16 +222,6 @@ int sclp_chp_deconfigure(struct chp_id chpid)
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
-struct chp_info_sccb {
- struct sccb_header header;
- u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
- u8 standby[SCLP_CHP_INFO_MASK_SIZE];
- u8 configured[SCLP_CHP_INFO_MASK_SIZE];
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
@@ -634,8 +237,7 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_info_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
@@ -652,6 +254,6 @@ int sclp_chp_read_info(struct sclp_chp_info *info)
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
diff --git a/drivers/s390/char/sclp_mem.c b/drivers/s390/char/sclp_mem.c
new file mode 100644
index 000000000000..27f49f5fd358
--- /dev/null
+++ b/drivers/s390/char/sclp_mem.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory hotplug support via sclp
+ *
+ * Copyright IBM Corp. 2025
+ */
+
+#define KMSG_COMPONENT "sclp_mem"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/slab.h>
+#include <asm/facility.h>
+#include <asm/page.h>
+#include <asm/page-states.h>
+#include <asm/sclp.h>
+
+#include "sclp.h"
+
+#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
+#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+
+static DEFINE_MUTEX(sclp_mem_mutex);
+static LIST_HEAD(sclp_mem_list);
+static u8 sclp_max_storage_id;
+static DECLARE_BITMAP(sclp_storage_ids, 256);
+
+struct memory_increment {
+ struct list_head list;
+ u16 rn;
+ int standby;
+};
+
+struct assign_storage_sccb {
+ struct sccb_header header;
+ u16 rn;
+} __packed;
+
+struct attach_storage_sccb {
+ struct sccb_header header;
+ u16 :16;
+ u16 assigned;
+ u32 :32;
+ u32 entries[];
+} __packed;
+
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+ if (!sclp.rzm)
+ return 0;
+ return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
+}
+
+static unsigned long rn2addr(u16 rn)
+{
+ return (unsigned long)(rn - 1) * sclp.rzm;
+}
+
+static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
+{
+ struct assign_storage_sccb *sccb;
+ int rc;
+
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+ sccb->header.length = PAGE_SIZE;
+ sccb->rn = rn;
+ rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ case 0x0120:
+ break;
+ default:
+ pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
+ cmd, sccb->header.response_code, rn);
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+
+static int sclp_assign_storage(u16 rn)
+{
+ unsigned long start;
+ int rc;
+
+ rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
+ if (rc)
+ return rc;
+ start = rn2addr(rn);
+ storage_key_init_range(start, start + sclp.rzm);
+ return 0;
+}
+
+static int sclp_unassign_storage(u16 rn)
+{
+ return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
+}
+
+static int sclp_attach_storage(u8 id)
+{
+ struct attach_storage_sccb *sccb;
+ int rc, i;
+
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ return -ENOMEM;
+ sccb->header.length = PAGE_SIZE;
+ sccb->header.function_code = 0x40;
+ rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
+ SCLP_QUEUE_INTERVAL);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0020:
+ set_bit(id, sclp_storage_ids);
+ for (i = 0; i < sccb->assigned; i++) {
+ if (sccb->entries[i])
+ sclp_unassign_storage(sccb->entries[i] >> 16);
+ }
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+
+static int sclp_mem_change_state(unsigned long start, unsigned long size,
+ int online)
+{
+ struct memory_increment *incr;
+ unsigned long istart;
+ int rc = 0;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ istart = rn2addr(incr->rn);
+ if (start + size - 1 < istart)
+ break;
+ if (start > istart + sclp.rzm - 1)
+ continue;
+ if (online)
+ rc |= sclp_assign_storage(incr->rn);
+ else
+ sclp_unassign_storage(incr->rn);
+ if (rc == 0)
+ incr->standby = online ? 0 : 1;
+ }
+ return rc ? -EIO : 0;
+}
+
+static bool contains_standby_increment(unsigned long start, unsigned long end)
+{
+ struct memory_increment *incr;
+ unsigned long istart;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ istart = rn2addr(incr->rn);
+ if (end - 1 < istart)
+ continue;
+ if (start > istart + sclp.rzm - 1)
+ continue;
+ if (incr->standby)
+ return true;
+ }
+ return false;
+}
+
+static int sclp_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ unsigned long start, size;
+ struct memory_notify *arg;
+ unsigned char id;
+ int rc = 0;
+
+ arg = data;
+ start = arg->start_pfn << PAGE_SHIFT;
+ size = arg->nr_pages << PAGE_SHIFT;
+ mutex_lock(&sclp_mem_mutex);
+ for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
+ sclp_attach_storage(id);
+ switch (action) {
+ case MEM_GOING_OFFLINE:
+ /*
+ * Do not allow memory blocks that contain standby memory to be
+ * set offline. This simplifies the "memory online" case.
+ */
+ if (contains_standby_increment(start, start + size))
+ rc = -EPERM;
+ break;
+ case MEM_PREPARE_ONLINE:
+ /*
+ * The altmap_start_pfn and altmap_nr_pages fields of struct
+ * memory_notify are only valid for the MEM_PREPARE_ONLINE and
+ * MEM_FINISH_OFFLINE notifiers.
+ *
+ * When altmap is in use, take the specified memory range
+ * online, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
+ rc = sclp_mem_change_state(start, size, 1);
+ if (rc || !arg->altmap_nr_pages)
+ break;
+ /*
+ * Set CMMA state to nodat here, since the struct page memory
+ * at the beginning of the memory block will not go through the
+ * buddy allocator later.
+ */
+ __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
+ break;
+ case MEM_FINISH_OFFLINE:
+ /*
+ * When altmap is in use, take the specified memory range
+ * offline, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
+ sclp_mem_change_state(start, size, 0);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&sclp_mem_mutex);
+ return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block sclp_mem_nb = {
+ .notifier_call = sclp_mem_notifier,
+};
+
+static void __init align_to_block_size(unsigned long *start,
+ unsigned long *size,
+ unsigned long alignment)
+{
+ unsigned long start_align, size_align;
+
+ start_align = roundup(*start, alignment);
+ size_align = rounddown(*start + *size, alignment) - start_align;
+
+ pr_info("Standby memory at 0x%lx (%luM of %luM usable)\n",
+ *start, size_align >> 20, *size >> 20);
+ *start = start_align;
+ *size = size_align;
+}
+
+static void __init add_memory_merged(u16 rn)
+{
+ unsigned long start, size, addr, block_size;
+ static u16 first_rn, num;
+
+ if (rn && first_rn && (first_rn + num == rn)) {
+ num++;
+ return;
+ }
+ if (!first_rn)
+ goto skip_add;
+ start = rn2addr(first_rn);
+ size = (unsigned long)num * sclp.rzm;
+ if (start >= ident_map_size)
+ goto skip_add;
+ if (start + size > ident_map_size)
+ size = ident_map_size - start;
+ block_size = memory_block_size_bytes();
+ align_to_block_size(&start, &size, block_size);
+ if (!size)
+ goto skip_add;
+ for (addr = start; addr < start + size; addr += block_size) {
+ add_memory(0, addr, block_size,
+ cpu_has_edat1() ?
+ MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
+ }
+skip_add:
+ first_rn = rn;
+ num = 1;
+}
+
+static void __init sclp_add_standby_memory(void)
+{
+ struct memory_increment *incr;
+
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ if (incr->standby)
+ add_memory_merged(incr->rn);
+ }
+ add_memory_merged(0);
+}
+
+static void __init insert_increment(u16 rn, int standby, int assigned)
+{
+ struct memory_increment *incr, *new_incr;
+ struct list_head *prev;
+ u16 last_rn;
+
+ new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
+ if (!new_incr)
+ return;
+ new_incr->rn = rn;
+ new_incr->standby = standby;
+ last_rn = 0;
+ prev = &sclp_mem_list;
+ list_for_each_entry(incr, &sclp_mem_list, list) {
+ if (assigned && incr->rn > rn)
+ break;
+ if (!assigned && incr->rn - last_rn > 1)
+ break;
+ last_rn = incr->rn;
+ prev = &incr->list;
+ }
+ if (!assigned)
+ new_incr->rn = last_rn + 1;
+ if (new_incr->rn > sclp.rnmax) {
+ kfree(new_incr);
+ return;
+ }
+ list_add(&new_incr->list, prev);
+}
+
+static int __init sclp_detect_standby_memory(void)
+{
+ struct read_storage_sccb *sccb;
+ int i, id, assigned, rc;
+
+ /* No standby memory in kdump mode */
+ if (oldmem_data.start)
+ return 0;
+ if ((sclp.facilities & 0xe00000000000UL) != 0xe00000000000UL)
+ return 0;
+ rc = -ENOMEM;
+ sccb = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!sccb)
+ goto out;
+ assigned = 0;
+ for (id = 0; id <= sclp_max_storage_id; id++) {
+ memset(sccb, 0, PAGE_SIZE);
+ sccb->header.length = PAGE_SIZE;
+ rc = sclp_sync_request(SCLP_CMDW_READ_STORAGE_INFO | id << 8, sccb);
+ if (rc)
+ goto out;
+ switch (sccb->header.response_code) {
+ case 0x0010:
+ set_bit(id, sclp_storage_ids);
+ for (i = 0; i < sccb->assigned; i++) {
+ if (!sccb->entries[i])
+ continue;
+ assigned++;
+ insert_increment(sccb->entries[i] >> 16, 0, 1);
+ }
+ break;
+ case 0x0310:
+ break;
+ case 0x0410:
+ for (i = 0; i < sccb->assigned; i++) {
+ if (!sccb->entries[i])
+ continue;
+ assigned++;
+ insert_increment(sccb->entries[i] >> 16, 1, 1);
+ }
+ break;
+ default:
+ rc = -EIO;
+ break;
+ }
+ if (!rc)
+ sclp_max_storage_id = sccb->max_id;
+ }
+ if (rc || list_empty(&sclp_mem_list))
+ goto out;
+ for (i = 1; i <= sclp.rnmax - assigned; i++)
+ insert_increment(0, 1, 0);
+ rc = register_memory_notifier(&sclp_mem_nb);
+ if (rc)
+ goto out;
+ sclp_add_standby_memory();
+out:
+ free_page((unsigned long)sccb);
+ return rc;
+}
+__initcall(sclp_detect_standby_memory);
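A worked sketch of the increment arithmetic in the new file, assuming a hypothetical 256M increment size (sclp.rzm) and 256M memory blocks; the values are illustrative only:

/* rn2addr() maps 1-based increment numbers to physical addresses:
 * rn2addr(1) == 0x0, rn2addr(2) == 0x10000000, and increment n
 * covers [rn2addr(n), rn2addr(n) + sclp.rzm). */
unsigned long start = 0x30000000UL;	/* rn2addr(4) */
unsigned long size = 0x48000000UL;	/* 4.5 increments */

align_to_block_size(&start, &size, 0x10000000UL);
/* start == 0x30000000, size == 0x40000000: the unaligned 128M tail
 * at 0x70000000 is dropped rather than partially onlined. */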
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index a1bafaf73f87..2a2931d303cb 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1671,7 +1671,7 @@ tape_3590_init(void)
DBF_EVENT(3, "3590 init\n");
- tape_3590_wq = alloc_workqueue("tape_3590", 0, 0);
+ tape_3590_wq = alloc_workqueue("tape_3590", WQ_PERCPU, 0);
if (!tape_3590_wq)
return -ENOMEM;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 3bf09a89a089..e92e2fd8ce5d 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -1405,7 +1405,9 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* Step 3: import the encrypted key value as a new key */
rc = ep11_unwrapkey(card, domain, kek, keklen,
encbuf, encbuflen, 0, def_iv,
- keybitsize, 0, keybuf, keybufsize, keytype, xflags);
+ keybitsize, keygenflags,
+ keybuf, keybufsize,
+ keytype, xflags);
if (rc) {
ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n",
__func__, rc);
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 9bf823348cd3..d288e9d9c187 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -46,6 +46,7 @@ static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);
#define CQSPI_DMA_SET_MASK BIT(7)
#define CQSPI_SUPPORT_DEVICE_RESET BIT(8)
#define CQSPI_DISABLE_STIG_MODE BIT(9)
+#define CQSPI_DISABLE_RUNTIME_PM BIT(10)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -1468,14 +1469,17 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
int ret;
struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
struct device *dev = &cqspi->pdev->dev;
+ const struct cqspi_driver_platdata *ddata = of_device_get_match_data(dev);
if (refcount_read(&cqspi->inflight_ops) == 0)
return -ENODEV;
- ret = pm_runtime_resume_and_get(dev);
- if (ret) {
- dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
- return ret;
+ if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
+ return ret;
+ }
}
if (!refcount_read(&cqspi->refcount))
@@ -1491,7 +1495,8 @@ static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
ret = cqspi_mem_process(mem, op);
- pm_runtime_put_autosuspend(dev);
+ if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
+ pm_runtime_put_autosuspend(dev);
if (ret)
dev_err(&mem->spi->dev, "operation failed with %d\n", ret);
@@ -1985,11 +1990,12 @@ static int cqspi_probe(struct platform_device *pdev)
goto probe_setup_failed;
}
- pm_runtime_enable(dev);
-
- pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT);
- pm_runtime_use_autosuspend(dev);
- pm_runtime_get_noresume(dev);
+ if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ }
ret = spi_register_controller(host);
if (ret) {
@@ -1997,12 +2003,17 @@ static int cqspi_probe(struct platform_device *pdev)
goto probe_setup_failed;
}
- pm_runtime_put_autosuspend(dev);
+ if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
return 0;
probe_setup_failed:
cqspi_controller_enable(cqspi, 0);
- pm_runtime_disable(dev);
+ if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
+ pm_runtime_disable(dev);
probe_reset_failed:
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
@@ -2013,7 +2024,11 @@ probe_clk_failed:
static void cqspi_remove(struct platform_device *pdev)
{
+ const struct cqspi_driver_platdata *ddata;
struct cqspi_st *cqspi = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ ddata = of_device_get_match_data(dev);
refcount_set(&cqspi->refcount, 0);
@@ -2026,14 +2041,17 @@ static void cqspi_remove(struct platform_device *pdev)
if (cqspi->rx_chan)
dma_release_channel(cqspi->rx_chan);
- if (pm_runtime_get_sync(&pdev->dev) >= 0)
- clk_disable(cqspi->clk);
+ if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
+ if (pm_runtime_get_sync(&pdev->dev) >= 0)
+ clk_disable(cqspi->clk);
if (cqspi->is_jh7110)
cqspi_jh7110_disable_clk(pdev, cqspi);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ }
}
static int cqspi_runtime_suspend(struct device *dev)
@@ -2112,7 +2130,8 @@ static const struct cqspi_driver_platdata socfpga_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE
| CQSPI_NO_SUPPORT_WR_COMPLETION
| CQSPI_SLOW_SRAM
- | CQSPI_DISABLE_STIG_MODE,
+ | CQSPI_DISABLE_STIG_MODE
+ | CQSPI_DISABLE_RUNTIME_PM,
};
static const struct cqspi_driver_platdata versal_ospi = {
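The CQSPI_DISABLE_RUNTIME_PM test recurs at every runtime-PM call site touched by this patch; a hypothetical helper (not part of the patch) that would express the predicate once:

static bool cqspi_rpm_enabled(const struct cqspi_driver_platdata *ddata)
{
	/* Runtime PM stays enabled unless the platform data opts out. */
	return !(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM));
}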
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 6dc58a30804a..69c2e9d9be3c 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -988,6 +988,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
else
l &= ~OMAP2_MCSPI_CHCONF_PHA;
+ mcspi_write_chconf0(spi, l | OMAP2_MCSPI_CHCONF_FORCE);
mcspi_write_chconf0(spi, l);
cs->mode = spi->mode;
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 1e50675772fe..cc88aaa106da 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -243,7 +243,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
&hwq->sqe_dma_addr,
GFP_KERNEL);
- if (!hwq->sqe_dma_addr) {
+ if (!hwq->sqe_base_addr) {
dev_err(hba->dev, "SQE allocation failed\n");
return -ENOMEM;
}
@@ -252,7 +252,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
&hwq->cqe_dma_addr,
GFP_KERNEL);
- if (!hwq->cqe_dma_addr) {
+ if (!hwq->cqe_base_addr) {
dev_err(hba->dev, "CQE allocation failed\n");
return -ENOMEM;
}
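Both ufs-mcq hunks fix the same pattern: dmam_alloc_coherent() signals failure through its return value (the CPU virtual address), and the dma_addr_t output parameter is only meaningful when that return is non-NULL. A minimal sketch with hypothetical names:

void *buf;
dma_addr_t dma_handle;

buf = dmam_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
if (!buf)	/* test the returned VA, never the DMA handle */
	return -ENOMEM;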
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 08a251df20c4..5246fa6af3d6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1891,7 +1891,7 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
/* Super block */
static const struct super_operations ffs_sb_operations = {
.statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
struct ffs_sb_fill_data {
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index b51e132b0cd2..13c3da49348c 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -2011,7 +2011,7 @@ gadgetfs_create_file (struct super_block *sb, char const *name,
static const struct super_operations gadget_fs_operations = {
.statfs = simple_statfs,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static int
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c6508fe0d5c8..35ded4330431 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -765,11 +765,11 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int err;
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
- bool busyloop_intr;
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
do {
- busyloop_intr = false;
+ bool busyloop_intr = false;
+
if (nvq->done_idx == VHOST_NET_BATCH)
vhost_tx_batch(net, nvq, sock, &msg);
@@ -780,10 +780,18 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- /* Kicks are disabled at this point, break loop and
- * process any remaining batched packets. Queue will
- * be re-enabled afterwards.
+ /* Flush batched packets to handle pending RX
+ * work (if busyloop_intr is set) and to avoid
+ * unnecessary virtqueue kicks.
*/
+ vhost_tx_batch(net, nvq, sock, &msg);
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev,
+ vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
break;
}
@@ -839,22 +847,7 @@ done:
++nvq->done_idx;
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
- /* Kicks are still disabled, dispatch any remaining batched msgs. */
vhost_tx_batch(net, nvq, sock, &msg);
-
- if (unlikely(busyloop_intr))
- /* If interrupted while doing busy polling, requeue the
- * handler to be fair handle_rx as well as other tasks
- * waiting on cpu.
- */
- vhost_poll_queue(&vq->poll);
- else
- /* All of our work has been completed; however, before
- * leaving the TX handler, do one last check for work,
- * and requeue handler if necessary. If there is no work,
- * queue will be reenabled.
- */
- vhost_net_busy_poll_try_queue(net, vq);
}
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
@@ -1014,7 +1007,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
- bool *busyloop_intr, unsigned int count)
+ bool *busyloop_intr, unsigned int *count)
{
struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
@@ -1024,7 +1017,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
if (!len && rvq->busyloop_timeout) {
/* Flush batched heads first */
- vhost_net_signal_used(rnvq, count);
+ vhost_net_signal_used(rnvq, *count);
+ *count = 0;
/* Both tx vq and rx socket were polled here */
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
@@ -1180,7 +1174,7 @@ static void handle_rx(struct vhost_net *net)
do {
sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
- &busyloop_intr, count);
+ &busyloop_intr, &count);
if (!sock_len)
break;
sock_len += sock_hlen;
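The handle_tx_copy() rework relies on the usual vhost notification race check: after kicks are re-enabled, buffers may already have been queued, so a non-zero return from vhost_enable_notify() means disable again and keep polling. The shape of the pattern, as used in the hunk above:

vhost_tx_batch(net, nvq, sock, &msg);	/* flush before idling */
if (unlikely(busyloop_intr)) {
	vhost_poll_queue(&vq->poll);	/* be fair to pending RX work */
} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
	vhost_disable_notify(&net->dev, vq);
	continue;			/* work raced in; keep going */
}
break;					/* truly idle: wait for a kick */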
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index abf51332a5c5..98e4f68f4e3c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2884,7 +2884,7 @@ vhost_scsi_make_tport(struct target_fabric_configfs *tf,
check_len:
if (strlen(name) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated %s Address: %s, exceeds"
- " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+ " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
VHOST_SCSI_NAMELEN);
kfree(tport);
return ERR_PTR(-EINVAL);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 55f5731e94c3..5940e2eb9231 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2504,7 +2504,7 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font,
unsigned charcount = font->charcount;
int w = font->width;
int h = font->height;
- int size;
+ int size, alloc_size;
int i, csum;
u8 *new_data, *data = font->data;
int pitch = PITCH(font->width);
@@ -2531,9 +2531,16 @@ static int fbcon_set_font(struct vc_data *vc, const struct console_font *font,
if (fbcon_invalid_charcount(info, charcount))
return -EINVAL;
- size = CALC_FONTSZ(h, pitch, charcount);
+ /* Check for integer overflow in font size calculation */
+ if (check_mul_overflow(h, pitch, &size) ||
+ check_mul_overflow(size, charcount, &size))
+ return -EINVAL;
+
+ /* Check for overflow in allocation size calculation */
+ if (check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size))
+ return -EINVAL;
- new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
+ new_data = kmalloc(alloc_size, GFP_USER);
if (!new_data)
return -ENOMEM;
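check_mul_overflow() and check_add_overflow() from <linux/overflow.h> return true on overflow and store the wrapped-checked result otherwise, which lets the size computation above fail closed. A condensed sketch of the combined check:

int size, alloc_size;

if (check_mul_overflow(h, pitch, &size) ||
    check_mul_overflow(size, charcount, &size) ||
    check_add_overflow(FONT_EXTRA_WORDS * sizeof(int), size, &alloc_size))
	return -EINVAL;	/* some intermediate product or sum overflowed */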
diff --git a/drivers/virt/coco/efi_secret/Kconfig b/drivers/virt/coco/efi_secret/Kconfig
index 4404d198f3b2..94d88e5da707 100644
--- a/drivers/virt/coco/efi_secret/Kconfig
+++ b/drivers/virt/coco/efi_secret/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config EFI_SECRET
tristate "EFI secret area securityfs support"
- depends on EFI && X86_64
+ depends on EFI && (X86_64 || ARM64)
select EFI_COCO_SECRET
select SECURITYFS
help
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 2de37dcd7556..49c3f9926394 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -302,7 +302,7 @@ static enum bp_state reserve_additional_memory(void)
* are not restored since this region is now known not to
* conflict with any devices.
*/
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (xen_pv_domain()) {
unsigned long pfn, i;
pfn = PFN_DOWN(resource->start);
@@ -626,7 +626,7 @@ int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (xen_pv_domain()) {
ret = xen_alloc_p2m_entry(page_to_pfn(page));
if (ret < 0)
goto out_undo;
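This and the following xen patches all lean on the same identity: exactly the PV domains lack an auto-translated physmap, so the feature test and the domain test can substitute for one another. Stated as a comment (an equivalence assumed by these conversions, not newly guaranteed by them):

/*
 *	xen_pv_domain()   <=>  !xen_feature(XENFEAT_auto_translated_physmap)
 *	!xen_pv_domain()  <=>   xen_feature(XENFEAT_auto_translated_physmap)
 *
 * i.e. HVM and PVH guests are always auto-translated; only PV guests
 * manage a p2m and need the explicit entries set up here.
 */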
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 41309d38f78c..9478fae014e5 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1314,14 +1314,17 @@ int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
-static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
+static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn,
+ bool percpu)
{
struct evtchn_status status;
evtchn_port_t port;
- int rc = -ENOENT;
+ bool exists = false;
memset(&status, 0, sizeof(status));
for (port = 0; port < xen_evtchn_max_channels(); port++) {
+ int rc;
+
status.dom = DOMID_SELF;
status.port = port;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
@@ -1329,12 +1332,16 @@ static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
continue;
if (status.status != EVTCHNSTAT_virq)
continue;
- if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
+ if (status.u.virq != virq)
+ continue;
+ if (status.vcpu == xen_vcpu_nr(cpu)) {
*evtchn = port;
- break;
+ return 0;
+ } else if (!percpu) {
+ exists = true;
}
}
- return rc;
+ return exists ? -EEXIST : -ENOENT;
}
/**
@@ -1381,8 +1388,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
evtchn = bind_virq.port;
else {
if (ret == -EEXIST)
- ret = find_virq(virq, cpu, &evtchn);
- BUG_ON(ret < 0);
+ ret = find_virq(virq, cpu, &evtchn, percpu);
+ if (ret) {
+ __unbind_from_irq(info, info->irq);
+ goto out;
+ }
}
ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
@@ -1787,9 +1797,20 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
* virq or IPI channel, which don't actually need to be rebound. Ignore
* it, but don't do the xenlinux-level rebind in that case.
*/
- if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) {
+ int old_cpu = info->cpu;
+
bind_evtchn_to_cpu(info, tcpu, false);
+ if (info->type == IRQT_VIRQ) {
+ int virq = info->u.virq;
+ int irq = per_cpu(virq_to_irq, old_cpu)[virq];
+
+ per_cpu(virq_to_irq, old_cpu)[virq] = -1;
+ per_cpu(virq_to_irq, tcpu)[virq] = irq;
+ }
+ }
+
do_unmask(info, EVT_MASK_REASON_TEMPORARY);
return 0;
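The rebind hunk keeps the per-CPU virq_to_irq table coherent when a VIRQ channel migrates; a sketch of the table move in isolation, assuming the usual DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) layout:

static void move_virq_mapping(unsigned int virq, unsigned int from,
			      unsigned int to, int irq)
{
	/* The old CPU no longer owns this VIRQ... */
	per_cpu(virq_to_irq, from)[virq] = -1;
	/* ...the new CPU resolves it to the same Linux irq. */
	per_cpu(virq_to_irq, to)[virq] = irq;
}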
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 82855105ab85..550980dd3b0b 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -720,16 +720,15 @@ static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
/* DMA buffer IOCTL support. */
-long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
+long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
struct ioctl_gntdev_dmabuf_exp_from_refs op;
u32 *refs;
long ret;
- if (use_ptemod) {
- pr_debug("Cannot provide dma-buf: use_ptemode %d\n",
- use_ptemod);
+ if (xen_pv_domain()) {
+ pr_debug("Cannot provide dma-buf in a PV domain\n");
return -EINVAL;
}
diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h
index 3d9b9cf9d5a1..9adf96ac74d3 100644
--- a/drivers/xen/gntdev-dmabuf.h
+++ b/drivers/xen/gntdev-dmabuf.h
@@ -18,7 +18,7 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp);
void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
-long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
+long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv,
struct ioctl_gntdev_dmabuf_exp_from_refs __user *u);
long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 1f2160765618..91ba5078c9d9 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -73,9 +73,6 @@ module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
"Maximum number of grants that may be mapped by one mapping request");
-/* True in PV mode, false otherwise */
-static int use_ptemod;
-
static void unmap_grant_pages(struct gntdev_grant_map *map,
int offset, int pages);
@@ -163,7 +160,7 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
NULL == add->pages ||
NULL == add->being_removed)
goto err;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
GFP_KERNEL);
add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
@@ -211,7 +208,7 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
add->grants[i].ref = INVALID_GRANT_REF;
add->map_ops[i].handle = INVALID_GRANT_HANDLE;
add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
}
@@ -268,7 +265,7 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
- if (map->pages && !use_ptemod) {
+ if (map->pages && !xen_pv_domain()) {
/*
* Increment the reference count. This ensures that the
* subsequent call to unmap_grant_pages() will not wind up
@@ -298,7 +295,7 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
*/
}
- if (use_ptemod && map->notifier_init)
+ if (xen_pv_domain() && map->notifier_init)
mmu_interval_notifier_remove(&map->notifier);
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
@@ -334,7 +331,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
size_t alloced = 0;
int i, err = 0;
- if (!use_ptemod) {
+ if (!xen_pv_domain()) {
/* Note: it could already be mapped */
if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
return 0;
@@ -389,7 +386,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
if (map->kmap_ops[i].status == GNTST_okay) {
alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
@@ -421,7 +418,7 @@ static void __unmap_grant_pages_done(int result,
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
if (map->kunmap_ops[offset + i].status == GNTST_okay &&
map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE)
successful_unmaps++;
@@ -464,7 +461,7 @@ static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
}
map->unmap_data.unmap_ops = map->unmap_ops + offset;
- map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+ map->unmap_data.kunmap_ops = xen_pv_domain() ? map->kunmap_ops + offset : NULL;
map->unmap_data.pages = map->pages + offset;
map->unmap_data.count = pages;
map->unmap_data.done = __unmap_grant_pages_done;
@@ -1039,7 +1036,7 @@ static long gntdev_ioctl(struct file *flip,
#ifdef CONFIG_XEN_GNTDEV_DMABUF
case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
- return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);
+ return gntdev_ioctl_dmabuf_exp_from_refs(priv, ptr);
case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);
@@ -1086,7 +1083,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
- if (use_ptemod)
+ if (xen_pv_domain())
vm_flags_set(vma, VM_DONTCOPY);
vma->vm_private_data = map;
@@ -1102,7 +1099,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
map->pages_vm_start = vma->vm_start;
- if (use_ptemod) {
+ if (xen_pv_domain()) {
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
@@ -1113,7 +1110,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
}
mutex_unlock(&priv->lock);
- if (use_ptemod) {
+ if (xen_pv_domain()) {
/*
* gntdev takes the address of the PTE in find_grant_ptes() and
* passes it to the hypervisor in gntdev_map_grant_pages(). The
@@ -1139,7 +1136,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
if (err)
goto out_put_map;
- if (!use_ptemod) {
+ if (!xen_pv_domain()) {
err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
@@ -1154,7 +1151,7 @@ unlock_out:
out_unlock_put:
mutex_unlock(&priv->lock);
out_put_map:
- if (use_ptemod)
+ if (xen_pv_domain())
unmap_grant_pages(map, 0, map->count);
gntdev_put_map(priv, map);
return err;
@@ -1183,8 +1180,6 @@ static int __init gntdev_init(void)
if (!xen_domain())
return -ENODEV;
- use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
-
err = misc_register(&gntdev_miscdev);
if (err != 0) {
pr_err("Could not register gntdev device\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 04a6b470b15d..478d2ad725ac 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1449,7 +1449,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
unsigned int nr_gframes = end_idx + 1;
int rc;
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (!xen_pv_domain()) {
struct xen_add_to_physmap xatp;
unsigned int i = end_idx;
rc = 0;
@@ -1570,7 +1570,7 @@ static int gnttab_setup(void)
if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
- if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
+ if (!xen_pv_domain() && gnttab_shared.addr == NULL) {
gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
if (gnttab_shared.addr == NULL) {
pr_warn("gnttab share frames is not mapped!\n");
@@ -1588,7 +1588,7 @@ int gnttab_resume(void)
int gnttab_suspend(void)
{
- if (!xen_feature(XENFEAT_auto_translated_physmap))
+ if (xen_pv_domain())
gnttab_interface->unmap_frames();
return 0;
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 841afa4933c7..e20c40a62e64 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -11,6 +11,7 @@
#include <linux/reboot.h>
#include <linux/sysrq.h>
#include <linux/stop_machine.h>
+#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/syscore_ops.h>
#include <linux/export.h>
@@ -95,10 +96,16 @@ static void do_suspend(void)
shutting_down = SHUTDOWN_SUSPEND;
+ if (!mutex_trylock(&system_transition_mutex)) {
+ pr_err("%s: failed to take system_transition_mutex\n", __func__);
+ goto out;
+ }
+
err = freeze_processes();
if (err) {
pr_err("%s: freeze processes failed %d\n", __func__, err);
- goto out;
+ goto out_unlock;
}
err = freeze_kernel_threads();
@@ -110,7 +117,7 @@ static void do_suspend(void)
err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
pr_err("%s: dpm_suspend_start %d\n", __func__, err);
- goto out_thaw;
+ goto out_resume_end;
}
printk(KERN_DEBUG "suspending xenstore...\n");
@@ -150,10 +157,13 @@ out_resume:
else
xs_suspend_cancel();
+out_resume_end:
dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
out_thaw:
thaw_processes();
+out_unlock:
+ mutex_unlock(&system_transition_mutex);
out:
shutting_down = SHUTDOWN_INVALID;
}
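do_suspend() now serializes against other PM transitions via system_transition_mutex, using trylock so that a Xen suspend request fails fast rather than deadlocking if another transition is already in flight (an assumption about intent, consistent with the error path added). The unwind mirrors acquisition order, sketched:

if (!mutex_trylock(&system_transition_mutex))
	goto out;		/* another transition already in flight */
err = freeze_processes();
if (err)
	goto out_unlock;
/* ... dpm_suspend_start(), xenstore suspend, resume ... */
out_unlock:
	mutex_unlock(&system_transition_mutex);
out:
	shutting_down = SHUTDOWN_INVALID;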
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 13a10f3294a8..f52a457b302d 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -271,7 +271,7 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
struct mmap_gfn_state state;
/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ if (!xen_pv_domain())
return -ENOSYS;
if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
@@ -353,7 +353,7 @@ static int mmap_batch_fn(void *data, int nr, void *state)
struct page **cur_pages = NULL;
int ret;
- if (xen_feature(XENFEAT_auto_translated_physmap))
+ if (!xen_pv_domain())
cur_pages = &pages[st->index];
BUG_ON(nr < 0);
@@ -535,7 +535,7 @@ static long privcmd_ioctl_mmap_batch(
ret = -EINVAL;
goto out_unlock;
}
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (!xen_pv_domain()) {
ret = alloc_empty_pages(vma, nr_pages);
if (ret < 0)
goto out_unlock;
@@ -779,8 +779,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
goto out;
}
- if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
- xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && !xen_pv_domain()) {
unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
struct page **pages;
unsigned int i;
@@ -811,8 +810,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
if (rc)
goto out;
- if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
- xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && !xen_pv_domain()) {
rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
} else {
unsigned int domid =
@@ -1591,7 +1589,7 @@ static void privcmd_close(struct vm_area_struct *vma)
int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
int rc;
- if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
+ if (xen_pv_domain() || !numpgs || !pages)
return;
rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index a39f2d36dd9c..d6fc2aefe264 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -105,7 +105,7 @@ static int fill_list(unsigned int nr_pages)
* are not restored since this region is now known not to
* conflict with any devices.
*/
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (xen_pv_domain()) {
xen_pfn_t pfn = PFN_DOWN(res->start);
for (i = 0; i < alloc_pages; i++) {
@@ -184,7 +184,7 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
pages[i] = pg;
#ifdef CONFIG_XEN_HAVE_PVMMU
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (xen_pv_domain()) {
ret = xen_alloc_p2m_entry(page_to_pfn(pg));
if (ret < 0) {
unsigned int j;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e73ec225d4a6..2dc874fb5506 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -955,7 +955,7 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
- if (!xen_feature(XENFEAT_auto_translated_physmap))
+ if (xen_pv_domain())
ring_ops = &ring_ops_pv;
else
#endif
diff --git a/drivers/zorro/names.c b/drivers/zorro/names.c
index 077114ccc840..b44f90989a66 100644
--- a/drivers/zorro/names.c
+++ b/drivers/zorro/names.c
@@ -36,21 +36,21 @@ struct zorro_manuf_info {
* real memory.. Parse the same file multiple times
* to get all the info.
*/
-#define MANUF( manuf, name ) static char __manufstr_##manuf[] __initdata = name;
+#define MANUF(manuf, name) static char __manufstr_##manuf[] __initdata = name;
#define ENDMANUF()
-#define PRODUCT( manuf, prod, name ) static char __prodstr_##manuf##prod[] __initdata = name;
+#define PRODUCT(manuf, prod, name) static char __prodstr_##manuf##prod[] __initdata = name;
#include "devlist.h"
-#define MANUF( manuf, name ) static struct zorro_prod_info __prods_##manuf[] __initdata = {
+#define MANUF(manuf, name) static struct zorro_prod_info __prods_##manuf[] __initdata = {
#define ENDMANUF() };
-#define PRODUCT( manuf, prod, name ) { 0x##prod, 0, __prodstr_##manuf##prod },
+#define PRODUCT(manuf, prod, name) { 0x##prod, 0, __prodstr_##manuf##prod },
#include "devlist.h"
static struct zorro_manuf_info __initdata zorro_manuf_list[] = {
-#define MANUF( manuf, name ) { 0x##manuf, ARRAY_SIZE(__prods_##manuf), __manufstr_##manuf, __prods_##manuf },
+#define MANUF(manuf, name) { 0x##manuf, ARRAY_SIZE(__prods_##manuf), __manufstr_##manuf, __prods_##manuf },
#define ENDMANUF()
-#define PRODUCT( manuf, prod, name )
+#define PRODUCT(manuf, prod, name)
#include "devlist.h"
};
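The MANUF/PRODUCT machinery above is the classic X-macro technique: one data file expanded several times under different macro definitions, so the strings and the tables that reference them always stay in sync. A self-contained illustration with a hypothetical colors.h (not part of the tree):

/* colors.h contains only entries such as:
 *	COLOR(RED,   "red")
 *	COLOR(GREEN, "green")
 */
#define COLOR(id, name) static const char __str_##id[] = name;
#include "colors.h"
#undef COLOR

#define COLOR(id, name) __str_##id,
static const char *color_names[] = {
#include "colors.h"
};
#undef COLOR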