Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/acpi_processor.c2
-rw-r--r--drivers/acpi/acpi_tad.c4
-rw-r--r--drivers/acpi/acpica/extrace.c4
-rw-r--r--drivers/acpi/apei/apei-internal.h2
-rw-r--r--drivers/acpi/apei/einj-core.c386
-rw-r--r--drivers/acpi/apei/einj-cxl.c2
-rw-r--r--drivers/acpi/apei/ghes.c90
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/device_pm.c8
-rw-r--r--drivers/acpi/dptf/dptf_power.c2
-rw-r--r--drivers/acpi/dptf/int340x_thermal.c7
-rw-r--r--drivers/acpi/fan.h1
-rw-r--r--drivers/acpi/fan_attr.c8
-rw-r--r--drivers/acpi/fan_core.c2
-rw-r--r--drivers/acpi/nfit/intel.c119
-rw-r--r--drivers/acpi/pci_link.c2
-rw-r--r--drivers/acpi/pfr_update.c63
-rw-r--r--drivers/acpi/prmt.c26
-rw-r--r--drivers/acpi/proc.c17
-rw-r--r--drivers/acpi/processor_perflib.c10
-rw-r--r--drivers/acpi/processor_throttling.c2
-rw-r--r--drivers/acpi/riscv/cppc.c2
-rw-r--r--drivers/acpi/wakeup.c4
-rw-r--r--drivers/acpi/x86/lpss.c3
-rw-r--r--drivers/amba/bus.c4
-rw-r--r--drivers/android/binder.c22
-rw-r--r--drivers/android/binder_internal.h2
-rw-r--r--drivers/android/binderfs.c15
-rw-r--r--drivers/ata/Kconfig36
-rw-r--r--drivers/ata/ahci.c25
-rw-r--r--drivers/ata/ahci_da850.c6
-rw-r--r--drivers/ata/ahci_dm816.c2
-rw-r--r--drivers/ata/ahci_imx.c13
-rw-r--r--drivers/ata/ahci_qoriq.c4
-rw-r--r--drivers/ata/ahci_xgene.c8
-rw-r--r--drivers/ata/ata_piix.c4
-rw-r--r--drivers/ata/libahci.c10
-rw-r--r--drivers/ata/libata-core.c137
-rw-r--r--drivers/ata/libata-eh.c433
-rw-r--r--drivers/ata/libata-pmp.c26
-rw-r--r--drivers/ata/libata-sata.c7
-rw-r--r--drivers/ata/libata-scsi.c25
-rw-r--r--drivers/ata/libata-sff.c18
-rw-r--r--drivers/ata/libata-transport.c4
-rw-r--r--drivers/ata/libata.h24
-rw-r--r--drivers/ata/pata_acpi.c2
-rw-r--r--drivers/ata/pata_ali.c10
-rw-r--r--drivers/ata/pata_amd.c4
-rw-r--r--drivers/ata/pata_artop.c4
-rw-r--r--drivers/ata/pata_atiixp.c2
-rw-r--r--drivers/ata/pata_efar.c2
-rw-r--r--drivers/ata/pata_ep93xx.c4
-rw-r--r--drivers/ata/pata_hpt366.c2
-rw-r--r--drivers/ata/pata_hpt37x.c4
-rw-r--r--drivers/ata/pata_hpt3x2n.c2
-rw-r--r--drivers/ata/pata_icside.c2
-rw-r--r--drivers/ata/pata_it8213.c2
-rw-r--r--drivers/ata/pata_jmicron.c2
-rw-r--r--drivers/ata/pata_marvell.c2
-rw-r--r--drivers/ata/pata_mpiix.c2
-rw-r--r--drivers/ata/pata_ns87410.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/ata/pata_oldpiix.c2
-rw-r--r--drivers/ata/pata_opti.c2
-rw-r--r--drivers/ata/pata_optidma.c6
-rw-r--r--drivers/ata/pata_parport/pata_parport.c4
-rw-r--r--drivers/ata/pata_pcmcia.c4
-rw-r--r--drivers/ata/pata_pdc2027x.c4
-rw-r--r--drivers/ata/pata_rdc.c6
-rw-r--r--drivers/ata/pata_sis.c2
-rw-r--r--drivers/ata/pata_sl82c105.c2
-rw-r--r--drivers/ata/pata_triflex.c2
-rw-r--r--drivers/ata/pata_via.c2
-rw-r--r--drivers/ata/pdc_adma.c2
-rw-r--r--drivers/ata/sata_dwc_460ex.c2
-rw-r--r--drivers/ata/sata_fsl.c6
-rw-r--r--drivers/ata/sata_highbank.c2
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c10
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/ata/sata_promise.c4
-rw-r--r--drivers/ata/sata_qstor.c4
-rw-r--r--drivers/ata/sata_rcar.c2
-rw-r--r--drivers/ata/sata_sil.c2
-rw-r--r--drivers/ata/sata_sil24.c8
-rw-r--r--drivers/ata/sata_svw.c4
-rw-r--r--drivers/ata/sata_sx4.c2
-rw-r--r--drivers/ata/sata_uli.c2
-rw-r--r--drivers/ata/sata_via.c4
-rw-r--r--drivers/base/auxiliary.c2
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/firmware_loader/main.c31
-rw-r--r--drivers/base/platform.c9
-rw-r--r--drivers/base/power/common.c9
-rw-r--r--drivers/base/power/main.c174
-rw-r--r--drivers/base/power/runtime.c154
-rw-r--r--drivers/base/regmap/regmap-debugfs.c10
-rw-r--r--drivers/base/regmap/regmap-kunit.c2
-rw-r--r--drivers/base/regmap/regmap.c2
-rw-r--r--drivers/block/Kconfig43
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c43
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c27
-rw-r--r--drivers/block/nbd.c12
-rw-r--r--drivers/block/pktcdvd.c2916
-rw-r--r--drivers/block/sunvdc.c4
-rw-r--r--drivers/block/ublk_drv.c578
-rw-r--r--drivers/block/virtio_blk.c5
-rw-r--r--drivers/block/zram/zcomp.c15
-rw-r--r--drivers/block/zram/zcomp.h2
-rw-r--r--drivers/block/zram/zram_drv.c31
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/btbcm.c8
-rw-r--r--drivers/bluetooth/btintel.c30
-rw-r--r--drivers/bluetooth/btintel_pcie.c8
-rw-r--r--drivers/bluetooth/btmtksdio.c4
-rw-r--r--drivers/bluetooth/btmtkuart.c2
-rw-r--r--drivers/bluetooth/btnxpuart.c2
-rw-r--r--drivers/bluetooth/btqca.c2
-rw-r--r--drivers/bluetooth/btqcomsmd.c2
-rw-r--r--drivers/bluetooth/btrtl.c10
-rw-r--r--drivers/bluetooth/btsdio.c2
-rw-r--r--drivers/bluetooth/btusb.c148
-rw-r--r--drivers/bluetooth/hci_aml.c2
-rw-r--r--drivers/bluetooth/hci_bcm.c4
-rw-r--r--drivers/bluetooth/hci_bcm4377.c10
-rw-r--r--drivers/bluetooth/hci_intel.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c6
-rw-r--r--drivers/bluetooth/hci_ll.c4
-rw-r--r--drivers/bluetooth/hci_nokia.c2
-rw-r--r--drivers/bluetooth/hci_qca.c14
-rw-r--r--drivers/bluetooth/hci_serdev.c8
-rw-r--r--drivers/bluetooth/hci_vhci.c8
-rw-r--r--drivers/bluetooth/virtio_bt.c10
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c19
-rw-r--r--drivers/cdrom/cdrom.c8
-rw-r--r--drivers/char/tpm/eventlog/common.c46
-rw-r--r--drivers/char/tpm/eventlog/of.c8
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/tpm-interface.c39
-rw-r--r--drivers/char/tpm/tpm2-sessions.c12
-rw-r--r--drivers/char/tpm/tpm_atmel.c3
-rw-r--r--drivers/char/tpm/tpm_crb.c2
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.c94
-rw-r--r--drivers/char/tpm/tpm_crb_ffa.h2
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.c66
-rw-r--r--drivers/char/tpm/tpm_ftpm_tee.h4
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c3
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c3
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c6
-rw-r--r--drivers/char/tpm/tpm_infineon.c3
-rw-r--r--drivers/char/tpm/tpm_nsc.c3
-rw-r--r--drivers/char/tpm/tpm_ppi.c52
-rw-r--r--drivers/char/tpm/tpm_svsm.c28
-rw-r--r--drivers/char/tpm/tpm_tis_core.c3
-rw-r--r--drivers/char/tpm/tpm_tis_i2c_cr50.c6
-rw-r--r--drivers/char/tpm/tpm_vtpm_proxy.c4
-rw-r--r--drivers/char/tpm/xen-tpmfront.c3
-rw-r--r--drivers/clk/qcom/apcs-sdx55.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun55i-a523.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c14
-rw-r--r--drivers/clocksource/hyperv_timer.c1
-rw-r--r--drivers/clocksource/timer-orion.c2
-rw-r--r--drivers/comedi/comedi_fops.c30
-rw-r--r--drivers/comedi/drivers.c17
-rw-r--r--drivers/comedi/drivers/aio_iiro_16.c3
-rw-r--r--drivers/comedi/drivers/comedi_test.c2
-rw-r--r--drivers/comedi/drivers/das16m1.c3
-rw-r--r--drivers/comedi/drivers/das6402.c3
-rw-r--r--drivers/comedi/drivers/pcl812.c3
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/armada-8k-cpufreq.c5
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c2
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c61
-rw-r--r--drivers/cpufreq/cpufreq-dt-platdev.c1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c11
-rw-r--r--drivers/cpufreq/cpufreq-dt.h2
-rw-r--r--drivers/cpufreq/cpufreq.c56
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c1
-rw-r--r--drivers/cpufreq/intel_pstate.c6
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c49
-rw-r--r--drivers/cpuidle/cpuidle-psci.c23
-rw-r--r--drivers/cpuidle/dt_idle_states.c14
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c10
-rw-r--r--drivers/crypto/img-hash.c6
-rw-r--r--drivers/crypto/inside-secure/safexcel.c1
-rw-r--r--drivers/crypto/inside-secure/safexcel.h1
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c92
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_algs.c14
-rw-r--r--drivers/crypto/starfive/jh7110-hash.c16
-rw-r--r--drivers/crypto/stm32/Kconfig9
-rw-r--r--drivers/crypto/stm32/Makefile1
-rw-r--r--drivers/crypto/stm32/stm32-crc32.c480
-rw-r--r--drivers/devfreq/Kconfig11
-rw-r--r--drivers/devfreq/Makefile1
-rw-r--r--drivers/devfreq/devfreq.c23
-rw-r--r--drivers/devfreq/governor_userspace.c6
-rw-r--r--drivers/devfreq/hisi_uncore_freq.c658
-rw-r--r--drivers/devfreq/sun8i-a33-mbus.c38
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c60
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c4
-rw-r--r--drivers/dma/nbpfaxi.c11
-rw-r--r--drivers/firmware/arm_ffa/driver.c2
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c45
-rw-r--r--drivers/firmware/efi/libstub/Makefile8
-rw-r--r--drivers/gpio/Kconfig24
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/TODO19
-rw-r--r--drivers/gpio/gpio-74xx-mmio.c32
-rw-r--r--drivers/gpio/gpio-adp5585.c364
-rw-r--r--drivers/gpio/gpio-arizona.c2
-rw-r--r--drivers/gpio/gpio-brcmstb.c6
-rw-r--r--drivers/gpio/gpio-cadence.c59
-rw-r--r--drivers/gpio/gpio-clps711x.c28
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-em.c3
-rw-r--r--drivers/gpio/gpio-en7523.c36
-rw-r--r--drivers/gpio/gpio-grgpio.c5
-rw-r--r--drivers/gpio/gpio-loongson-64bit.c6
-rw-r--r--drivers/gpio/gpio-lpc18xx.c4
-rw-r--r--drivers/gpio/gpio-macsmc.c292
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c12
-rw-r--r--drivers/gpio/gpio-mmio.c131
-rw-r--r--drivers/gpio/gpio-moxtet.c16
-rw-r--r--drivers/gpio/gpio-mpc5200.c12
-rw-r--r--drivers/gpio/gpio-mpfs.c11
-rw-r--r--drivers/gpio/gpio-mpsse.c22
-rw-r--r--drivers/gpio/gpio-msc313.c6
-rw-r--r--drivers/gpio/gpio-mvebu.c4
-rw-r--r--drivers/gpio/gpio-mxc.c89
-rw-r--r--drivers/gpio/gpio-mxs.c2
-rw-r--r--drivers/gpio/gpio-nomadik.c8
-rw-r--r--drivers/gpio/gpio-npcm-sgpio.c10
-rw-r--r--drivers/gpio/gpio-octeon.c7
-rw-r--r--drivers/gpio/gpio-omap.c14
-rw-r--r--drivers/gpio/gpio-palmas.c26
-rw-r--r--drivers/gpio/gpio-pca953x.c169
-rw-r--r--drivers/gpio/gpio-pca9570.c5
-rw-r--r--drivers/gpio/gpio-pcf857x.c17
-rw-r--r--drivers/gpio/gpio-pch.c6
-rw-r--r--drivers/gpio/gpio-pisosr.c8
-rw-r--r--drivers/gpio/gpio-pl061.c6
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c7
-rw-r--r--drivers/gpio/gpio-pxa.c11
-rw-r--r--drivers/gpio/gpio-raspberrypi-exp.c10
-rw-r--r--drivers/gpio/gpio-rc5t583.c19
-rw-r--r--drivers/gpio/gpio-rcar.c35
-rw-r--r--drivers/gpio/gpio-rdc321x.c8
-rw-r--r--drivers/gpio/gpio-reg.c16
-rw-r--r--drivers/gpio/gpio-rockchip.c12
-rw-r--r--drivers/gpio/gpio-rtd.c6
-rw-r--r--drivers/gpio/gpio-sa1100.c7
-rw-r--r--drivers/gpio/gpio-sama5d2-piobu.c8
-rw-r--r--drivers/gpio/gpio-sch.c9
-rw-r--r--drivers/gpio/gpio-sch311x.c8
-rw-r--r--drivers/gpio/gpio-sim.c83
-rw-r--r--drivers/gpio/gpio-siox.c11
-rw-r--r--drivers/gpio/gpio-sloppy-logic-analyzer.c2
-rw-r--r--drivers/gpio/gpio-sodaville.c4
-rw-r--r--drivers/gpio/gpio-spear-spics.c21
-rw-r--r--drivers/gpio/gpio-sprd.c8
-rw-r--r--drivers/gpio/gpio-stmpe.c15
-rw-r--r--drivers/gpio/gpio-stp-xway.c10
-rw-r--r--drivers/gpio/gpio-syscon.c33
-rw-r--r--drivers/gpio/gpio-tangier.c6
-rw-r--r--drivers/gpio/gpio-tb10x.c5
-rw-r--r--drivers/gpio/gpio-tc3589x.c11
-rw-r--r--drivers/gpio/gpio-tegra.c8
-rw-r--r--drivers/gpio/gpio-tegra186.c49
-rw-r--r--drivers/gpio/gpio-thunderx.c18
-rw-r--r--drivers/gpio/gpio-timberdale.c7
-rw-r--r--drivers/gpio/gpio-tpic2810.c27
-rw-r--r--drivers/gpio/gpio-tps65086.c16
-rw-r--r--drivers/gpio/gpio-tps65218.c31
-rw-r--r--drivers/gpio/gpio-tps65219.c124
-rw-r--r--drivers/gpio/gpio-tps6586x.c15
-rw-r--r--drivers/gpio/gpio-tps65910.c21
-rw-r--r--drivers/gpio/gpio-tps65912.c17
-rw-r--r--drivers/gpio/gpio-tps68470.c14
-rw-r--r--drivers/gpio/gpio-tqmx86.c8
-rw-r--r--drivers/gpio/gpio-ts4900.c14
-rw-r--r--drivers/gpio/gpio-ts5500.c6
-rw-r--r--drivers/gpio/gpio-twl4030.c25
-rw-r--r--drivers/gpio/gpio-twl6040.c23
-rw-r--r--drivers/gpio/gpio-uniphier.c16
-rw-r--r--drivers/gpio/gpio-viperboard.c130
-rw-r--r--drivers/gpio/gpio-virtio.c16
-rw-r--r--drivers/gpio/gpio-virtuser.c4
-rw-r--r--drivers/gpio/gpio-vx855.c9
-rw-r--r--drivers/gpio/gpio-wcd934x.c16
-rw-r--r--drivers/gpio/gpio-wcove.c11
-rw-r--r--drivers/gpio/gpio-winbond.c16
-rw-r--r--drivers/gpio/gpio-wm831x.c13
-rw-r--r--drivers/gpio/gpio-wm8350.c15
-rw-r--r--drivers/gpio/gpio-wm8994.c8
-rw-r--r--drivers/gpio/gpio-xgene.c6
-rw-r--r--drivers/gpio/gpio-xilinx.c14
-rw-r--r--drivers/gpio/gpio-xlp.c10
-rw-r--r--drivers/gpio/gpio-xra1403.c13
-rw-r--r--drivers/gpio/gpio-xtensa.c13
-rw-r--r--drivers/gpio/gpio-zevio.c6
-rw-r--r--drivers/gpio/gpio-zynq.c8
-rw-r--r--drivers/gpio/gpio-zynqmp-modepin.c10
-rw-r--r--drivers/gpio/gpiolib-acpi-quirks.c13
-rw-r--r--drivers/gpio/gpiolib-devres.c2
-rw-r--r--drivers/gpio/gpiolib-legacy.c38
-rw-r--r--drivers/gpio/gpiolib-of.h2
-rw-r--r--drivers/gpio/gpiolib-sysfs.c676
-rw-r--r--drivers/gpio/gpiolib.c47
-rw-r--r--drivers/gpio/gpiolib.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_buddy.c43
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c4
-rw-r--r--drivers/gpu/drm/drm_prime.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c115
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c21
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.c36
-rw-r--r--drivers/gpu/drm/mediatek/mtk_crtc.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_ddp_comp.h9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_drv.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_plane.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvif/chan.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c23
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c21
-rw-r--r--drivers/gpu/drm/v3d/v3d_gemfs.c21
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c13
-rw-r--r--drivers/gpu/drm/xe/xe_gt.h2
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c19
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h5
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c27
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c22
-rw-r--r--drivers/hid/hid-core.c22
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hv/Kconfig2
-rw-r--r--drivers/hv/channel.c1
-rw-r--r--drivers/hv/channel_mgmt.c1
-rw-r--r--drivers/hv/connection.c5
-rw-r--r--drivers/hv/hv.c6
-rw-r--r--drivers/hv/hv_proc.c1
-rw-r--r--drivers/hv/mshv_common.c1
-rw-r--r--drivers/hv/mshv_eventfd.c14
-rw-r--r--drivers/hv/mshv_root_hv_call.c1
-rw-r--r--drivers/hv/ring_buffer.c1
-rw-r--r--drivers/hv/vmbus_drv.c9
-rw-r--r--drivers/hwmon/corsair-cpro.c5
-rw-r--r--drivers/hwmon/ina238.c8
-rw-r--r--drivers/hwmon/pmbus/ucd9000.c8
-rw-r--r--drivers/i2c/busses/i2c-omap.c7
-rw-r--r--drivers/i2c/busses/i2c-qup.c4
-rw-r--r--drivers/i2c/busses/i2c-stm32.c8
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c24
-rw-r--r--drivers/i2c/busses/i2c-tegra.c24
-rw-r--r--drivers/i2c/busses/i2c-virtio.c15
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/iio/accel/fxls8962af-core.c2
-rw-r--r--drivers/iio/accel/st_accel_core.c10
-rw-r--r--drivers/iio/adc/ad7380.c5
-rw-r--r--drivers/iio/adc/ad7949.c7
-rw-r--r--drivers/iio/adc/adi-axi-adc.c6
-rw-r--r--drivers/iio/adc/axp20x_adc.c1
-rw-r--r--drivers/iio/adc/max1363.c43
-rw-r--r--drivers/iio/adc/stm32-adc-core.c7
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c36
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c20
-rw-r--r--drivers/iio/dac/ad3530r.c4
-rw-r--r--drivers/iio/industrialio-backend.c5
-rw-r--r--drivers/iio/industrialio-core.c5
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/keyboard/Kconfig21
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/adp5585-keys.c371
-rw-r--r--drivers/input/keyboard/adp5589-keys.c1066
-rw-r--r--drivers/interconnect/core.c34
-rw-r--r--drivers/interconnect/icc-clk.c2
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c7
-rw-r--r--drivers/interconnect/qcom/osm-l3.c7
-rw-r--r--drivers/interconnect/qcom/sc7280.c1
-rw-r--r--drivers/interconnect/samsung/exynos.c5
-rw-r--r--drivers/iommu/hyperv-iommu.c33
-rw-r--r--drivers/md/bcache/super.c22
-rw-r--r--drivers/md/dm-bufio.c6
-rw-r--r--drivers/md/dm-crypt.c53
-rw-r--r--drivers/md/dm-integrity.c12
-rw-r--r--drivers/md/dm-stripe.c1
-rw-r--r--drivers/md/dm-table.c4
-rw-r--r--drivers/md/dm.c54
-rw-r--r--drivers/md/md.c73
-rw-r--r--drivers/md/md.h26
-rw-r--r--drivers/md/raid0.c1
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/mfd/Kconfig18
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/adp5585.c739
-rw-r--r--drivers/mfd/macsmc.c498
-rw-r--r--drivers/mfd/vexpress-sysreg.c46
-rw-r--r--drivers/misc/amd-sbi/rmi-core.c24
-rw-r--r--drivers/misc/lkdtm/Makefile2
-rw-r--r--drivers/misc/lkdtm/kstack_erase.c (renamed from drivers/misc/lkdtm/stackleak.c)26
-rw-r--r--drivers/misc/ti_fpc202.c13
-rw-r--r--drivers/mmc/core/sdio_bus.c2
-rw-r--r--drivers/mmc/host/bcm2835.c3
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c3
-rw-r--r--drivers/mmc/host/sdhci_am654.c9
-rw-r--r--drivers/mux/Kconfig1
-rw-r--r--drivers/mux/core.c7
-rw-r--r--drivers/net/can/dev/dev.c12
-rw-r--r--drivers/net/can/dev/netlink.c12
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c61
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c3
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c15
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c67
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c108
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c1
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c1
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c8
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c158
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.h80
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c20
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_switch_map.h3
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c9
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c5
-rw-r--r--drivers/net/ovpn/io.c7
-rw-r--r--drivers/net/ovpn/netlink-gen.c61
-rw-r--r--drivers/net/ovpn/netlink-gen.h6
-rw-r--r--drivers/net/ovpn/netlink.c51
-rw-r--r--drivers/net/ovpn/udp.c1
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/usb/sierra_net.c4
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c8
-rw-r--r--drivers/nvdimm/btt.c2
-rw-r--r--drivers/nvme/host/apple.c4
-rw-r--r--drivers/nvme/host/constants.c4
-rw-r--r--drivers/nvme/host/core.c56
-rw-r--r--drivers/nvme/host/fc.c10
-rw-r--r--drivers/nvme/host/nvme.h2
-rw-r--r--drivers/nvme/host/pci.c640
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/host/tcp.c11
-rw-r--r--drivers/nvme/target/core.c2
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c6
-rw-r--r--drivers/nvme/target/passthru.c4
-rw-r--r--drivers/nvme/target/pci-epf.c25
-rw-r--r--drivers/nvme/target/tcp.c4
-rw-r--r--drivers/nvme/target/zns.c2
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c5
-rw-r--r--drivers/nvmem/imx-ocotp.c5
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c8
-rw-r--r--drivers/pci/controller/pci-hyperv-intf.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c21
-rw-r--r--drivers/pci/pci-driver.c4
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/phy/phy-core.c5
-rw-r--r--drivers/phy/phy-snps-eusb2.c6
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c75
-rw-r--r--drivers/phy/tegra/xusb.h1
-rw-r--r--drivers/platform/arm64/huawei-gaokun-ec.c2
-rw-r--r--drivers/platform/cznic/turris-omnia-mcu-gpio.c35
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c25
-rw-r--r--drivers/platform/x86/Makefile3
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c17
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c1
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c4
-rw-r--r--drivers/platform/x86/lenovo-wmi-hotkey-utilities.c30
-rw-r--r--drivers/pmdomain/governor.c18
-rw-r--r--drivers/power/reset/Kconfig9
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/macsmc-reboot.c290
-rw-r--r--drivers/power/sequencing/Kconfig10
-rw-r--r--drivers/power/sequencing/Makefile1
-rw-r--r--drivers/power/sequencing/core.c6
-rw-r--r--drivers/power/sequencing/pwrseq-qcom-wcn.c10
-rw-r--r--drivers/power/sequencing/pwrseq-thead-gpu.c249
-rw-r--r--drivers/power/supply/power_supply_core.c82
-rw-r--r--drivers/power/supply/test_power.c4
-rw-r--r--drivers/powercap/dtpm_cpu.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c1
-rw-r--r--drivers/powercap/intel_rapl_msr.c1
-rw-r--r--drivers/pwm/pwm-adp5585.c78
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/s390/net/ism_drv.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c10
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/sd_dif.c3
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c5
-rw-r--r--drivers/scsi/virtio_scsi.c1
-rw-r--r--drivers/soc/apple/rtkit.c3
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c13
-rw-r--r--drivers/soc/ti/pm33xx.c2
-rw-r--r--drivers/soundwire/amd_manager.c4
-rw-r--r--drivers/soundwire/bus_type.c2
-rw-r--r--drivers/soundwire/qcom.c26
-rw-r--r--drivers/spi/spi-qpic-snand.c2
-rw-r--r--drivers/spi/spi.c16
-rw-r--r--drivers/staging/gpib/common/gpib_os.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c98
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c1
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h2
-rw-r--r--drivers/thermal/armada_thermal.c2
-rw-r--r--drivers/thermal/da9062-thermal.c2
-rw-r--r--drivers/thermal/dove_thermal.c2
-rw-r--r--drivers/thermal/imx_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/platform_temperature_control.c72
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c4
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c1
-rw-r--r--drivers/thermal/kirkwood_thermal.c2
-rw-r--r--drivers/thermal/loongson2_thermal.c15
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c76
-rw-r--r--drivers/thermal/qcom/lmh.c3
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c596
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c2
-rw-r--r--drivers/thermal/rockchip_thermal.c251
-rw-r--r--drivers/thermal/spear_thermal.c2
-rw-r--r--drivers/thermal/st/st_thermal.c2
-rw-r--r--drivers/thermal/tegra/soctherm.c13
-rw-r--r--drivers/thermal/testing/zone.c2
-rw-r--r--drivers/thermal/thermal_sysfs.c9
-rw-r--r--drivers/thunderbolt/switch.c10
-rw-r--r--drivers/thunderbolt/tb.h2
-rw-r--r--drivers/thunderbolt/usb4.c12
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/serial_base_bus.c3
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/gadget.c38
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c7
-rw-r--r--drivers/usb/gadget/configfs.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c3
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c5
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h3
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/virt/coco/efi_secret/efi_secret.c47
-rw-r--r--drivers/virtio/virtio_vdpa.c9
601 files changed, 10934 insertions, 8980 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7bc40c2735ac..b594780a57d7 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -394,6 +394,7 @@ config ACPI_TABLE_OVERRIDE_VIA_BUILTIN_INITRD
config ACPI_DEBUG
bool "Debug Statements"
+ default y
help
The ACPI subsystem can produce debug output. Saying Y enables this
output and increases the kernel size by around 50K.
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 7cf6101cb4c7..2a99f5eb6962 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -275,7 +275,7 @@ static inline int acpi_processor_hotadd_init(struct acpi_processor *pr,
static int acpi_processor_get_info(struct acpi_device *device)
{
- union acpi_object object = { 0 };
+ union acpi_object object = { .processor = { 0 } };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
int device_declaration = 0;
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 825c2a8acea4..91d7d90c47da 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -233,7 +233,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return sprintf(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
+ return sysfs_emit(buf, "%u:%u:%u:%u:%u:%u:%d:%u\n",
rt.year, rt.month, rt.day, rt.hour, rt.minute, rt.second,
rt.tz, rt.daylight);
}
@@ -428,7 +428,7 @@ static ssize_t caps_show(struct device *dev, struct device_attribute *attr,
{
struct acpi_tad_driver_data *dd = dev_get_drvdata(dev);
- return sprintf(buf, "0x%02X\n", dd->capabilities);
+ return sysfs_emit(buf, "0x%02X\n", dd->capabilities);
}
static DEVICE_ATTR_RO(caps);
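A note on the sysfs_emit() conversions above (and the similar ones in fan_attr.c further down): sysfs show() callbacks are handed a full page, and sysfs_emit() from <linux/sysfs.h> enforces that PAGE_SIZE bound where a bare sprintf() does not. A minimal illustrative show() handler using the same pattern (the attribute and driver-data names here are invented for the example, not taken from this patch):

/* Illustrative sketch only; not part of this patch. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct example_data *d = dev_get_drvdata(dev);

	/* sysfs_emit() truncates safely at PAGE_SIZE, unlike sprintf() */
	return sysfs_emit(buf, "0x%02X\n", d->capabilities);
}
static DEVICE_ATTR_RO(example);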
diff --git a/drivers/acpi/acpica/extrace.c b/drivers/acpi/acpica/extrace.c
index d34497f3576a..36934d4f26fb 100644
--- a/drivers/acpi/acpica/extrace.c
+++ b/drivers/acpi/acpica/extrace.c
@@ -136,9 +136,9 @@ acpi_ex_trace_point(acpi_trace_event_type type,
if (pathname) {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
- "%s %s [0x%p:%s] execution.\n",
+ "%s %s [%s] execution.\n",
acpi_ex_get_trace_event_name(type),
- begin ? "Begin" : "End", aml, pathname));
+ begin ? "Begin" : "End", pathname));
} else {
ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT,
"%s %s [0x%p] execution.\n",
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cd2766c69d78..77c10a7a7a9f 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -131,7 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
int apei_osc_setup(void);
-int einj_get_available_error_type(u32 *type);
+int einj_get_available_error_type(u32 *type, int einj_action);
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4);
int einj_cxl_rch_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 9b041415a9d0..bf8dc92a373a 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -33,6 +33,8 @@
#define SLEEP_UNIT_MAX 5000 /* 5ms */
/* Firmware should respond within 1 seconds */
#define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC)
+#define COMPONENT_LEN 16
+#define ACPI65_EINJV2_SUPP BIT(30)
#define ACPI5_VENDOR_BIT BIT(31)
#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \
ACPI_EINJ_MEMORY_UNCORRECTABLE | \
@@ -49,6 +51,28 @@
*/
static int acpi5;
+struct syndrome_array {
+ union {
+ u8 acpi_id[COMPONENT_LEN];
+ u8 device_id[COMPONENT_LEN];
+ u8 pcie_sbdf[COMPONENT_LEN];
+ u8 vendor_id[COMPONENT_LEN];
+ } comp_id;
+ union {
+ u8 proc_synd[COMPONENT_LEN];
+ u8 mem_synd[COMPONENT_LEN];
+ u8 pcie_synd[COMPONENT_LEN];
+ u8 vendor_synd[COMPONENT_LEN];
+ } comp_synd;
+};
+
+struct einjv2_extension_struct {
+ u32 length;
+ u16 revision;
+ u16 component_arr_count;
+ struct syndrome_array component_arr[] __counted_by(component_arr_count);
+};
+
struct set_error_type_with_address {
u32 type;
u32 vendor_extension;
@@ -57,11 +81,13 @@ struct set_error_type_with_address {
u64 memory_address;
u64 memory_address_range;
u32 pcie_sbdf;
+ struct einjv2_extension_struct einjv2_struct;
};
enum {
SETWA_FLAGS_APICID = 1,
SETWA_FLAGS_MEM = 2,
SETWA_FLAGS_PCIE_SBDF = 4,
+ SETWA_FLAGS_EINJV2 = 8,
};
/*
@@ -83,7 +109,10 @@ static struct debugfs_blob_wrapper vendor_blob;
static struct debugfs_blob_wrapper vendor_errors;
static char vendor_dev[64];
+static u32 max_nr_components;
static u32 available_error_type;
+static u32 available_error_type_v2;
+static struct syndrome_array *syndrome_data;
/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
@@ -151,7 +180,9 @@ static DEFINE_MUTEX(einj_mutex);
*/
bool einj_initialized __ro_after_init;
-static void *einj_param;
+static void __iomem *einj_param;
+static u32 v5param_size;
+static bool is_v2;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
@@ -159,13 +190,13 @@ static void einj_exec_ctx_init(struct apei_exec_context *ctx)
EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
}
-static int __einj_get_available_error_type(u32 *type)
+static int __einj_get_available_error_type(u32 *type, int einj_action)
{
struct apei_exec_context ctx;
int rc;
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ rc = apei_exec_run(&ctx, einj_action);
if (rc)
return rc;
*type = apei_exec_ctx_get_output(&ctx);
@@ -174,17 +205,34 @@ static int __einj_get_available_error_type(u32 *type)
}
/* Get error injection capabilities of the platform */
-int einj_get_available_error_type(u32 *type)
+int einj_get_available_error_type(u32 *type, int einj_action)
{
int rc;
mutex_lock(&einj_mutex);
- rc = __einj_get_available_error_type(type);
+ rc = __einj_get_available_error_type(type, einj_action);
mutex_unlock(&einj_mutex);
return rc;
}
+static int einj_get_available_error_types(u32 *type1, u32 *type2)
+{
+ int rc;
+
+ rc = einj_get_available_error_type(type1, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (*type1 & ACPI65_EINJV2_SUPP) {
+ rc = einj_get_available_error_type(type2,
+ ACPI_EINJV2_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
static int einj_timedout(u64 *t)
{
if ((s64)*t < SLEEP_UNIT_MIN) {
@@ -216,24 +264,26 @@ static void check_vendor_extension(u64 paddr,
struct set_error_type_with_address *v5param)
{
int offset = v5param->vendor_extension;
- struct vendor_error_type_extension *v;
+ struct vendor_error_type_extension v;
+ struct vendor_error_type_extension __iomem *p;
u32 sbdf;
if (!offset)
return;
- v = acpi_os_map_iomem(paddr + offset, sizeof(*v));
- if (!v)
+ p = acpi_os_map_iomem(paddr + offset, sizeof(*p));
+ if (!p)
return;
- get_oem_vendor_struct(paddr, offset, v);
- sbdf = v->pcie_sbdf;
+ memcpy_fromio(&v, p, sizeof(v));
+ get_oem_vendor_struct(paddr, offset, &v);
+ sbdf = v.pcie_sbdf;
sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
sbdf >> 24, (sbdf >> 16) & 0xff,
(sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
- v->vendor_id, v->device_id, v->rev_id);
- acpi_os_unmap_iomem(v, sizeof(*v));
+ v.vendor_id, v.device_id, v.rev_id);
+ acpi_os_unmap_iomem(p, sizeof(v));
}
-static void *einj_get_parameter_address(void)
+static void __iomem *einj_get_parameter_address(void)
{
int i;
u64 pa_v4 = 0, pa_v5 = 0;
@@ -254,26 +304,50 @@ static void *einj_get_parameter_address(void)
entry++;
}
if (pa_v5) {
- struct set_error_type_with_address *v5param;
+ struct set_error_type_with_address v5param;
+ struct set_error_type_with_address __iomem *p;
+
+ v5param_size = sizeof(v5param);
+ p = acpi_os_map_iomem(pa_v5, sizeof(*p));
+ if (p) {
+ int offset, len;
- v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param));
- if (v5param) {
+ memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
- check_vendor_extension(pa_v5, v5param);
- return v5param;
+ check_vendor_extension(pa_v5, &v5param);
+ if (available_error_type & ACPI65_EINJV2_SUPP) {
+ len = v5param.einjv2_struct.length;
+ offset = offsetof(struct einjv2_extension_struct, component_arr);
+ max_nr_components = (len - offset) /
+ sizeof(v5param.einjv2_struct.component_arr[0]);
+ /*
+ * The first call to acpi_os_map_iomem above does not include the
+ * component array, instead it is used to read and calculate maximum
+ * number of components supported by the system. Below, the mapping
+ * is expanded to include the component array.
+ */
+ acpi_os_unmap_iomem(p, v5param_size);
+ offset = offsetof(struct set_error_type_with_address, einjv2_struct);
+ v5param_size = offset + struct_size(&v5param.einjv2_struct,
+ component_arr, max_nr_components);
+ p = acpi_os_map_iomem(pa_v5, v5param_size);
+ }
+ return p;
}
}
if (param_extension && pa_v4) {
- struct einj_parameter *v4param;
+ struct einj_parameter v4param;
+ struct einj_parameter __iomem *p;
- v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param));
- if (!v4param)
+ p = acpi_os_map_iomem(pa_v4, sizeof(*p));
+ if (!p)
return NULL;
- if (v4param->reserved1 || v4param->reserved2) {
- acpi_os_unmap_iomem(v4param, sizeof(*v4param));
+ memcpy_fromio(&v4param, p, sizeof(v4param));
+ if (v4param.reserved1 || v4param.reserved2) {
+ acpi_os_unmap_iomem(p, sizeof(v4param));
return NULL;
}
- return v4param;
+ return p;
}
return NULL;
@@ -319,7 +393,8 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region(
static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u64 param1, u64 param2)
{
- struct acpi_einj_trigger *trigger_tab = NULL;
+ struct acpi_einj_trigger trigger_tab;
+ struct acpi_einj_trigger *full_trigger_tab;
struct apei_exec_context trigger_ctx;
struct apei_resources trigger_resources;
struct acpi_whea_header *trigger_entry;
@@ -327,54 +402,60 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
u32 table_size;
int rc = -EIO;
struct acpi_generic_address *trigger_param_region = NULL;
+ struct acpi_einj_trigger __iomem *p = NULL;
- r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ r = request_mem_region(trigger_paddr, sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
(unsigned long long)trigger_paddr +
- sizeof(*trigger_tab) - 1);
+ sizeof(trigger_tab) - 1);
goto out;
}
- trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
- if (!trigger_tab) {
+ p = ioremap_cache(trigger_paddr, sizeof(*p));
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_header;
}
- rc = einj_check_trigger_header(trigger_tab);
+ memcpy_fromio(&trigger_tab, p, sizeof(trigger_tab));
+ rc = einj_check_trigger_header(&trigger_tab);
if (rc) {
pr_warn(FW_BUG "Invalid trigger error action table.\n");
goto out_rel_header;
}
/* No action structures in the TRIGGER_ERROR table, nothing to do */
- if (!trigger_tab->entry_count)
+ if (!trigger_tab.entry_count)
goto out_rel_header;
rc = -EIO;
- table_size = trigger_tab->table_size;
- r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab),
+ table_size = trigger_tab.table_size;
+ full_trigger_tab = kmalloc(table_size, GFP_KERNEL);
+ if (!full_trigger_tab)
+ goto out_rel_header;
+ r = request_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err("Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
- (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + sizeof(trigger_tab),
(unsigned long long)trigger_paddr + table_size - 1);
- goto out_rel_header;
+ goto out_free_trigger_tab;
}
- iounmap(trigger_tab);
- trigger_tab = ioremap_cache(trigger_paddr, table_size);
- if (!trigger_tab) {
+ iounmap(p);
+ p = ioremap_cache(trigger_paddr, table_size);
+ if (!p) {
pr_err("Failed to map trigger table!\n");
goto out_rel_entry;
}
+ memcpy_fromio(full_trigger_tab, p, table_size);
trigger_entry = (struct acpi_whea_header *)
- ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ ((char *)full_trigger_tab + sizeof(struct acpi_einj_trigger));
apei_resources_init(&trigger_resources);
apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
ARRAY_SIZE(einj_ins_type),
- trigger_entry, trigger_tab->entry_count);
+ trigger_entry, trigger_tab.entry_count);
rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
if (rc)
goto out_fini;
@@ -392,7 +473,7 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type,
apei_resources_init(&addr_resources);
trigger_param_region = einj_get_trigger_parameter_region(
- trigger_tab, param1, param2);
+ full_trigger_tab, param1, param2);
if (trigger_param_region) {
rc = apei_resources_add(&addr_resources,
trigger_param_region->address,
@@ -421,23 +502,33 @@ out_release:
out_fini:
apei_resources_fini(&trigger_resources);
out_rel_entry:
- release_mem_region(trigger_paddr + sizeof(*trigger_tab),
- table_size - sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr + sizeof(trigger_tab),
+ table_size - sizeof(trigger_tab));
+out_free_trigger_tab:
+ kfree(full_trigger_tab);
out_rel_header:
- release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+ release_mem_region(trigger_paddr, sizeof(trigger_tab));
out:
- if (trigger_tab)
- iounmap(trigger_tab);
+ if (p)
+ iounmap(p);
return rc;
}
+static bool is_end_of_list(u8 *val)
+{
+ for (int i = 0; i < COMPONENT_LEN; ++i) {
+ if (val[i] != 0xFF)
+ return false;
+ }
+ return true;
+}
static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
struct apei_exec_context ctx;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
- int rc;
+ int i, rc;
einj_exec_ctx_init(&ctx);
@@ -446,8 +537,10 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
apei_exec_ctx_set_input(&ctx, type);
if (acpi5) {
- struct set_error_type_with_address *v5param = einj_param;
+ struct set_error_type_with_address *v5param;
+ v5param = kmalloc(v5param_size, GFP_KERNEL);
+ memcpy_fromio(v5param, einj_param, v5param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
switch (vendor_flags) {
@@ -467,8 +560,21 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
v5param->flags = flags;
v5param->memory_address = param1;
v5param->memory_address_range = param2;
- v5param->apicid = param3;
- v5param->pcie_sbdf = param4;
+
+ if (is_v2) {
+ for (i = 0; i < max_nr_components; i++) {
+ if (is_end_of_list(syndrome_data[i].comp_id.acpi_id))
+ break;
+ v5param->einjv2_struct.component_arr[i].comp_id =
+ syndrome_data[i].comp_id;
+ v5param->einjv2_struct.component_arr[i].comp_synd =
+ syndrome_data[i].comp_synd;
+ }
+ v5param->einjv2_struct.component_arr_count = i;
+ } else {
+ v5param->apicid = param3;
+ v5param->pcie_sbdf = param4;
+ }
} else {
switch (type) {
case ACPI_EINJ_PROCESSOR_CORRECTABLE:
@@ -492,15 +598,19 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
break;
}
}
+ memcpy_toio(einj_param, v5param, v5param_size);
+ kfree(v5param);
} else {
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
if (rc)
return rc;
if (einj_param) {
- struct einj_parameter *v4param = einj_param;
+ struct einj_parameter v4param;
- v4param->param1 = param1;
- v4param->param2 = param2;
+ memcpy_fromio(&v4param, einj_param, sizeof(v4param));
+ v4param.param1 = param1;
+ v4param.param2 = param2;
+ memcpy_toio(einj_param, &v4param, sizeof(v4param));
}
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
@@ -551,10 +661,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 base_addr, size;
/* If user manually set "flags", make sure it is legal */
- if (flags && (flags &
- ~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
+ if (flags && (flags & ~(SETWA_FLAGS_APICID | SETWA_FLAGS_MEM |
+ SETWA_FLAGS_PCIE_SBDF | SETWA_FLAGS_EINJV2)))
return -EINVAL;
+ /* check if type is a valid EINJv2 error type */
+ if (is_v2) {
+ if (!(type & available_error_type_v2))
+ return -EINVAL;
+ }
/*
* We need extra sanity checks for memory errors.
* Other types leap directly to injection.
@@ -632,6 +747,8 @@ static u64 error_param2;
static u64 error_param3;
static u64 error_param4;
static struct dentry *einj_debug_dir;
+static char einj_buf[32];
+static bool einj_v2_enabled;
static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(0), "Processor Correctable" },
{ BIT(1), "Processor Uncorrectable non-fatal" },
@@ -648,6 +765,12 @@ static struct { u32 mask; const char *str; } const einj_error_type_string[] = {
{ BIT(31), "Vendor Defined Error Types" },
};
+static struct { u32 mask; const char *str; } const einjv2_error_type_string[] = {
+ { BIT(0), "EINJV2 Processor Error" },
+ { BIT(1), "EINJV2 Memory Error" },
+ { BIT(2), "EINJV2 PCI Express Error" },
+};
+
static int available_error_type_show(struct seq_file *m, void *v)
{
@@ -655,17 +778,22 @@ static int available_error_type_show(struct seq_file *m, void *v)
if (available_error_type & einj_error_type_string[pos].mask)
seq_printf(m, "0x%08x\t%s\n", einj_error_type_string[pos].mask,
einj_error_type_string[pos].str);
-
+ if ((available_error_type & ACPI65_EINJV2_SUPP) && einj_v2_enabled) {
+ for (int pos = 0; pos < ARRAY_SIZE(einjv2_error_type_string); pos++) {
+ if (available_error_type_v2 & einjv2_error_type_string[pos].mask)
+ seq_printf(m, "V2_0x%08x\t%s\n", einjv2_error_type_string[pos].mask,
+ einjv2_error_type_string[pos].str);
+ }
+ }
return 0;
}
DEFINE_SHOW_ATTRIBUTE(available_error_type);
-static int error_type_get(void *data, u64 *val)
+static ssize_t error_type_get(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- *val = error_type;
-
- return 0;
+ return simple_read_from_buffer(buf, count, ppos, einj_buf, strlen(einj_buf));
}
bool einj_is_cxl_error_type(u64 type)
@@ -692,15 +820,35 @@ int einj_validate_error_type(u64 type)
if (tval & (tval - 1))
return -EINVAL;
if (!vendor)
- if (!(type & available_error_type))
+ if (!(type & (available_error_type | available_error_type_v2)))
return -EINVAL;
return 0;
}
-static int error_type_set(void *data, u64 val)
+static ssize_t error_type_set(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
int rc;
+ u64 val;
+
+ /* Leave the last character for the NUL terminator */
+ if (count > sizeof(einj_buf) - 1)
+ return -EINVAL;
+
+ memset(einj_buf, 0, sizeof(einj_buf));
+ if (copy_from_user(einj_buf, buf, count))
+ return -EFAULT;
+
+ if (strncmp(einj_buf, "V2_", 3) == 0) {
+ if (!sscanf(einj_buf, "V2_%llx", &val))
+ return -EINVAL;
+ is_v2 = true;
+ } else {
+ if (!sscanf(einj_buf, "%llx", &val))
+ return -EINVAL;
+ is_v2 = false;
+ }
rc = einj_validate_error_type(val);
if (rc)
@@ -708,17 +856,24 @@ static int error_type_set(void *data, u64 val)
error_type = val;
- return 0;
+ return count;
}
-DEFINE_DEBUGFS_ATTRIBUTE(error_type_fops, error_type_get, error_type_set,
- "0x%llx\n");
+static const struct file_operations error_type_fops = {
+ .read = error_type_get,
+ .write = error_type_set,
+};
static int error_inject_set(void *data, u64 val)
{
if (!error_type)
return -EINVAL;
+ if (is_v2)
+ error_flags |= SETWA_FLAGS_EINJV2;
+ else
+ error_flags &= ~SETWA_FLAGS_EINJV2;
+
return einj_error_inject(error_type, error_flags, error_param1, error_param2,
error_param3, error_param4);
}
@@ -741,6 +896,98 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
return 0;
}
+static ssize_t u128_read(struct file *f, char __user *buf, size_t count, loff_t *off)
+{
+ char output[2 * COMPONENT_LEN + 1];
+ u8 *data = f->f_inode->i_private;
+ int i;
+
+ if (*off >= sizeof(output))
+ return 0;
+
+ for (i = 0; i < COMPONENT_LEN; i++)
+ sprintf(output + 2 * i, "%.02x", data[COMPONENT_LEN - i - 1]);
+ output[2 * COMPONENT_LEN] = '\n';
+
+ return simple_read_from_buffer(buf, count, off, output, sizeof(output));
+}
+
+static ssize_t u128_write(struct file *f, const char __user *buf, size_t count, loff_t *off)
+{
+ char input[2 + 2 * COMPONENT_LEN + 2];
+ u8 *save = f->f_inode->i_private;
+ u8 tmp[COMPONENT_LEN];
+ char byte[3] = {};
+ char *s, *e;
+ ssize_t c;
+ long val;
+ int i;
+
+ /* Require that user supply whole input line in one write(2) syscall */
+ if (*off)
+ return -EINVAL;
+
+ c = simple_write_to_buffer(input, sizeof(input), off, buf, count);
+ if (c < 0)
+ return c;
+
+ if (c < 1 || input[c - 1] != '\n')
+ return -EINVAL;
+
+ /* Empty line means invalidate this entry */
+ if (c == 1) {
+ memset(save, 0xff, COMPONENT_LEN);
+ return c;
+ }
+
+ if (input[0] == '0' && (input[1] == 'x' || input[1] == 'X'))
+ s = input + 2;
+ else
+ s = input;
+ e = input + c - 1;
+
+ for (i = 0; i < COMPONENT_LEN; i++) {
+ byte[1] = *--e;
+ byte[0] = e > s ? *--e : '0';
+ if (kstrtol(byte, 16, &val))
+ return -EINVAL;
+ tmp[i] = val;
+ if (e <= s)
+ break;
+ }
+ while (++i < COMPONENT_LEN)
+ tmp[i] = 0;
+
+ memcpy(save, tmp, COMPONENT_LEN);
+
+ return c;
+}
+
+static const struct file_operations u128_fops = {
+ .read = u128_read,
+ .write = u128_write,
+};
+
+static bool setup_einjv2_component_files(void)
+{
+ char name[32];
+
+ syndrome_data = kcalloc(max_nr_components, sizeof(syndrome_data[0]), GFP_KERNEL);
+ if (!syndrome_data)
+ return false;
+
+ for (int i = 0; i < max_nr_components; i++) {
+ sprintf(name, "component_id%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_id, &u128_fops);
+ sprintf(name, "component_syndrome%d", i);
+ debugfs_create_file(name, 0600, einj_debug_dir,
+ &syndrome_data[i].comp_synd, &u128_fops);
+ }
+
+ return true;
+}
+
static int __init einj_probe(struct faux_device *fdev)
{
int rc;
@@ -764,7 +1011,7 @@ static int __init einj_probe(struct faux_device *fdev)
goto err_put_table;
}
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_types(&available_error_type, &available_error_type_v2);
if (rc)
goto err_put_table;
@@ -812,6 +1059,8 @@ static int __init einj_probe(struct faux_device *fdev)
&error_param4);
debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
einj_debug_dir, &notrigger);
+ if (available_error_type & ACPI65_EINJV2_SUPP)
+ einj_v2_enabled = setup_einjv2_component_files();
}
if (vendor_dev[0]) {
@@ -848,7 +1097,7 @@ static void __exit einj_remove(struct faux_device *fdev)
if (einj_param) {
acpi_size size = (acpi5) ?
- sizeof(struct set_error_type_with_address) :
+ v5param_size :
sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
@@ -860,6 +1109,7 @@ static void __exit einj_remove(struct faux_device *fdev)
apei_resources_release(&einj_resources);
apei_resources_fini(&einj_resources);
debugfs_remove_recursive(einj_debug_dir);
+ kfree(syndrome_data);
acpi_put_table((struct acpi_table_header *)einj_tab);
}
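A note on the einj-core.c changes above: the SET_ERROR_TYPE_WITH_ADDRESS parameter block is firmware memory mapped with acpi_os_map_iomem(), so the patch stops dereferencing the __iomem pointer directly and instead snapshots the block with memcpy_fromio(), edits the local copy, and writes it back with memcpy_toio(); for EINJv2 it additionally remaps the region once the component-array length is known. A condensed sketch of that read-modify-write pattern (the struct and function names below are illustrative, not the patch's; memcpy_fromio()/memcpy_toio() come from <linux/io.h>):

/* Sketch only: safe access to a firmware-owned parameter block
 * that sits behind an __iomem mapping.
 */
struct fw_param {
	u32 type;
	u64 address;
};

static void update_fw_param(void __iomem *regs, u64 addr)
{
	struct fw_param p;

	memcpy_fromio(&p, regs, sizeof(p));	/* snapshot the MMIO block */
	p.address = addr;			/* modify the local copy   */
	memcpy_toio(regs, &p, sizeof(p));	/* write the block back    */
}

On the user-visible side, the new error_type handler parses an optional "V2_" prefix, so (assuming the usual /sys/kernel/debug/apei/einj debugfs location) something like 'echo V2_0x2 > error_type' should select the EINJv2 memory error type before triggering an injection.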
diff --git a/drivers/acpi/apei/einj-cxl.c b/drivers/acpi/apei/einj-cxl.c
index 78da9ae543a2..e70a416ec925 100644
--- a/drivers/acpi/apei/einj-cxl.c
+++ b/drivers/acpi/apei/einj-cxl.c
@@ -30,7 +30,7 @@ int einj_cxl_available_error_type_show(struct seq_file *m, void *v)
int cxl_err, rc;
u32 available_error_type = 0;
- rc = einj_get_available_error_type(&available_error_type);
+ rc = einj_get_available_error_type(&available_error_type, ACPI_EINJ_GET_ERROR_TYPE);
if (rc)
return rc;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f0584ccad451..a0d54993edb3 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -464,28 +464,41 @@ static void ghes_clear_estatus(struct ghes *ghes,
ghes_ack_error(ghes->generic_v2);
}
-/*
- * Called as task_work before returning to user-space.
- * Ensure any queued work has been done before we return to the context that
- * triggered the notification.
+/**
+ * struct ghes_task_work - for synchronous RAS event
+ *
+ * @twork: callback_head for task work
+ * @pfn: page frame number of corrupted page
+ * @flags: work control flags
+ *
+ * Structure to pass task work to be handled before
+ * returning to user-space via task_work_add().
*/
-static void ghes_kick_task_work(struct callback_head *head)
+struct ghes_task_work {
+ struct callback_head twork;
+ u64 pfn;
+ int flags;
+};
+
+static void memory_failure_cb(struct callback_head *twork)
{
- struct acpi_hest_generic_status *estatus;
- struct ghes_estatus_node *estatus_node;
- u32 node_len;
+ struct ghes_task_work *twcb = container_of(twork, struct ghes_task_work, twork);
+ int ret;
- estatus_node = container_of(head, struct ghes_estatus_node, task_work);
- if (IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
- memory_failure_queue_kick(estatus_node->task_work_cpu);
+ ret = memory_failure(twcb->pfn, twcb->flags);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)twcb, sizeof(*twcb));
- estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
- node_len = GHES_ESTATUS_NODE_LEN(cper_estatus_len(estatus));
- gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len);
+ if (!ret || ret == -EHWPOISON || ret == -EOPNOTSUPP)
+ return;
+
+ pr_err("%#llx: Sending SIGBUS to %s:%d due to hardware memory corruption\n",
+ twcb->pfn, current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
}
static bool ghes_do_memory_failure(u64 physical_addr, int flags)
{
+ struct ghes_task_work *twcb;
unsigned long pfn;
if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE))
@@ -499,6 +512,18 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
return false;
}
+ if (flags == MF_ACTION_REQUIRED && current->mm) {
+ twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
+ if (!twcb)
+ return false;
+
+ twcb->pfn = pfn;
+ twcb->flags = flags;
+ init_task_work(&twcb->twork, memory_failure_cb);
+ task_work_add(current, &twcb->twork, TWA_RESUME);
+ return true;
+ }
+
memory_failure_queue(pfn, flags);
return true;
}
@@ -842,7 +867,7 @@ int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
-static bool ghes_do_proc(struct ghes *ghes,
+static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
int sev, sec_sev;
@@ -902,7 +927,16 @@ static bool ghes_do_proc(struct ghes *ghes,
}
}
- return queued;
+ /*
+ * If no memory failure work is queued for abnormal synchronous
+ * errors, do a force kill.
+ */
+ if (sync && !queued) {
+ dev_err(ghes->dev,
+ HW_ERR GHES_PFX "%s:%d: synchronous unrecoverable error (SIGBUS)\n",
+ current->comm, task_pid_nr(current));
+ force_sig(SIGBUS);
+ }
}
static void __ghes_print_estatus(const char *pfx,
@@ -1088,6 +1122,8 @@ static void __ghes_panic(struct ghes *ghes,
__ghes_print_estatus(KERN_EMERG, ghes->generic, estatus);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+
ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx);
if (!panic_timeout)
@@ -1206,9 +1242,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
- bool task_work_pending;
u32 len, node_len;
- int ret;
llnode = llist_del_all(&ghes_estatus_llist);
/*
@@ -1223,25 +1257,16 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
len = cper_estatus_len(estatus);
node_len = GHES_ESTATUS_NODE_LEN(len);
- task_work_pending = ghes_do_proc(estatus_node->ghes, estatus);
+
+ ghes_do_proc(estatus_node->ghes, estatus);
+
if (!ghes_estatus_cached(estatus)) {
generic = estatus_node->generic;
if (ghes_print_estatus(NULL, generic, estatus))
ghes_estatus_cache_add(generic, estatus);
}
-
- if (task_work_pending && current->mm) {
- estatus_node->task_work.func = ghes_kick_task_work;
- estatus_node->task_work_cpu = smp_processor_id();
- ret = task_work_add(current, &estatus_node->task_work,
- TWA_RESUME);
- if (ret)
- estatus_node->task_work.func = NULL;
- }
-
- if (!estatus_node->task_work.func)
- gen_pool_free(ghes_estatus_pool,
- (unsigned long)estatus_node, node_len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
llnode = next;
}
@@ -1302,7 +1327,6 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes,
estatus_node->ghes = ghes;
estatus_node->generic = ghes->generic;
- estatus_node->task_work.func = NULL;
estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
if (__ghes_read_estatus(estatus, buf_paddr, fixmap_idx, len)) {
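A note on the ghes.c rework above: for action-required synchronous errors that hit a user task, the handler now allocates a small ghes_task_work item from the estatus pool and defers memory_failure() to the return-to-userspace path via task_work_add(), raising SIGBUS itself if recovery fails. A condensed sketch of that deferral pattern (names below are illustrative; the patch's own version allocates from and frees back to the gen_pool and handles more return codes; relies on <linux/task_work.h> and <linux/mm.h>):

/* Sketch only: run a callback in process context just before the
 * current task returns to user space.
 */
struct my_work {
	struct callback_head twork;
	unsigned long pfn;
};

static void my_cb(struct callback_head *twork)
{
	struct my_work *w = container_of(twork, struct my_work, twork);

	memory_failure(w->pfn, MF_ACTION_REQUIRED);
	kfree(w);
}

static void defer_memory_failure(unsigned long pfn)
{
	struct my_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;
	w->pfn = pfn;
	init_task_work(&w->twork, my_cb);
	if (task_work_add(current, &w->twork, TWA_RESUME))
		kfree(w);	/* task is exiting, nothing to defer to */
}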
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index c2ab2783303f..a984ccd4a2a0 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1406,7 +1406,7 @@ static int __init acpi_bus_init(void)
goto error1;
/*
- * Register the for all standard device notifications.
+ * Register for all standard device notifications.
*/
status =
acpi_install_notify_handler(ACPI_ROOT_OBJECT, ACPI_SYSTEM_NOTIFY,
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index dbd4446025ec..4e0583274b8f 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1119,6 +1119,8 @@ int acpi_subsys_prepare(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
+ dev_pm_set_strict_midlayer(dev, true);
+
if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) {
int ret = dev->driver->pm->prepare(dev);
@@ -1147,6 +1149,8 @@ void acpi_subsys_complete(struct device *dev)
*/
if (pm_runtime_suspended(dev) && pm_resume_via_firmware())
pm_request_resume(dev);
+
+ dev_pm_set_strict_midlayer(dev, false);
}
EXPORT_SYMBOL_GPL(acpi_subsys_complete);
@@ -1362,6 +1366,8 @@ static int acpi_subsys_poweroff_noirq(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
+static void acpi_dev_pm_detach(struct device *dev, bool power_off);
+
static struct dev_pm_domain acpi_general_pm_domain = {
.ops = {
.runtime_suspend = acpi_subsys_runtime_suspend,
@@ -1382,6 +1388,7 @@ static struct dev_pm_domain acpi_general_pm_domain = {
.restore_early = acpi_subsys_restore_early,
#endif
},
+ .detach = acpi_dev_pm_detach,
};
/**
@@ -1465,7 +1472,6 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
acpi_device_wakeup_disable(adev);
}
- dev->pm_domain->detach = acpi_dev_pm_detach;
return 1;
}
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index e8caf4106ff9..776914f31b9e 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -238,6 +238,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC10A5", 0},
{"INTC10D8", 0},
{"INTC10D9", 0},
+ {"INTC1100", 0},
+ {"INTC1101", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index aef7aca2161d..a222df059a16 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -61,6 +61,13 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC10D7"},
{"INTC10D8"},
{"INTC10D9"},
+ {"INTC10FC"},
+ {"INTC10FD"},
+ {"INTC10FE"},
+ {"INTC10FF"},
+ {"INTC1100"},
+ {"INTC1101"},
+ {"INTC1102"},
{""},
};
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 15eba1c70e66..8a28a72a7c6a 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -20,6 +20,7 @@
{"INTC106A", }, /* Fan for Lunar Lake generation */ \
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"INTC10D6", }, /* Fan for Panther Lake generation */ \
+ {"INTC10FE", }, /* Fan for Wildcat Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index 22d29ac2447c..c1afb7b5ed3d 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -22,9 +22,9 @@ static ssize_t show_state(struct device *dev, struct device_attribute *attr, cha
int count;
if (fps->control == 0xFFFFFFFF || fps->control > 100)
- count = scnprintf(buf, PAGE_SIZE, "not-defined:");
+ count = sysfs_emit(buf, "not-defined:");
else
- count = scnprintf(buf, PAGE_SIZE, "%lld:", fps->control);
+ count = sysfs_emit(buf, "%lld:", fps->control);
if (fps->trip_point == 0xFFFFFFFF || fps->trip_point > 9)
count += sysfs_emit_at(buf, count, "not-defined:");
@@ -59,7 +59,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
if (status)
return status;
- return sprintf(buf, "%lld\n", fst.speed);
+ return sysfs_emit(buf, "%lld\n", fst.speed);
}
static ssize_t show_fine_grain_control(struct device *dev, struct device_attribute *attr, char *buf)
@@ -67,7 +67,7 @@ static ssize_t show_fine_grain_control(struct device *dev, struct device_attribu
struct acpi_device *acpi_dev = container_of(dev, struct acpi_device, dev);
struct acpi_fan *fan = acpi_driver_data(acpi_dev);
- return sprintf(buf, "%d\n", fan->fif.fine_grain_ctrl);
+ return sysfs_emit(buf, "%d\n", fan->fif.fine_grain_ctrl);
}
int acpi_fan_create_attributes(struct acpi_device *device)
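
For illustration, a minimal sketch of the sysfs_emit() pattern the hunks above switch to. The attribute name and show() callback are made up and not part of the ACPI fan driver; only sysfs_emit(), sysfs_emit_at() and DEVICE_ATTR_RO() are real kernel interfaces. Unlike sprintf()/scnprintf(), these helpers know the sysfs buffer is a full, page-aligned PAGE_SIZE page, so no size argument can be passed incorrectly.

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical attribute, shown only to illustrate the helpers. */
static ssize_t speed_rpm_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	int count;

	/* sysfs_emit() writes at the start of the page... */
	count = sysfs_emit(buf, "%d:", 1200);
	/* ...and sysfs_emit_at() appends at a byte offset. */
	count += sysfs_emit_at(buf, count, "%s\n", "nominal");
	return count;
}
static DEVICE_ATTR_RO(speed_rpm);
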
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 8ad12ad3aaaf..095502086b41 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -102,7 +102,7 @@ match_fps:
break;
}
if (i == fan->fps_count) {
- dev_dbg(&device->dev, "Invalid control value returned\n");
+ dev_dbg(&device->dev, "No matching fps control value\n");
return -EINVAL;
}
diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c
index 3902759abcba..bce6f6a18426 100644
--- a/drivers/acpi/nfit/intel.c
+++ b/drivers/acpi/nfit/intel.c
@@ -55,10 +55,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned long security_flags = 0;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_get_security_state cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_GET_SECURITY_STATE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -120,10 +119,9 @@ static unsigned long intel_security_flags(struct nvdimm *nvdimm,
static int intel_security_freeze(struct nvdimm *nvdimm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_freeze_lock cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FREEZE_LOCK,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -153,10 +151,9 @@ static int intel_security_change_key(struct nvdimm *nvdimm,
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_SET_MASTER_PASSPHRASE :
NVDIMM_INTEL_SET_PASSPHRASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_set_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE * 2,
@@ -195,10 +192,9 @@ static int __maybe_unused intel_security_unlock(struct nvdimm *nvdimm,
const struct nvdimm_key_data *key_data)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_unlock_unit cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_UNLOCK_UNIT,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -234,10 +230,9 @@ static int intel_security_disable(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_disable_passphrase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_DISABLE_PASSPHRASE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -277,10 +272,9 @@ static int __maybe_unused intel_security_erase(struct nvdimm *nvdimm,
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
unsigned int cmd = ptype == NVDIMM_MASTER ?
NVDIMM_INTEL_MASTER_SECURE_ERASE : NVDIMM_INTEL_SECURE_ERASE;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_secure_erase cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_family = NVDIMM_FAMILY_INTEL,
.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
@@ -318,10 +312,9 @@ static int __maybe_unused intel_security_query_overwrite(struct nvdimm *nvdimm)
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_query_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_QUERY_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -354,10 +347,9 @@ static int __maybe_unused intel_security_overwrite(struct nvdimm *nvdimm,
{
int rc;
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_overwrite cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_OVERWRITE,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -407,10 +399,9 @@ const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops;
static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc,
struct nd_intel_bus_fw_activate_businfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate_businfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO,
.nd_family = NVDIMM_BUS_FAMILY_INTEL,
@@ -518,33 +509,31 @@ static enum nvdimm_fwa_capability intel_bus_fwa_capability(
static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_bus_fw_activate cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
- .nd_family = NVDIMM_BUS_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
- .nd_size_out =
- sizeof(struct nd_intel_bus_fw_activate),
- .nd_fw_size =
- sizeof(struct nd_intel_bus_fw_activate),
- },
+ ) nd_cmd;
+ int rc;
+
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE,
+ .nd_family = NVDIMM_BUS_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.iodev_state),
+ .nd_size_out =
+ sizeof(struct nd_intel_bus_fw_activate),
+ .nd_fw_size =
+ sizeof(struct nd_intel_bus_fw_activate),
+ };
+ nd_cmd.cmd = (struct nd_intel_bus_fw_activate) {
/*
* Even though activate is run from a suspended context,
* for safety, still ask platform firmware to force
* quiesce devices by default. Let a module
* parameter override that policy.
*/
- .cmd = {
- .iodev_state = acpi_desc->fwa_noidle
- ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
- : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
- },
+ .iodev_state = acpi_desc->fwa_noidle
+ ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE
+ : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE,
};
- int rc;
-
switch (intel_bus_fwa_state(nd_desc)) {
case NVDIMM_FWA_ARMED:
case NVDIMM_FWA_ARM_OVERFLOW:
@@ -582,10 +571,9 @@ const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops;
static int intel_fwa_dimminfo(struct nvdimm *nvdimm,
struct nd_intel_fw_activate_dimminfo *info)
{
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_dimminfo cmd;
- } nd_cmd = {
+ ) nd_cmd = {
.pkg = {
.nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO,
.nd_family = NVDIMM_FAMILY_INTEL,
@@ -688,27 +676,24 @@ static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm)
{
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc;
- struct {
- struct nd_cmd_pkg pkg;
+ TRAILING_OVERLAP(struct nd_cmd_pkg, pkg, nd_payload,
struct nd_intel_fw_activate_arm cmd;
- } nd_cmd = {
- .pkg = {
- .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
- .nd_family = NVDIMM_FAMILY_INTEL,
- .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
- .nd_size_out =
- sizeof(struct nd_intel_fw_activate_arm),
- .nd_fw_size =
- sizeof(struct nd_intel_fw_activate_arm),
- },
- .cmd = {
- .activate_arm = arm == NVDIMM_FWA_ARM
- ? ND_INTEL_DIMM_FWA_ARM
- : ND_INTEL_DIMM_FWA_DISARM,
- },
- };
+ ) nd_cmd;
int rc;
+ nd_cmd.pkg = (struct nd_cmd_pkg) {
+ .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM,
+ .nd_family = NVDIMM_FAMILY_INTEL,
+ .nd_size_in = sizeof(nd_cmd.cmd.activate_arm),
+ .nd_size_out = sizeof(struct nd_intel_fw_activate_arm),
+ .nd_fw_size = sizeof(struct nd_intel_fw_activate_arm),
+ };
+ nd_cmd.cmd = (struct nd_intel_fw_activate_arm) {
+ .activate_arm = arm == NVDIMM_FWA_ARM ?
+ ND_INTEL_DIMM_FWA_ARM :
+ ND_INTEL_DIMM_FWA_DISARM,
+ };
+
switch (intel_fwa_state(nvdimm)) {
case NVDIMM_FWA_INVALID:
return -ENXIO;
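
The TRAILING_OVERLAP() conversions above replace the old anonymous struct pair (header followed by a typed command) with a wrapper that makes the command formally overlap the header's flexible nd_payload[] area. The sketch below is a userspace illustration of that idea only; struct hdr, struct freeze_cmd and union overlap are stand-ins and do not reproduce the kernel's nd_cmd_pkg or the real TRAILING_OVERLAP() macro definition.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hdr {			/* stand-in for the command header */
	uint32_t command;
	uint32_t size_in;
	/* unsigned char payload[];  flexible payload would start here */
};

struct freeze_cmd {		/* stand-in for a typed payload */
	uint32_t status;
};

union overlap {			/* assumed shape of the overlap wrapper */
	struct hdr pkg;
	struct {
		unsigned char pad[sizeof(struct hdr)];
		struct freeze_cmd cmd;	/* overlays the payload area */
	} u;
};

int main(void)
{
	union overlap nd_cmd = {
		.pkg = { .command = 0x1a, .size_in = 0 },
	};

	/* The typed payload begins exactly where the header ends. */
	printf("cmd offset %zu, header size %zu, command 0x%x\n",
	       offsetof(union overlap, u.cmd), sizeof(struct hdr),
	       nd_cmd.pkg.command);
	return 0;
}
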
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 08e10b6226dc..e4560b33b8ad 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -268,7 +268,7 @@ static int acpi_pci_link_get_current(struct acpi_pci_link *link)
link->irq.active = irq;
- acpi_handle_debug(handle, "Link at IRQ %d \n", link->irq.active);
+ acpi_handle_debug(handle, "Link at IRQ %d\n", link->irq.active);
end:
return result;
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
index 031d1ba81b86..318683744ed1 100644
--- a/drivers/acpi/pfr_update.c
+++ b/drivers/acpi/pfr_update.c
@@ -127,8 +127,11 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
pfru_dev->rev_id,
PFRU_FUNC_QUERY_UPDATE_CAP,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with no object\n");
return ret;
+ }
if (out_obj->package.count < CAP_NR_IDX ||
out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -141,13 +144,17 @@ static int query_capability(struct pfru_update_cap_info *cap_hdr,
out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER ||
out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER ||
- out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER)
+ out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query cap failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value;
if (cap_hdr->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status);
+ dev_dbg(pfru_dev->parent_dev, "Query cap Error Status:%d\n",
+ cap_hdr->status);
goto free_acpi_buffer;
}
@@ -193,24 +200,32 @@ static int query_buffer(struct pfru_com_buf_info *info,
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF,
NULL, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with no object\n");
return ret;
+ }
if (out_obj->package.count < BUF_NR_IDX ||
out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value;
info->ext_status =
out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value;
if (info->status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Status:%d\n", info->status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Query buf failed with Error Extended Status:%d\n", info->ext_status);
goto free_acpi_buffer;
}
@@ -295,12 +310,16 @@ static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
m_img_hdr = data + size;
type = get_image_type(m_img_hdr, pfru_dev);
- if (type < 0)
+ if (type < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image type\n");
return false;
+ }
size = adjust_efi_size(m_img_hdr, size);
- if (size < 0)
+ if (size < 0) {
+ dev_dbg(pfru_dev->parent_dev, "Invalid image size\n");
return false;
+ }
auth = data + size;
size += sizeof(u64) + auth->auth_info.hdr.len;
@@ -346,8 +365,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
pfru_dev->rev_id, PFRU_FUNC_START,
&in_obj, ACPI_TYPE_PACKAGE);
- if (!out_obj)
+ if (!out_obj) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed to start with no object\n");
return ret;
+ }
if (out_obj->package.count < UPDATE_NR_IDX ||
out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
@@ -355,8 +377,11 @@ static int start_update(int action, struct pfru_device *pfru_dev)
out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER ||
out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
- out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER)
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with invalid package count/type\n");
goto free_acpi_buffer;
+ }
update_result.status =
out_obj->package.elements[UPDATE_STATUS_IDX].integer.value;
@@ -365,8 +390,10 @@ static int start_update(int action, struct pfru_device *pfru_dev)
if (update_result.status != DSM_SUCCEED) {
ret = -EBUSY;
- dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status);
- dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n",
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Status:%d\n", update_result.status);
+ dev_dbg(pfru_dev->parent_dev,
+ "Update failed with Error Extended Status:%d\n",
update_result.ext_status);
goto free_acpi_buffer;
@@ -450,8 +477,10 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
if (ret)
return ret;
- if (len > buf_info.buf_size)
+ if (len > buf_info.buf_size) {
+ dev_dbg(pfru_dev->parent_dev, "Capsule image size too large\n");
return -EINVAL;
+ }
iov.iov_base = (void __user *)buf;
iov.iov_len = len;
@@ -460,10 +489,14 @@ static ssize_t pfru_write(struct file *file, const char __user *buf,
/* map the communication buffer */
phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB);
- if (!buf_ptr)
+ if (!buf_ptr) {
+ dev_dbg(pfru_dev->parent_dev, "Failed to remap the buffer\n");
return -ENOMEM;
+ }
if (!copy_from_iter_full(buf_ptr, len, &iter)) {
+ dev_dbg(pfru_dev->parent_dev,
+ "Failed to copy the data from the user space buffer\n");
ret = -EINVAL;
goto unmap;
}
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index e549914a636c..be033bbb126a 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -85,8 +85,6 @@ static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
}
}
- pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
-
return 0;
}
@@ -154,13 +152,37 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
th->handler_addr =
(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
+ /*
+ * Print a warning message if handler_addr is zero, which is not expected
+ * to ever happen.
+ */
+ if (unlikely(!th->handler_addr))
+ pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->handler_address);
th->static_data_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
+ /*
+ * According to the PRM specification, static_data_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->static_data_buffer_addr && handler_info->static_data_buffer_address))
+ pr_warn("Failed to find VA of static data buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->static_data_buffer_address);
th->acpi_param_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
+ /*
+ * According to the PRM specification, acpi_param_buffer_address can be zero,
+ * so avoid printing a warning message in that case. Otherwise, if the
+ * return value of efi_pa_va_lookup() is zero, print the message.
+ */
+ if (unlikely(!th->acpi_param_buffer_addr && handler_info->acpi_param_buffer_address))
+ pr_warn("Failed to find VA of acpi param buffer for GUID: %pUL, PA: 0x%llx",
+ &th->guid, handler_info->acpi_param_buffer_address);
+
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 4322f2da6d10..c08ead07252b 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/export.h>
+#include <linux/string_choices.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <linux/acpi.h>
@@ -30,17 +30,16 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
if (!dev->wakeup.flags.valid)
continue;
- seq_printf(seq, "%s\t S%d\t",
+ seq_printf(seq, "%s\t S%llu\t",
dev->pnp.bus_id,
- (u32) dev->wakeup.sleep_state);
+ dev->wakeup.sleep_state);
mutex_lock(&dev->physical_node_lock);
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- device_may_wakeup(&dev->dev) ?
- "enabled" : "disabled");
+ str_enabled_disabled(device_may_wakeup(&dev->dev)));
} else {
struct device *ldev;
list_for_each_entry(entry, &dev->physical_node_list,
@@ -55,9 +54,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.valid ? '*' : ' ',
- (device_may_wakeup(&dev->dev) ||
- device_may_wakeup(ldev)) ?
- "enabled" : "disabled",
+ str_enabled_disabled(device_may_wakeup(ldev) ||
+ device_may_wakeup(&dev->dev)),
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
put_device(ldev);
@@ -141,6 +139,5 @@ static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
void __init acpi_sleep_proc_init(void)
{
/* 'wakeup device' [R/W] */
- proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
- acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
+ proc_create("wakeup", 0644, acpi_root_dir, &acpi_system_wakeup_device_proc_ops);
}
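
The proc.c hunk above swaps open-coded "enabled"/"disabled" ternaries for the str_enabled_disabled() helper from <linux/string_choices.h>. A minimal userspace equivalent of what such a helper boils down to (the kernel version may differ in detail):

#include <stdbool.h>
#include <stdio.h>

static inline const char *str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}

int main(void)
{
	printf("%s\n", str_enabled_disabled(true));	/* prints "enabled" */
	printf("%s\n", str_enabled_disabled(false));	/* prints "disabled" */
	return 0;
}
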
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 64b8d1e19594..755003bf3a45 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -173,11 +173,14 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
unsigned int cpu;
+ if (ignore_ppc == 1)
+ return;
+
for_each_cpu(cpu, policy->related_cpus) {
struct acpi_processor *pr = per_cpu(processors, cpu);
int ret;
- if (!pr)
+ if (!pr || !pr->performance)
continue;
/*
@@ -193,6 +196,11 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
+
+ ret = acpi_processor_get_platform_limit(pr);
+ if (ret)
+ pr_err("Failed to update freq constraint for CPU%d (%d)\n",
+ cpu, ret);
}
}
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index d1541a386fbc..f9c2bc1d4a3a 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -235,7 +235,7 @@ static int acpi_processor_throttling_notifier(unsigned long event, void *data)
if (pr->throttling_platform_limit > target_state)
target_state = pr->throttling_platform_limit;
if (target_state >= p_throttling->state_count) {
- pr_warn("Exceed the limit of T-state \n");
+ pr_warn("Exceed the limit of T-state\n");
target_state = p_throttling->state_count - 1;
}
p_tstate->target_state = target_state;
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
index 4cdff387deff..440cf9fb91aa 100644
--- a/drivers/acpi/riscv/cppc.c
+++ b/drivers/acpi/riscv/cppc.c
@@ -37,10 +37,8 @@ static int __init sbi_cppc_init(void)
{
if (sbi_spec_version >= sbi_mk_version(2, 0) &&
sbi_probe_extension(SBI_EXT_CPPC) > 0) {
- pr_info("SBI CPPC extension detected\n");
cppc_ext_present = true;
} else {
- pr_info("SBI CPPC extension NOT detected!!\n");
cppc_ext_present = false;
}
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index b02bf770aead..ff6dc957bc11 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -42,7 +42,7 @@ void acpi_enable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
@@ -67,7 +67,7 @@ void acpi_disable_wakeup_devices(u8 sleep_state)
list_for_each_entry_safe(dev, tmp, &acpi_wakeup_device_list,
wakeup_list) {
if (!dev->wakeup.flags.valid
- || sleep_state > (u32) dev->wakeup.sleep_state
+ || sleep_state > dev->wakeup.sleep_state
|| !(device_may_wakeup(&dev->dev)
|| dev->wakeup.prepare_count))
continue;
diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c
index 258440b899a9..6daa6372f980 100644
--- a/drivers/acpi/x86/lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -387,9 +387,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },
- /* Wildcat Point LPSS devices */
- { "INT3438", LPSS_ADDR(lpt_spi_dev_desc) },
-
{ }
};
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 71482d639a6d..74e34a07ef72 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -138,7 +138,7 @@ static int amba_read_periphid(struct amba_device *dev)
void __iomem *tmp;
int i, ret;
- ret = dev_pm_domain_attach(&dev->dev, true);
+ ret = dev_pm_domain_attach(&dev->dev, PD_FLAG_ATTACH_POWER_ON);
if (ret) {
dev_dbg(&dev->dev, "can't get PM domain: %d\n", ret);
goto err_out;
@@ -291,7 +291,7 @@ static int amba_probe(struct device *dev)
if (ret < 0)
break;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret)
break;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c463ca4a8fff..1ba2192ae667 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5384,10 +5384,9 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
- if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
+ return -EFAULT;
+
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
@@ -5402,8 +5401,6 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
goto out;
}
}
@@ -5417,22 +5414,17 @@ static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
- if (ret < 0) {
- if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
- ret = -EFAULT;
+ if (ret < 0)
goto out;
- }
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
- if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
- ret = -EFAULT;
- goto out;
- }
out:
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
return ret;
}
@@ -6128,7 +6120,7 @@ static int binder_release(struct inode *nodp, struct file *filp)
debugfs_remove(proc->debugfs_entry);
if (proc->binderfs_entry) {
- binderfs_remove_file(proc->binderfs_entry);
+ simple_recursive_removal(proc->binderfs_entry, NULL);
proc->binderfs_entry = NULL;
}
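
The binder_ioctl_write_read() rework above drops the per-error-path copy_to_user() calls and lets the single out: label copy bwr back on every exit. A userspace sketch of that single-exit shape, with made-up do_write()/do_read() helpers and memcpy() standing in for copy_to_user():

#include <stdint.h>
#include <string.h>

struct write_read {
	uint64_t write_consumed;
	uint64_t read_consumed;
};

static int do_write(struct write_read *wr) { wr->write_consumed = 8; return 0; }
static int do_read(struct write_read *wr) { wr->read_consumed = 16; return 0; }

static int write_read(void *ubuf, struct write_read wr)
{
	int ret;

	ret = do_write(&wr);
	if (ret < 0) {
		wr.read_consumed = 0;	/* error: report nothing was read */
		goto out;
	}

	ret = do_read(&wr);
	if (ret < 0)
		goto out;

out:
	/* One copy-back covers the success path and every error path. */
	memcpy(ubuf, &wr, sizeof(wr));
	return ret;
}

int main(void)
{
	struct write_read res, in = { 0, 0 };

	return write_read(&res, in);
}
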
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 1ba5caf1d88d..a5fd23dcafcd 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -81,7 +81,6 @@ extern bool is_binderfs_device(const struct inode *inode);
extern struct dentry *binderfs_create_file(struct dentry *dir, const char *name,
const struct file_operations *fops,
void *data);
-extern void binderfs_remove_file(struct dentry *dentry);
#else
static inline bool is_binderfs_device(const struct inode *inode)
{
@@ -94,7 +93,6 @@ static inline struct dentry *binderfs_create_file(struct dentry *dir,
{
return NULL;
}
-static inline void binderfs_remove_file(struct dentry *dentry) {}
#endif
#ifdef CONFIG_ANDROID_BINDERFS
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 024275dbfdd8..acc946bb1457 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -500,21 +500,6 @@ static struct dentry *binderfs_create_dentry(struct dentry *parent,
return dentry;
}
-void binderfs_remove_file(struct dentry *dentry)
-{
- struct inode *parent_inode;
-
- parent_inode = d_inode(dentry->d_parent);
- inode_lock(parent_inode);
- if (simple_positive(dentry)) {
- dget(dentry);
- simple_unlink(parent_inode, dentry);
- d_delete(dentry);
- dput(dentry);
- }
- inode_unlock(parent_inode);
-}
-
struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
const struct file_operations *fops,
void *data)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index e00536b49552..120a2b7067fc 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -117,23 +117,39 @@ config SATA_AHCI
config SATA_MOBILE_LPM_POLICY
int "Default SATA Link Power Management policy"
- range 0 4
+ range 0 5
default 3
depends on SATA_AHCI
help
Select the Default SATA Link Power Management (LPM) policy to use
for chipsets / "South Bridges" supporting low-power modes. Such
chipsets are ubiquitous across laptops, desktops and servers.
-
- The value set has the following meanings:
+ Each policy combines power saving states and features:
+ - Partial: The Phy logic is powered but is in a reduced power
+ state. The exit latency from this state is no longer than
+ 10us.
+ - Slumber: The Phy logic is powered but is in an even lower power
+ state. The exit latency from this state is potentially
+ longer, but no longer than 10ms.
+ - DevSleep: The Phy logic may be powered down. The exit latency from
+ this state is no longer than 20 ms, unless otherwise
+ specified by DETO in the device Identify Device Data log.
+ - HIPM: Host Initiated Power Management (host automatically
+ transitions to partial and slumber).
+ - DIPM: Device Initiated Power Management (device automatically
+ transitions to partial and slumber).
+
+ The possible values for the default SATA link power management
+ policies are:
0 => Keep firmware settings
- 1 => Maximum performance
- 2 => Medium power
- 3 => Medium power with Device Initiated PM enabled
- 4 => Minimum power
-
- Note "Minimum power" is known to cause issues, including disk
- corruption, with some disks and should not be used.
+ 1 => No power savings (maximum performance)
+ 2 => HIPM (Partial)
+ 3 => HIPM (Partial) and DIPM (Partial and Slumber)
+ 4 => HIPM (Partial and DevSleep) and DIPM (Partial and Slumber)
+ 5 => HIPM (Slumber and DevSleep) and DIPM (Partial and Slumber)
+
+ Excluding the value 0, higher values represent policies with higher
+ power savings.
config SATA_AHCI_PLATFORM
tristate "Platform AHCI SATA support"
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index aa93b0ecbbc6..e1c24bbacf64 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -110,17 +110,17 @@ static const struct scsi_host_template ahci_sht = {
static struct ata_port_operations ahci_vt8251_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_vt8251_hardreset,
+ .reset.hardreset = ahci_vt8251_hardreset,
};
static struct ata_port_operations ahci_p5wdh_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_p5wdh_hardreset,
+ .reset.hardreset = ahci_p5wdh_hardreset,
};
static struct ata_port_operations ahci_avn_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_avn_hardreset,
+ .reset.hardreset = ahci_avn_hardreset,
};
static const struct ata_port_info ahci_port_info[] = {
@@ -674,7 +674,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
static int mobile_lpm_policy = -1;
module_param(mobile_lpm_policy, int, 0644);
-MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
+MODULE_PARM_DESC(mobile_lpm_policy,
+ "Default LPM policy. Despite its name, this parameter applies "
+ "to all chipsets, including desktop and server chipsets");
static char *ahci_mask_port_map;
module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
@@ -1774,15 +1776,26 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
* LPM if the port advertises itself as an external port.
*/
if (ap->pflags & ATA_PFLAG_EXTERNAL) {
- ata_port_dbg(ap, "external port, not enabling LPM\n");
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
return;
}
+ /* If no Partial or no Slumber, we cannot support DIPM. */
+ if ((ap->host->flags & ATA_HOST_NO_PART) ||
+ (ap->host->flags & ATA_HOST_NO_SSC)) {
+ ata_port_dbg(ap, "Host does not support DIPM\n");
+ ap->flags |= ATA_FLAG_NO_DIPM;
+ }
+
/* If no LPM states are supported by the HBA, do not bother with LPM */
if ((ap->host->flags & ATA_HOST_NO_PART) &&
(ap->host->flags & ATA_HOST_NO_SSC) &&
(ap->host->flags & ATA_HOST_NO_DEVSLP)) {
- ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
+ ata_port_dbg(ap,
+ "No LPM states supported, forcing LPM max_power\n");
+ ap->flags |= ATA_FLAG_NO_LPM;
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
return;
}
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index ca0924dc5bd2..f97566c420f8 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -137,13 +137,13 @@ static int ahci_da850_hardreset(struct ata_link *link,
static struct ata_port_operations ahci_da850_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_da850_softreset,
+ .reset.softreset = ahci_da850_softreset,
/*
* No need to override .pmp_softreset - it's only used for actual
* PMP-enabled ports.
*/
- .hardreset = ahci_da850_hardreset,
- .pmp_hardreset = ahci_da850_hardreset,
+ .reset.hardreset = ahci_da850_hardreset,
+ .pmp_reset.hardreset = ahci_da850_hardreset,
};
static const struct ata_port_info ahci_da850_port_info = {
diff --git a/drivers/ata/ahci_dm816.c b/drivers/ata/ahci_dm816.c
index b08547b877a1..93faed2cfeb6 100644
--- a/drivers/ata/ahci_dm816.c
+++ b/drivers/ata/ahci_dm816.c
@@ -124,7 +124,7 @@ static int ahci_dm816_softreset(struct ata_link *link,
static struct ata_port_operations ahci_dm816_port_ops = {
.inherits = &ahci_platform_ops,
- .softreset = ahci_dm816_softreset,
+ .reset.softreset = ahci_dm816_softreset,
};
static const struct ata_port_info ahci_dm816_port_info = {
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index f01f08048f97..86aedd5923ac 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -642,18 +642,19 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
int ret;
if (imxpriv->type == AHCI_IMX53)
- ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
+ ret = ahci_pmp_retry_srst_ops.reset.softreset(link, class,
+ deadline);
else
- ret = ahci_ops.softreset(link, class, deadline);
+ ret = ahci_ops.reset.softreset(link, class, deadline);
return ret;
}
static struct ata_port_operations ahci_imx_ops = {
- .inherits = &ahci_ops,
- .host_stop = ahci_imx_host_stop,
- .error_handler = ahci_imx_error_handler,
- .softreset = ahci_imx_softreset,
+ .inherits = &ahci_ops,
+ .host_stop = ahci_imx_host_stop,
+ .error_handler = ahci_imx_error_handler,
+ .reset.softreset = ahci_imx_softreset,
};
static const struct ata_port_info ahci_imx_port_info = {
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 30e39885b64e..0dec1a17e5b1 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -146,8 +146,8 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
}
static struct ata_port_operations ahci_qoriq_ops = {
- .inherits = &ahci_ops,
- .hardreset = ahci_qoriq_hardreset,
+ .inherits = &ahci_ops,
+ .reset.hardreset = ahci_qoriq_hardreset,
};
static const struct ata_port_info ahci_qoriq_port_info = {
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index dfbd8c53abcb..5d5a51a77f5d 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -613,11 +613,11 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
+ .reset.softreset = xgene_ahci_softreset,
+ .pmp_reset.softreset = xgene_ahci_pmp_softreset,
.read_id = xgene_ahci_read_id,
.qc_issue = xgene_ahci_qc_issue,
- .softreset = xgene_ahci_softreset,
- .pmp_softreset = xgene_ahci_pmp_softreset
};
static const struct ata_port_info xgene_ahci_v1_port_info = {
@@ -630,7 +630,7 @@ static const struct ata_port_info xgene_ahci_v1_port_info = {
static struct ata_port_operations xgene_ahci_v2_ops = {
.inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop,
- .hardreset = xgene_ahci_hardreset,
+ .reset.hardreset = xgene_ahci_hardreset,
.read_id = xgene_ahci_read_id,
};
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index d441246fa357..229429ba5027 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1074,7 +1074,7 @@ static struct ata_port_operations piix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = piix_set_piomode,
.set_dmamode = piix_set_dmamode,
- .prereset = piix_pata_prereset,
+ .reset.prereset = piix_pata_prereset,
};
static struct ata_port_operations piix_vmw_ops = {
@@ -1102,7 +1102,7 @@ static const struct scsi_host_template piix_sidpr_sht = {
static struct ata_port_operations piix_sidpr_sata_ops = {
.inherits = &piix_sata_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = piix_sidpr_scr_read,
.scr_write = piix_sidpr_scr_write,
.set_lpm = piix_sidpr_set_lpm,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 4e9c82f36df1..b335fb7e5cb4 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -162,10 +162,10 @@ struct ata_port_operations ahci_ops = {
.freeze = ahci_freeze,
.thaw = ahci_thaw,
- .softreset = ahci_softreset,
- .hardreset = ahci_hardreset,
- .postreset = ahci_postreset,
- .pmp_softreset = ahci_softreset,
+ .reset.softreset = ahci_softreset,
+ .reset.hardreset = ahci_hardreset,
+ .reset.postreset = ahci_postreset,
+ .pmp_reset.softreset = ahci_softreset,
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.dev_config = ahci_dev_config,
@@ -192,7 +192,7 @@ EXPORT_SYMBOL_GPL(ahci_ops);
struct ata_port_operations ahci_pmp_retry_srst_ops = {
.inherits = &ahci_ops,
- .softreset = ahci_pmp_retry_softreset,
+ .reset.softreset = ahci_pmp_retry_softreset,
};
EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
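
Throughout this series the prereset/softreset/hardreset/postreset callbacks move into a reset-operations struct embedded in ata_port_operations, which is why drivers now initialize them as .reset.hardreset = fn. A standalone sketch of that nested designated-initializer shape; the types below are illustrative and do not mirror libata's actual struct layout:

#include <stdio.h>

struct reset_ops {
	int (*prereset)(void);
	int (*softreset)(void);
	int (*hardreset)(void);
	void (*postreset)(void);
};

struct port_ops {
	struct reset_ops reset;		/* grouped reset methods */
	int (*qc_issue)(void);
};

static int my_hardreset(void)
{
	return 0;
}

/* Designated initializers may reach directly into the embedded struct. */
static const struct port_ops example_ops = {
	.reset.hardreset	= my_hardreset,
};

int main(void)
{
	if (example_ops.reset.hardreset)
		return example_ops.reset.hardreset();
	return -1;
}
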
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 79b20da0a256..97d9f0488cc1 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -65,8 +65,8 @@
#include "libata-transport.h"
const struct ata_port_operations ata_base_port_ops = {
- .prereset = ata_std_prereset,
- .postreset = ata_std_postreset,
+ .reset.prereset = ata_std_prereset,
+ .reset.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
.sched_eh = ata_std_sched_eh,
.end_eh = ata_std_end_eh,
@@ -2154,14 +2154,46 @@ retry:
return err_mask;
}
+static inline void ata_clear_log_directory(struct ata_device *dev)
+{
+ memset(dev->gp_log_dir, 0, ATA_SECT_SIZE);
+}
+
+static int ata_read_log_directory(struct ata_device *dev)
+{
+ u16 version;
+
+ /* If the log page is already cached, do nothing. */
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version == 0x0001)
+ return 0;
+
+ if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->gp_log_dir, 1)) {
+ ata_clear_log_directory(dev);
+ return -EIO;
+ }
+
+ version = get_unaligned_le16(&dev->gp_log_dir[0]);
+ if (version != 0x0001) {
+ ata_dev_err(dev, "Invalid log directory version 0x%04x\n",
+ version);
+ ata_clear_log_directory(dev);
+ dev->quirks |= ATA_QUIRK_NO_LOG_DIR;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int ata_log_supported(struct ata_device *dev, u8 log)
{
if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
return 0;
- if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1))
+ if (ata_read_log_directory(dev))
return 0;
- return get_unaligned_le16(&dev->sector_buf[log * 2]);
+
+ return get_unaligned_le16(&dev->gp_log_dir[log * 2]);
}
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
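
ata_read_log_directory() above caches the 512-byte log directory in dev->gp_log_dir and treats the mandatory version word 0x0001 at offset 0 as the "already cached and valid" marker. A userspace sketch of that read-once pattern; read_log_from_device() and the buffer layout are stand-ins, not libata code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECT_SIZE 512

static uint8_t log_dir[SECT_SIZE];	/* cached log directory */

static int read_log_from_device(uint8_t *buf)
{
	/* Pretend the device returned a directory with version 0x0001. */
	memset(buf, 0, SECT_SIZE);
	buf[0] = 0x01;
	buf[2 * 0x30] = 1;		/* e.g. log 0x30 has one page */
	return 0;
}

static int read_log_directory(void)
{
	/* The version word doubles as the "already cached" marker. */
	if (log_dir[0] == 0x01 && log_dir[1] == 0x00)
		return 0;

	if (read_log_from_device(log_dir) ||
	    log_dir[0] != 0x01 || log_dir[1] != 0x00) {
		memset(log_dir, 0, SECT_SIZE);	/* keep the cache invalid */
		return -1;
	}
	return 0;
}

static int log_pages(uint8_t log)
{
	if (read_log_directory())
		return 0;
	return log_dir[log * 2] | (log_dir[log * 2 + 1] << 8);
}

int main(void)
{
	printf("log 0x30 pages: %d\n", log_pages(0x30));
	return 0;
}
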
@@ -2421,18 +2453,7 @@ static void ata_dev_config_zac(struct ata_device *dev)
dev->zac_zones_optimal_nonseq = U32_MAX;
dev->zac_zones_max_open = U32_MAX;
- /*
- * Always set the 'ZAC' flag for Host-managed devices.
- */
- if (dev->class == ATA_DEV_ZAC)
- dev->flags |= ATA_DFLAG_ZAC;
- else if (ata_id_zoned_cap(dev->id) == 0x01)
- /*
- * Check for host-aware devices.
- */
- dev->flags |= ATA_DFLAG_ZAC;
-
- if (!(dev->flags & ATA_DFLAG_ZAC))
+ if (!ata_dev_is_zac(dev))
return;
if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
@@ -2495,7 +2516,7 @@ static void ata_dev_config_trusted(struct ata_device *dev)
dev->flags |= ATA_DFLAG_TRUSTED;
}
-void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
+static void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
{
kfree(dev->cdl);
dev->cdl = NULL;
@@ -2801,17 +2822,70 @@ out:
kfree(buf);
}
+/*
+ * Configure features related to link power management.
+ */
+static void ata_dev_config_lpm(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ unsigned int err_mask;
+
+ if (ap->flags & ATA_FLAG_NO_LPM) {
+ /*
+ * When the port does not support LPM, we cannot support it on
+ * the device either.
+ */
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ } else {
+ /*
+ * Some WD SATA-1 drives have issues with LPM, turn on NOLPM for
+ * them.
+ */
+ if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
+ (dev->id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+ dev->quirks |= ATA_QUIRK_NOLPM;
+
+ /* ATI specific quirk */
+ if ((dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI) &&
+ ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
+ dev->quirks |= ATA_QUIRK_NOLPM;
+ }
+
+ if (dev->quirks & ATA_QUIRK_NOLPM &&
+ ap->target_lpm_policy != ATA_LPM_MAX_POWER) {
+ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+ ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ }
+
+ /*
+ * Device Initiated Power Management (DIPM) is normally disabled by
+ * default on a device. However, DIPM may have been enabled and that
+ * setting kept even after COMRESET because of the Software Settings
+ * Preservation feature. So if the port does not support DIPM and the
+ * device does, disable DIPM on the device.
+ */
+ if (ap->flags & ATA_FLAG_NO_DIPM && ata_id_has_dipm(dev->id)) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV)
+ ata_dev_err(dev, "Disable DIPM failed, Emask 0x%x\n",
+ err_mask);
+ }
+}
+
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
+ ata_id_has_hipm(dev->id) ? " HIPM" : "",
+ ata_id_has_dipm(dev->id) ? " DIPM" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
@@ -2848,6 +2922,9 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
}
+ /* Clear the general purpose log directory cache. */
+ ata_clear_log_directory(dev);
+
/* Set quirks */
dev->quirks |= ata_dev_quirks(dev);
ata_force_quirks(dev);
@@ -2871,23 +2948,6 @@ int ata_dev_configure(struct ata_device *dev)
if (rc)
return rc;
- /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
- if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
- (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
- ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (ap->flags & ATA_FLAG_NO_LPM)
- dev->quirks |= ATA_QUIRK_NOLPM;
-
- if (dev->quirks & ATA_QUIRK_NOLPM) {
- ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
- dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
- }
-
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -2974,6 +3034,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_chs(dev);
}
+ ata_dev_config_lpm(dev);
ata_dev_config_fua(dev);
ata_dev_config_devslp(dev);
ata_dev_config_sense_reporting(dev);
@@ -3449,7 +3510,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
}
/**
- * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
@@ -3465,7 +3526,7 @@ static int ata_dev_set_mode(struct ata_device *dev)
* 0 on success, negative errno otherwise
*/
-int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3546,7 +3607,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
*r_failed_dev = dev;
return rc;
}
-EXPORT_SYMBOL_GPL(ata_do_set_mode);
+EXPORT_SYMBOL_GPL(ata_set_mode);
/**
* ata_wait_ready - wait for link to become ready
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 1f90fcd2b3c4..2946ae6d4b2c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -153,8 +153,6 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
#undef CMDS
static void __ata_port_freeze(struct ata_port *ap);
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
@@ -825,7 +823,7 @@ void ata_port_wait_eh(struct ata_port *ap)
retry:
spin_lock_irqsave(ap->lock, flags);
- while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
+ while (ata_port_eh_scheduled(ap)) {
prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irqrestore(ap->lock, flags);
schedule();
@@ -909,7 +907,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
+static void ata_eh_set_pending(struct ata_port *ap, bool fastdrain)
{
unsigned int cnt;
@@ -949,7 +947,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
qc->flags |= ATA_QCFLAG_EH;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
@@ -971,7 +969,7 @@ void ata_std_sched_eh(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
- ata_eh_set_pending(ap, 1);
+ ata_eh_set_pending(ap, true);
scsi_schedule_eh(ap->scsi_host);
trace_ata_std_sched_eh(ap);
@@ -1022,7 +1020,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
int tag, nr_aborted = 0;
/* we're gonna abort all commands, no need for fast drain */
- ata_eh_set_pending(ap, 0);
+ ata_eh_set_pending(ap, false);
/* include internal tag in iteration */
ata_qc_for_each_with_internal(ap, qc, tag) {
@@ -2073,6 +2071,183 @@ out:
ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
}
+/*
+ * Check if a link is established. This is a relaxed version of
+ * ata_phys_link_online() which accounts for the fact that this is potentially
+ * called after changing the link power management policy, which may not be
+ * reflected immediately in the SSTAUS register (e.g., we may still be seeing
+ * the PHY in partial, slumber or devsleep Partial power management state.
+ * So check that:
+ * - A device is still present, that is, DET is 1h (Device presence detected
+ * but Phy communication not established) or 3h (Device presence detected and
+ * Phy communication established)
+ * - Communication is established, that is, IPM is not 0h, indicating that PHY
+ * is online or in a low power state.
+ */
+static bool ata_eh_link_established(struct ata_link *link)
+{
+ u32 sstatus;
+ u8 det, ipm;
+
+ if (sata_scr_read(link, SCR_STATUS, &sstatus))
+ return false;
+
+ det = sstatus & 0x0f;
+ ipm = (sstatus >> 8) & 0x0f;
+
+ return (det & 0x01) && ipm;
+}
+
+/**
+ * ata_eh_link_set_lpm - configure SATA interface power management
+ * @link: link to configure
+ * @policy: the link power management policy
+ * @r_failed_dev: out parameter for failed device
+ *
+ * Enable SATA Interface power management. This will enable
+ * Device Interface Power Management (DIPM) for min_power and
+ * medium_power_with_dipm policies, and then call driver specific
+ * callbacks for enabling Host Initiated Power management.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int ata_eh_link_set_lpm(struct ata_link *link,
+ enum ata_lpm_policy policy,
+ struct ata_device **r_failed_dev)
+{
+ struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
+ enum ata_lpm_policy old_policy = link->lpm_policy;
+ bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
+ unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
+ unsigned int err_mask;
+ int rc;
+
+ /* if the link or host doesn't do LPM, noop */
+ if (!IS_ENABLED(CONFIG_SATA_HOST) ||
+ (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
+ return 0;
+
+ /*
+ * This function currently assumes that it will never be supplied policy
+ * ATA_LPM_UNKNOWN.
+ */
+ if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
+ return 0;
+
+ ata_link_dbg(link, "Set LPM policy: %d -> %d\n", old_policy, policy);
+
+ /*
+ * DIPM is enabled only for ATA_LPM_MIN_POWER,
+ * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
+ * some devices misbehave when the host NACKs transition to SLUMBER.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_hipm = ata_id_has_hipm(dev->id);
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ /* find the first enabled and LPM enabled devices */
+ if (!link_dev)
+ link_dev = dev;
+
+ if (!lpm_dev &&
+ (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
+ lpm_dev = dev;
+
+ hints &= ~ATA_LPM_EMPTY;
+ if (!dev_has_hipm)
+ hints &= ~ATA_LPM_HIPM;
+
+ /* disable DIPM before changing link config */
+ if (dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_DISABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to disable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ if (ap) {
+ rc = ap->ops->set_lpm(link, policy, hints);
+ if (!rc && ap->slave_link)
+ rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
+ } else
+ rc = sata_pmp_set_lpm(link, policy, hints);
+
+ /*
+ * Attribute link config failure to the first (LPM) enabled
+ * device on the link.
+ */
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ link->flags |= ATA_LFLAG_NO_LPM;
+ return 0;
+ }
+ dev = lpm_dev ? lpm_dev : link_dev;
+ goto fail;
+ }
+
+ /*
+ * Low level driver acked the transition. Issue DIPM command
+ * with the new policy set.
+ */
+ link->lpm_policy = policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = policy;
+
+ /*
+ * Host config updated, enable DIPM if transitioning to
+ * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
+ * ATA_LPM_MED_POWER_WITH_DIPM.
+ */
+ ata_for_each_dev(dev, link, ENABLED) {
+ bool dev_has_dipm = ata_id_has_dipm(dev->id);
+
+ if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
+ dev_has_dipm) {
+ err_mask = ata_dev_set_feature(dev,
+ SETFEATURES_SATA_ENABLE, SATA_DIPM);
+ if (err_mask && err_mask != AC_ERR_DEV) {
+ ata_dev_warn(dev,
+ "failed to enable DIPM, Emask 0x%x\n",
+ err_mask);
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ }
+
+ link->last_lpm_change = jiffies;
+ link->flags |= ATA_LFLAG_CHANGED;
+
+ return 0;
+
+fail:
+ /* restore the old policy */
+ link->lpm_policy = old_policy;
+ if (ap && ap->slave_link)
+ ap->slave_link->lpm_policy = old_policy;
+
+ /* if no device or only one more chance is left, disable LPM */
+ if (!dev || ehc->tries[dev->devno] <= 2) {
+ ata_link_warn(link, "disabling LPM on the link\n");
+ link->flags |= ATA_LFLAG_NO_LPM;
+ }
+ if (r_failed_dev)
+ *r_failed_dev = dev;
+ return rc;
+}
+
/**
* ata_eh_link_autopsy - analyze error and determine recovery action
* @link: host link to perform autopsy on
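
ata_eh_link_established() above only looks at two SStatus fields: DET in bits 3:0 and IPM in bits 11:8. A standalone sketch of that decoding; the sample register values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool link_established(uint32_t sstatus)
{
	uint8_t det = sstatus & 0x0f;		/* device detection */
	uint8_t ipm = (sstatus >> 8) & 0x0f;	/* interface power mgmt */

	/* DET 1h/3h => device present; IPM != 0h => PHY active or in LPM. */
	return (det & 0x01) && ipm;
}

int main(void)
{
	printf("0x123 -> %d\n", link_established(0x123));	/* online: 1 */
	printf("0x611 -> %d\n", link_established(0x611));	/* slumber: 1 */
	printf("0x000 -> %d\n", link_established(0x000));	/* no device: 0 */
	return 0;
}
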
@@ -2606,25 +2781,28 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
return reset(link, classes, deadline);
}
-static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
+static bool ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
- return 0;
+ return false;
if (rc == -EAGAIN)
- return 1;
+ return true;
if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
- return 1;
- return 0;
+ return true;
+ return false;
}
int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_port *ap = link->ap;
struct ata_link *slave = ap->slave_link;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
+ ata_reset_fn_t hardreset = reset_ops->hardreset;
+ ata_reset_fn_t softreset = reset_ops->softreset;
+ ata_prereset_fn_t prereset = reset_ops->prereset;
+ ata_postreset_fn_t postreset = reset_ops->postreset;
unsigned int *classes = ehc->classes;
unsigned int lflags = link->flags;
int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
@@ -3123,13 +3301,13 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* to ap->target_lpm_policy after revalidation is done.
*/
if (link->lpm_policy > ATA_LPM_MAX_POWER) {
- rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
- r_failed_dev);
+ rc = ata_eh_link_set_lpm(link, ATA_LPM_MAX_POWER,
+ r_failed_dev);
if (rc)
goto err;
}
- if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
+ if (!ata_eh_link_established(ata_dev_phys_link(dev))) {
rc = -EIO;
goto err;
}
@@ -3233,12 +3411,12 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
}
/**
- * ata_set_mode - Program timings and issue SET FEATURES - XFER
+ * ata_eh_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
* @r_failed_dev: out parameter for failed device
*
* Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
- * ata_set_mode() fails, pointer to the failing device is
+ * ata_eh_set_mode() fails, pointer to the failing device is
* returned in @r_failed_dev.
*
* LOCKING:
@@ -3247,7 +3425,8 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
* RETURNS:
* 0 on success, negative errno otherwise
*/
-int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
+static int ata_eh_set_mode(struct ata_link *link,
+ struct ata_device **r_failed_dev)
{
struct ata_port *ap = link->ap;
struct ata_device *dev;
@@ -3268,7 +3447,7 @@ int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
if (ap->ops->set_mode)
rc = ap->ops->set_mode(link, r_failed_dev);
else
- rc = ata_do_set_mode(link, r_failed_dev);
+ rc = ata_set_mode(link, r_failed_dev);
/* if transfer mode has changed, set DUBIOUS_XFER on device */
ata_for_each_dev(dev, link, ENABLED) {
@@ -3408,153 +3587,6 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
return rc;
}
-/**
- * ata_eh_set_lpm - configure SATA interface power management
- * @link: link to configure power management
- * @policy: the link power management policy
- * @r_failed_dev: out parameter for failed device
- *
- * Enable SATA Interface power management. This will enable
- * Device Interface Power Management (DIPM) for min_power and
- * medium_power_with_dipm policies, and then call driver specific
- * callbacks for enabling Host Initiated Power management.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- struct ata_device **r_failed_dev)
-{
- struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
- struct ata_eh_context *ehc = &link->eh_context;
- struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
- enum ata_lpm_policy old_policy = link->lpm_policy;
- bool host_has_dipm = !(link->ap->flags & ATA_FLAG_NO_DIPM);
- unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
- unsigned int err_mask;
- int rc;
-
- /* if the link or host doesn't do LPM, noop */
- if (!IS_ENABLED(CONFIG_SATA_HOST) ||
- (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
- return 0;
-
- /*
- * This function currently assumes that it will never be supplied policy
- * ATA_LPM_UNKNOWN.
- */
- if (WARN_ON_ONCE(policy == ATA_LPM_UNKNOWN))
- return 0;
-
- /*
- * DIPM is enabled only for ATA_LPM_MIN_POWER,
- * ATA_LPM_MIN_POWER_WITH_PARTIAL, and ATA_LPM_MED_POWER_WITH_DIPM, as
- * some devices misbehave when the host NACKs transition to SLUMBER.
- */
- ata_for_each_dev(dev, link, ENABLED) {
- bool dev_has_hipm = ata_id_has_hipm(dev->id);
- bool dev_has_dipm = ata_id_has_dipm(dev->id);
-
- /* find the first enabled and LPM enabled devices */
- if (!link_dev)
- link_dev = dev;
-
- if (!lpm_dev &&
- (dev_has_hipm || (dev_has_dipm && host_has_dipm)))
- lpm_dev = dev;
-
- hints &= ~ATA_LPM_EMPTY;
- if (!dev_has_hipm)
- hints &= ~ATA_LPM_HIPM;
-
- /* disable DIPM before changing link config */
- if (dev_has_dipm) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_DISABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to disable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- if (ap) {
- rc = ap->ops->set_lpm(link, policy, hints);
- if (!rc && ap->slave_link)
- rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
- } else
- rc = sata_pmp_set_lpm(link, policy, hints);
-
- /*
- * Attribute link config failure to the first (LPM) enabled
- * device on the link.
- */
- if (rc) {
- if (rc == -EOPNOTSUPP) {
- link->flags |= ATA_LFLAG_NO_LPM;
- return 0;
- }
- dev = lpm_dev ? lpm_dev : link_dev;
- goto fail;
- }
-
- /*
- * Low level driver acked the transition. Issue DIPM command
- * with the new policy set.
- */
- link->lpm_policy = policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = policy;
-
- /*
- * Host config updated, enable DIPM if transitioning to
- * ATA_LPM_MIN_POWER, ATA_LPM_MIN_POWER_WITH_PARTIAL, or
- * ATA_LPM_MED_POWER_WITH_DIPM.
- */
- ata_for_each_dev(dev, link, ENABLED) {
- bool dev_has_dipm = ata_id_has_dipm(dev->id);
-
- if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && host_has_dipm &&
- dev_has_dipm) {
- err_mask = ata_dev_set_feature(dev,
- SETFEATURES_SATA_ENABLE, SATA_DIPM);
- if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_warn(dev,
- "failed to enable DIPM, Emask 0x%x\n",
- err_mask);
- rc = -EIO;
- goto fail;
- }
- }
- }
-
- link->last_lpm_change = jiffies;
- link->flags |= ATA_LFLAG_CHANGED;
-
- return 0;
-
-fail:
- /* restore the old policy */
- link->lpm_policy = old_policy;
- if (ap && ap->slave_link)
- ap->slave_link->lpm_policy = old_policy;
-
- /* if no device or only one more chance is left, disable LPM */
- if (!dev || ehc->tries[dev->devno] <= 2) {
- ata_link_warn(link, "disabling LPM on the link\n");
- link->flags |= ATA_LFLAG_NO_LPM;
- }
- if (r_failed_dev)
- *r_failed_dev = dev;
- return rc;
-}
-
int ata_link_nr_enabled(struct ata_link *link)
{
struct ata_device *dev;
@@ -3727,10 +3759,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
/**
* ata_eh_recover - recover host port after error
* @ap: host port to recover
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
* @r_failed_link: out parameter for failed link
*
* This is the alpha and omega, yin and yang, heart and soul of
@@ -3746,9 +3775,7 @@ static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
* RETURNS:
* 0 on success, -errno on failure.
*/
-int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+int ata_eh_recover(struct ata_port *ap, struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_link)
{
struct ata_link *link;
@@ -3816,8 +3843,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
if (!(ehc->i.action & ATA_EH_RESET))
continue;
- rc = ata_eh_reset(link, ata_link_nr_vacant(link),
- prereset, softreset, hardreset, postreset);
+ rc = ata_eh_reset(link, ata_link_nr_vacant(link), reset_ops);
if (rc) {
ata_link_err(link, "reset failed, giving up\n");
goto out;
@@ -3898,7 +3924,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
/* configure transfer mode if necessary */
if (ehc->i.flags & ATA_EHI_SETMODE) {
- rc = ata_set_mode(link, &dev);
+ rc = ata_eh_set_mode(link, &dev);
if (rc)
goto rest_fail;
ehc->i.flags &= ~ATA_EHI_SETMODE;
@@ -3943,7 +3969,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
config_lpm:
/* configure link power saving */
if (link->lpm_policy != ap->target_lpm_policy) {
- rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
+ rc = ata_eh_link_set_lpm(link, ap->target_lpm_policy,
+ &dev);
if (rc)
goto rest_fail;
}
@@ -4037,59 +4064,39 @@ void ata_eh_finish(struct ata_port *ap)
}
/**
- * ata_do_eh - do standard error handling
+ * ata_std_error_handler - standard error handler
* @ap: host port to handle error for
*
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method (can be NULL)
- * @hardreset: hardreset method (can be NULL)
- * @postreset: postreset method (can be NULL)
- *
* Perform standard error handling sequence.
*
* LOCKING:
* Kernel thread context (may sleep).
*/
-void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset)
+void ata_std_error_handler(struct ata_port *ap)
{
- struct ata_device *dev;
+ struct ata_reset_operations *reset_ops = &ap->ops->reset;
+ struct ata_link *link = &ap->link;
int rc;
+ /* Ignore built-in hardresets if SCR access is not available */
+ if ((reset_ops->hardreset == sata_std_hardreset ||
+ reset_ops->hardreset == sata_sff_hardreset) &&
+ !sata_scr_valid(link))
+ link->flags |= ATA_LFLAG_NO_HRST;
+
ata_eh_autopsy(ap);
ata_eh_report(ap);
- rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
- NULL);
+ rc = ata_eh_recover(ap, reset_ops, NULL);
if (rc) {
- ata_for_each_dev(dev, &ap->link, ALL)
+ struct ata_device *dev;
+
+ ata_for_each_dev(dev, link, ALL)
ata_dev_disable(dev);
}
ata_eh_finish(ap);
}
-
-/**
- * ata_std_error_handler - standard error handler
- * @ap: host port to handle error for
- *
- * Standard error handler
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- */
-void ata_std_error_handler(struct ata_port *ap)
-{
- struct ata_port_operations *ops = ap->ops;
- ata_reset_fn_t hardreset = ops->hardreset;
-
- /* ignore built-in hardreset if SCR access is not available */
- if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
-}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
#ifdef CONFIG_PM
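For low-level drivers, the visible effect of the libata-eh.c rework above is that the individual prereset/softreset/hardreset/postreset methods are grouped into struct ata_reset_operations (with a parallel pmp_reset group for port-multiplier links), and that ata_do_eh() callers move to ata_std_error_handler(), which now skips the built-in hardresets itself when SCR access is unavailable. A minimal sketch of a converted port_operations table, using only member names visible in the hunks above; the foo_* names are hypothetical:

#include <linux/libata.h>

/*
 * Sketch only: mirrors the .reset.* grouping applied throughout this
 * series. foo_hardreset() stands in for a driver-specific hardreset.
 */
static int foo_hardreset(struct ata_link *link, unsigned int *class,
			 unsigned long deadline)
{
	/* driver-specific PHY handling would go here */
	return sata_std_hardreset(link, class, deadline);
}

static struct ata_port_operations foo_port_ops = {
	.inherits		= &sata_port_ops,
	.reset.prereset		= ata_std_prereset,
	.reset.hardreset	= foo_hardreset,
	.reset.postreset	= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};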
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index d5d189328ae6..57023324a56f 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -15,9 +15,9 @@
const struct ata_port_operations sata_pmp_port_ops = {
.inherits = &sata_port_ops,
- .pmp_prereset = ata_std_prereset,
- .pmp_hardreset = sata_std_hardreset,
- .pmp_postreset = ata_std_postreset,
+ .pmp_reset.prereset = ata_std_prereset,
+ .pmp_reset.hardreset = sata_std_hardreset,
+ .pmp_reset.postreset = ata_std_postreset,
.error_handler = sata_pmp_error_handler,
};
@@ -727,10 +727,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
/**
* sata_pmp_eh_recover_pmp - recover PMP
* @ap: ATA port PMP is attached to
- * @prereset: prereset method (can be NULL)
- * @softreset: softreset method
- * @hardreset: hardreset method
- * @postreset: postreset method (can be NULL)
+ * @reset_ops: The set of reset operations to use
*
* Recover PMP attached to @ap. Recovery procedure is somewhat
* similar to that of ata_eh_recover() except that reset should
@@ -744,8 +741,7 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
* 0 on success, -errno on failure.
*/
static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
+ struct ata_reset_operations *reset_ops)
{
struct ata_link *link = &ap->link;
struct ata_eh_context *ehc = &link->eh_context;
@@ -767,8 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
struct ata_link *tlink;
/* reset */
- rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
- postreset);
+ rc = ata_eh_reset(link, 0, reset_ops);
if (rc) {
ata_link_err(link, "failed to reset PMP, giving up\n");
goto fail;
@@ -932,8 +927,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
retry:
/* PMP attached? */
if (!sata_pmp_attached(ap)) {
- rc = ata_eh_recover(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset, NULL);
+ rc = ata_eh_recover(ap, &ops->reset, NULL);
if (rc) {
ata_for_each_dev(dev, &ap->link, ALL)
ata_dev_disable(dev);
@@ -951,8 +945,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
}
/* recover pmp */
- rc = sata_pmp_eh_recover_pmp(ap, ops->prereset, ops->softreset,
- ops->hardreset, ops->postreset);
+ rc = sata_pmp_eh_recover_pmp(ap, &ops->reset);
if (rc)
goto pmp_fail;
@@ -978,8 +971,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
goto pmp_fail;
/* recover links */
- rc = ata_eh_recover(ap, ops->pmp_prereset, ops->pmp_softreset,
- ops->pmp_hardreset, ops->pmp_postreset, &link);
+ rc = ata_eh_recover(ap, &ops->pmp_reset, &link);
if (rc)
goto link_fail;
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index cb46ce276bb1..4734465d3b1e 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -924,6 +924,11 @@ static ssize_t ata_scsi_lpm_store(struct device *device,
spin_lock_irqsave(ap->lock, flags);
+ if (ap->flags & ATA_FLAG_NO_LPM) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+
ata_for_each_link(link, ap, EDGE) {
ata_for_each_dev(dev, &ap->link, ENABLED) {
if (dev->quirks & ATA_QUIRK_NOLPM) {
@@ -1699,6 +1704,6 @@ const struct ata_port_operations sata_port_ops = {
.inherits = &ata_base_port_ops,
.qc_defer = ata_std_qc_defer,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
};
EXPORT_SYMBOL_GPL(sata_port_ops);
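The ATA_FLAG_NO_LPM check added to ata_scsi_lpm_store() above makes writes to link_power_management_policy fail fast with -EOPNOTSUPP on hosts that cannot do link power management. A sketch of how a controller driver might advertise that, assuming the usual port_info layout; foo_port_info is illustrative:

#include <linux/libata.h>

/*
 * Sketch only: a controller without LPM support sets ATA_FLAG_NO_LPM,
 * so sysfs policy writes are rejected instead of attempted per link.
 */
static const struct ata_port_info foo_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LPM,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &sata_port_ops,
};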
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a21c9895408d..27b15176db56 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1923,8 +1923,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_device *dev,
};
for (i = 0; i < sizeof(pages); i++) {
- if (pages[i] == 0xb6 &&
- !(dev->flags & ATA_DFLAG_ZAC))
+ if (pages[i] == 0xb6 && !ata_dev_is_zac(dev))
continue;
rbuf[num_pages + 4] = pages[i];
num_pages++;
@@ -2181,7 +2180,7 @@ static unsigned int ata_scsiop_inq_b2(struct ata_device *dev,
static unsigned int ata_scsiop_inq_b6(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 *rbuf)
{
- if (!(dev->flags & ATA_DFLAG_ZAC)) {
+ if (!ata_dev_is_zac(dev)) {
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
return 0;
}
@@ -4317,9 +4316,10 @@ int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
* scsi_queue_rq() will defer commands if scsi_host_in_recovery().
* However, this check is done without holding the ap->lock (a libata
* specific lock), so we can have received an error irq since then,
- * therefore we must check if EH is pending, while holding ap->lock.
+ * therefore we must check if EH is pending or running, while holding
+ * ap->lock.
*/
- if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
+ if (ata_port_eh_scheduled(ap))
return SCSI_MLQUEUE_DEVICE_BUSY;
if (unlikely(!scmd->cmd_len))
@@ -4634,24 +4634,23 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
* ata_scsi_offline_dev - offline attached SCSI device
* @dev: ATA device to offline attached SCSI device for
*
- * This function is called from ata_eh_hotplug() and responsible
- * for taking the SCSI device attached to @dev offline. This
- * function is called with host lock which protects dev->sdev
- * against clearing.
+ * This function is called from ata_eh_detach_dev() and is responsible for
+ * taking the SCSI device attached to @dev offline. It is called with the
+ * host lock held, which protects dev->sdev against clearing.
*
* LOCKING:
* spin_lock_irqsave(host lock)
*
* RETURNS:
- * 1 if attached SCSI device exists, 0 otherwise.
+ * true if attached SCSI device exists, false otherwise.
*/
-int ata_scsi_offline_dev(struct ata_device *dev)
+bool ata_scsi_offline_dev(struct ata_device *dev)
{
if (dev->sdev) {
scsi_device_set_state(dev->sdev, SDEV_OFFLINE);
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/**
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 5a46c066abc3..7fc407255eb4 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -31,10 +31,10 @@ const struct ata_port_operations ata_sff_port_ops = {
.freeze = ata_sff_freeze,
.thaw = ata_sff_thaw,
- .prereset = ata_sff_prereset,
- .softreset = ata_sff_softreset,
- .hardreset = sata_sff_hardreset,
- .postreset = ata_sff_postreset,
+ .reset.prereset = ata_sff_prereset,
+ .reset.softreset = ata_sff_softreset,
+ .reset.hardreset = sata_sff_hardreset,
+ .reset.postreset = ata_sff_postreset,
.error_handler = ata_sff_error_handler,
.sff_dev_select = ata_sff_dev_select,
@@ -2054,8 +2054,6 @@ EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
*/
void ata_sff_error_handler(struct ata_port *ap)
{
- ata_reset_fn_t softreset = ap->ops->softreset;
- ata_reset_fn_t hardreset = ap->ops->hardreset;
struct ata_queued_cmd *qc;
unsigned long flags;
@@ -2077,13 +2075,7 @@ void ata_sff_error_handler(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
- /* ignore built-in hardresets if SCR access is not available */
- if ((hardreset == sata_std_hardreset ||
- hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
- hardreset = NULL;
-
- ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
- ap->ops->postreset);
+ ata_std_error_handler(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_error_handler);
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e898be49df6b..62415fe67a11 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -202,7 +202,7 @@ show_ata_port_##name(struct device *dev, \
{ \
struct ata_port *ap = transport_class_to_port(dev); \
\
- return scnprintf(buf, 20, format_string, cast ap->field); \
+ return sysfs_emit(buf, format_string, cast ap->field); \
}
#define ata_port_simple_attr(field, name, format_string, type) \
@@ -389,7 +389,7 @@ show_ata_dev_##field(struct device *dev, \
{ \
struct ata_device *ata_dev = transport_class_to_dev(dev); \
\
- return scnprintf(buf, 20, format_string, cast ata_dev->field); \
+ return sysfs_emit(buf, format_string, cast ata_dev->field); \
}
#define ata_dev_simple_attr(field, format_string, type) \
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index ce5c628fa6fd..e5b977a8d3e1 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -44,6 +44,18 @@ static inline bool ata_sstatus_online(u32 sstatus)
return (sstatus & 0xf) == 0x3;
}
+static inline bool ata_dev_is_zac(struct ata_device *dev)
+{
+ /* Host managed device or host aware device */
+ return dev->class == ATA_DEV_ZAC ||
+ ata_id_zoned_cap(dev->id) == 0x01;
+}
+
+static inline bool ata_port_eh_scheduled(struct ata_port *ap)
+{
+ return ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS);
+}
+
#ifdef CONFIG_ATA_FORCE
extern void ata_force_cbl(struct ata_port *ap);
#else
@@ -90,7 +102,6 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern const char *sata_spd_string(unsigned int spd);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
-void ata_dev_cleanup_cdl_resources(struct ata_device *dev);
#define to_ata_port(d) container_of(d, struct ata_port, tdev)
@@ -137,7 +148,7 @@ extern struct ata_device *ata_scsi_find_dev(struct ata_port *ap,
extern int ata_scsi_add_hosts(struct ata_host *host,
const struct scsi_host_template *sht);
extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
-extern int ata_scsi_offline_dev(struct ata_device *dev);
+extern bool ata_scsi_offline_dev(struct ata_device *dev);
extern bool ata_scsi_sense_is_valid(u8 sk, u8 asc, u8 ascq);
extern void ata_scsi_set_sense(struct ata_device *dev,
struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
@@ -169,12 +180,9 @@ extern void ata_eh_autopsy(struct ata_port *ap);
const char *ata_get_cmd_name(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
- ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
- ata_reset_fn_t hardreset, ata_postreset_fn_t postreset);
-extern int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
-extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
- ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
- ata_postreset_fn_t postreset,
+ struct ata_reset_operations *reset_ops);
+extern int ata_eh_recover(struct ata_port *ap,
+ struct ata_reset_operations *reset_ops,
struct ata_link **r_failed_disk);
extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index ab38871b5e00..23fff10af2ac 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -216,7 +216,7 @@ static struct ata_port_operations pacpi_ops = {
.mode_filter = pacpi_mode_filter,
.set_piomode = pacpi_set_piomode,
.set_dmamode = pacpi_set_dmamode,
- .prereset = pacpi_pre_reset,
+ .reset.prereset = pacpi_pre_reset,
.port_start = pacpi_port_start,
};
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index bb790edd6036..9d5cb9c34c52 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -392,11 +392,11 @@ static struct ata_port_operations ali_20_port_ops = {
* Port operations for DMA capable ALi with cable detect
*/
static struct ata_port_operations ali_c2_port_ops = {
- .inherits = &ali_dma_base_ops,
- .check_atapi_dma = ali_check_atapi_dma,
- .cable_detect = ali_c2_cable_detect,
- .dev_config = ali_lock_sectors,
- .postreset = ali_c2_c3_postreset,
+ .inherits = &ali_dma_base_ops,
+ .check_atapi_dma = ali_check_atapi_dma,
+ .cable_detect = ali_c2_cable_detect,
+ .dev_config = ali_lock_sectors,
+ .reset.postreset = ali_c2_c3_postreset,
};
/*
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 5b02b89748b7..a2fecadc927d 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -394,7 +394,7 @@ static const struct scsi_host_template amd_sht = {
static const struct ata_port_operations amd_base_port_ops = {
.inherits = &ata_bmdma32_port_ops,
- .prereset = amd_pre_reset,
+ .reset.prereset = amd_pre_reset,
};
static struct ata_port_operations amd33_port_ops = {
@@ -429,7 +429,7 @@ static const struct ata_port_operations nv_base_port_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = ata_cable_ignore,
.mode_filter = nv_mode_filter,
- .prereset = nv_pre_reset,
+ .reset.prereset = nv_pre_reset,
.host_stop = nv_host_stop,
};
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 40544282f455..6160414172a3 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -301,7 +301,7 @@ static struct ata_port_operations artop6210_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
@@ -310,7 +310,7 @@ static struct ata_port_operations artop6260_ops = {
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
- .prereset = artop62x0_pre_reset,
+ .reset.prereset = artop62x0_pre_reset,
};
static void atp8xx_fixup(struct pci_dev *pdev)
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 8c5cc803aab3..4c612f9543f6 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -264,7 +264,7 @@ static struct ata_port_operations atiixp_port_ops = {
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
- .prereset = atiixp_prereset,
+ .reset.prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index 2e6eccf2902f..6fe49b303fee 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -243,7 +243,7 @@ static struct ata_port_operations efar_ops = {
.cable_detect = efar_cable_detect,
.set_piomode = efar_set_piomode,
.set_dmamode = efar_set_dmamode,
- .prereset = efar_pre_reset,
+ .reset.prereset = efar_pre_reset,
};
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index e8cda988feb5..b2b9e0058333 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -879,8 +879,8 @@ static const struct scsi_host_template ep93xx_pata_sht = {
static struct ata_port_operations ep93xx_pata_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .softreset = ep93xx_pata_softreset,
- .hardreset = ATA_OP_NULL,
+ .reset.softreset = ep93xx_pata_softreset,
+ .reset.hardreset = ATA_OP_NULL,
.sff_dev_select = ep93xx_pata_dev_select,
.sff_set_devctl = ep93xx_pata_set_devctl,
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 5280e9960025..b96e8bd2a3f8 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -322,7 +322,7 @@ static const struct scsi_host_template hpt36x_sht = {
static struct ata_port_operations hpt366_port_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = hpt366_prereset,
+ .reset.prereset = hpt366_prereset,
.cable_detect = hpt36x_cable_detect,
.mode_filter = hpt366_filter,
.set_piomode = hpt366_set_piomode,
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4af22b819416..07e3a984cbb1 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -543,7 +543,7 @@ static struct ata_port_operations hpt370_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
@@ -567,7 +567,7 @@ static struct ata_port_operations hpt302_port_ops = {
.cable_detect = hpt37x_cable_detect,
.set_piomode = hpt37x_set_piomode,
.set_dmamode = hpt37x_set_dmamode,
- .prereset = hpt37x_pre_reset,
+ .reset.prereset = hpt37x_pre_reset,
};
/*
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 5b1ecccf3c83..2cc57fcf2c46 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -356,7 +356,7 @@ static struct ata_port_operations hpt3xxn_port_ops = {
.cable_detect = hpt3x2n_cable_detect,
.set_piomode = hpt3x2n_set_piomode,
.set_dmamode = hpt3x2n_set_dmamode,
- .prereset = hpt3x2n_pre_reset,
+ .reset.prereset = hpt3x2n_pre_reset,
};
/*
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 61d8760f09d9..70f056e47e6b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -336,7 +336,7 @@ static struct ata_port_operations pata_icside_port_ops = {
.cable_detect = ata_cable_40wire,
.set_dmamode = pata_icside_set_dmamode,
- .postreset = pata_icside_postreset,
+ .reset.postreset = pata_icside_postreset,
.port_start = ATA_OP_NULL, /* don't need PRD table */
};
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 9cbe2132ce59..a6f2cfc1602e 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -238,7 +238,7 @@ static struct ata_port_operations it8213_ops = {
.cable_detect = it8213_cable_detect,
.set_piomode = it8213_set_piomode,
.set_dmamode = it8213_set_dmamode,
- .prereset = it8213_pre_reset,
+ .reset.prereset = it8213_pre_reset,
};
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
index f51fb8219762..b885f33e8980 100644
--- a/drivers/ata/pata_jmicron.c
+++ b/drivers/ata/pata_jmicron.c
@@ -113,7 +113,7 @@ static const struct scsi_host_template jmicron_sht = {
static struct ata_port_operations jmicron_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = jmicron_pre_reset,
+ .reset.prereset = jmicron_pre_reset,
};
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 8119caaad605..deab67328388 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -99,7 +99,7 @@ static const struct scsi_host_template marvell_sht = {
static struct ata_port_operations marvell_ops = {
.inherits = &ata_bmdma_port_ops,
.cable_detect = marvell_cable_detect,
- .prereset = marvell_pre_reset,
+ .reset.prereset = marvell_pre_reset,
};
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index 69e4baf27d72..ce310ae7c93a 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -145,7 +145,7 @@ static struct ata_port_operations mpiix_port_ops = {
.qc_issue = mpiix_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = mpiix_set_piomode,
- .prereset = mpiix_pre_reset,
+ .reset.prereset = mpiix_pre_reset,
.sff_data_xfer = ata_sff_data_xfer32,
};
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 44cc24d21d5f..bdb55c1a3280 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -123,7 +123,7 @@ static struct ata_port_operations ns87410_port_ops = {
.qc_issue = ns87410_qc_issue,
.cable_detect = ata_cable_40wire,
.set_piomode = ns87410_set_piomode,
- .prereset = ns87410_pre_reset,
+ .reset.prereset = ns87410_pre_reset,
};
static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 2d32125c16fd..df42ebe98db7 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -941,7 +941,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
/* 16 bit but not True IDE */
base = cs0 + 0x800;
octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;
- octeon_cf_ops.softreset = octeon_cf_softreset16;
+ octeon_cf_ops.reset.softreset = octeon_cf_softreset16;
octeon_cf_ops.sff_check_status = octeon_cf_check_status16;
octeon_cf_ops.sff_tf_read = octeon_cf_tf_read16;
octeon_cf_ops.sff_tf_load = octeon_cf_tf_load16;
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index 3d01b7000e41..81a7f3eb5654 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -214,7 +214,7 @@ static struct ata_port_operations oldpiix_pata_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = oldpiix_set_piomode,
.set_dmamode = oldpiix_set_dmamode,
- .prereset = oldpiix_pre_reset,
+ .reset.prereset = oldpiix_pre_reset,
};
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 3d23f57eb128..3db1b95d1404 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -156,7 +156,7 @@ static struct ata_port_operations opti_port_ops = {
.inherits = &ata_sff_port_ops,
.cable_detect = ata_cable_40wire,
.set_piomode = opti_set_piomode,
- .prereset = opti_pre_reset,
+ .reset.prereset = opti_pre_reset,
};
static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index dfc36b4ec9c6..b42dba5f4e05 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -322,7 +322,9 @@ static int optidma_set_mode(struct ata_link *link, struct ata_device **r_failed)
u8 r;
int nybble = 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int rc = ata_do_set_mode(link, r_failed);
+ int rc;
+
+ rc = ata_set_mode(link, r_failed);
if (rc == 0) {
pci_read_config_byte(pdev, 0x43, &r);
@@ -344,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
.set_piomode = optidma_set_pio_mode,
.set_dmamode = optidma_set_dma_mode,
.set_mode = optidma_set_mode,
- .prereset = optidma_pre_reset,
+ .reset.prereset = optidma_pre_reset,
};
static struct ata_port_operations optiplus_port_ops = {
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
index 93ebf566b54e..22bd3ff6b7ae 100644
--- a/drivers/ata/pata_parport/pata_parport.c
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -321,8 +321,8 @@ static void pata_parport_drain_fifo(struct ata_queued_cmd *qc)
static struct ata_port_operations pata_parport_port_ops = {
.inherits = &ata_sff_port_ops,
- .softreset = pata_parport_softreset,
- .hardreset = NULL,
+ .reset.softreset = pata_parport_softreset,
+ .reset.hardreset = NULL,
.sff_dev_select = pata_parport_dev_select,
.sff_set_devctl = pata_parport_set_devctl,
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 5b602206c522..cf3810933a27 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -46,7 +46,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
struct ata_device *slave = &link->device[1];
if (!ata_dev_enabled(master) || !ata_dev_enabled(slave))
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
if (memcmp(master->id + ATA_ID_FW_REV, slave->id + ATA_ID_FW_REV,
ATA_ID_FW_REV_LEN + ATA_ID_PROD_LEN) == 0) {
@@ -58,7 +58,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
ata_dev_disable(slave);
}
}
- return ata_do_set_mode(link, r_failed_dev);
+ return ata_set_mode(link, r_failed_dev);
}
/**
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 6820c5597b14..d792ce6d97bf 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -130,7 +130,7 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
.inherits = &ata_bmdma_port_ops,
.check_atapi_dma = pdc2027x_check_atapi_dma,
.cable_detect = pdc2027x_cable_detect,
- .prereset = pdc2027x_prereset,
+ .reset.prereset = pdc2027x_prereset,
};
static struct ata_port_operations pdc2027x_pata133_ops = {
@@ -387,7 +387,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
struct ata_device *dev;
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc < 0)
return rc;
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 09792aac7f9d..6ff4c11e937d 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -276,7 +276,7 @@ static struct ata_port_operations rdc_pata_ops = {
.cable_detect = rdc_pata_cable_detect,
.set_piomode = rdc_set_piomode,
.set_dmamode = rdc_set_dmamode,
- .prereset = rdc_pata_prereset,
+ .reset.prereset = rdc_pata_prereset,
};
static const struct ata_port_info rdc_port_info = {
@@ -359,8 +359,8 @@ static void rdc_remove_one(struct pci_dev *pdev)
}
static const struct pci_device_id rdc_pci_tbl[] = {
- { PCI_DEVICE(0x17F3, 0x1011), },
- { PCI_DEVICE(0x17F3, 0x1012), },
+ { PCI_VDEVICE(RDC, 0x1011) },
+ { PCI_VDEVICE(RDC, 0x1012) },
{ } /* terminate list */
};
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index 31de06b66221..2b751e393771 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -552,7 +552,7 @@ static struct ata_port_operations sis_133_for_sata_ops = {
static struct ata_port_operations sis_base_ops = {
.inherits = &ata_bmdma_port_ops,
- .prereset = sis_pre_reset,
+ .reset.prereset = sis_pre_reset,
};
static struct ata_port_operations sis_133_ops = {
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 93882e976ede..2d24c6b3e9d9 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -248,7 +248,7 @@ static struct ata_port_operations sl82c105_port_ops = {
.bmdma_stop = sl82c105_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = sl82c105_set_piomode,
- .prereset = sl82c105_pre_reset,
+ .reset.prereset = sl82c105_pre_reset,
.sff_irq_check = sl82c105_sff_irq_check,
};
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index 26d448a869e2..596e86a031b3 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -170,7 +170,7 @@ static struct ata_port_operations triflex_port_ops = {
.bmdma_stop = triflex_bmdma_stop,
.cable_detect = ata_cable_40wire,
.set_piomode = triflex_set_piomode,
- .prereset = triflex_prereset,
+ .reset.prereset = triflex_prereset,
};
static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index bb80e7800dcb..a8c9cf685b4b 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -451,7 +451,7 @@ static struct ata_port_operations via_port_ops = {
.cable_detect = via_cable_detect,
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
- .prereset = via_pre_reset,
+ .reset.prereset = via_pre_reset,
.sff_tf_load = via_tf_load,
.port_start = via_port_start,
.mode_filter = via_mode_filter,
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 8e6b2599f0d5..17a5a59861c3 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -140,7 +140,7 @@ static struct ata_port_operations adma_ata_ops = {
.freeze = adma_freeze,
.thaw = adma_thaw,
- .prereset = adma_prereset,
+ .reset.prereset = adma_prereset,
.port_start = adma_port_start,
.port_stop = adma_port_stop,
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 6e1dd0d9c035..7a4f59202156 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1097,7 +1097,7 @@ static struct ata_port_operations sata_dwc_ops = {
.inherits = &ata_sff_port_ops,
.error_handler = sata_dwc_error_handler,
- .hardreset = sata_dwc_hardreset,
+ .reset.hardreset = sata_dwc_hardreset,
.qc_issue = sata_dwc_qc_issue,
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 87e91a937a44..84da8d6ef28e 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1395,9 +1395,9 @@ static struct ata_port_operations sata_fsl_ops = {
.freeze = sata_fsl_freeze,
.thaw = sata_fsl_thaw,
- .softreset = sata_fsl_softreset,
- .hardreset = sata_fsl_hardreset,
- .pmp_softreset = sata_fsl_softreset,
+ .reset.softreset = sata_fsl_softreset,
+ .reset.hardreset = sata_fsl_hardreset,
+ .pmp_reset.softreset = sata_fsl_softreset,
.error_handler = sata_fsl_error_handler,
.post_internal_cmd = sata_fsl_post_internal_cmd,
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index c8c817c51230..3421039f4bae 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -428,7 +428,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
static struct ata_port_operations ahci_highbank_ops = {
.inherits = &ahci_ops,
- .hardreset = ahci_highbank_hardreset,
+ .reset.hardreset = ahci_highbank_hardreset,
.transmit_led_message = ecx_transmit_led_message,
};
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index db9c255dc9f2..46a8c20daf18 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -730,7 +730,7 @@ static struct ata_port_operations inic_port_ops = {
.freeze = inic_freeze,
.thaw = inic_thaw,
- .hardreset = inic_hardreset,
+ .reset.hardreset = inic_hardreset,
.error_handler = inic_error_handler,
.post_internal_cmd = inic_post_internal_cmd,
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bcbf96867f89..ffb396f61731 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -687,7 +687,7 @@ static struct ata_port_operations mv5_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
+ .reset.hardreset = mv_hardreset,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
@@ -709,10 +709,10 @@ static struct ata_port_operations mv6_ops = {
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
- .hardreset = mv_hardreset,
- .softreset = mv_softreset,
- .pmp_hardreset = mv_pmp_hardreset,
- .pmp_softreset = mv_softreset,
+ .reset.hardreset = mv_hardreset,
+ .reset.softreset = mv_softreset,
+ .pmp_reset.hardreset = mv_pmp_hardreset,
+ .pmp_reset.softreset = mv_softreset,
.error_handler = mv_pmp_error_handler,
.scr_read = mv_scr_read,
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f36e2915ccf1..841e7de2bba6 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -462,7 +462,7 @@ static struct ata_port_operations nv_generic_ops = {
.lost_interrupt = ATA_OP_NULL,
.scr_read = nv_scr_read,
.scr_write = nv_scr_write,
- .hardreset = nv_hardreset,
+ .reset.hardreset = nv_hardreset,
};
static struct ata_port_operations nv_nf2_ops = {
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 2df1a070b25a..2a005aede123 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -188,7 +188,7 @@ static struct ata_port_operations pdc_sata_ops = {
.scr_read = pdc_sata_scr_read,
.scr_write = pdc_sata_scr_write,
.port_start = pdc_sata_port_start,
- .hardreset = pdc_sata_hardreset,
+ .reset.hardreset = pdc_sata_hardreset,
};
/* First-generation chips need a more restrictive ->check_atapi_dma op,
@@ -206,7 +206,7 @@ static struct ata_port_operations pdc_pata_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
.port_start = pdc_common_port_start,
- .softreset = pdc_pata_softreset,
+ .reset.softreset = pdc_pata_softreset,
};
static const struct ata_port_info pdc_port_info[] = {
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index 8a6286159044..cfb9b5b61cd7 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -123,8 +123,8 @@ static struct ata_port_operations qs_ata_ops = {
.freeze = qs_freeze,
.thaw = qs_thaw,
- .prereset = qs_prereset,
- .softreset = ATA_OP_NULL,
+ .reset.prereset = qs_prereset,
+ .reset.softreset = ATA_OP_NULL,
.error_handler = qs_error_handler,
.lost_interrupt = ATA_OP_NULL,
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 22820a02d740..487eadd4073f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -624,7 +624,7 @@ static struct ata_port_operations sata_rcar_port_ops = {
.freeze = sata_rcar_freeze,
.thaw = sata_rcar_thaw,
- .softreset = sata_rcar_softreset,
+ .reset.softreset = sata_rcar_softreset,
.scr_read = sata_rcar_scr_read,
.scr_write = sata_rcar_scr_write,
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 3a99f66198a9..1b6dc950a42a 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -351,7 +351,7 @@ static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed)
u32 tmp, dev_mode[2] = { };
int rc;
- rc = ata_do_set_mode(link, r_failed);
+ rc = ata_set_mode(link, r_failed);
if (rc)
return rc;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 87f4cde6a686..d642ece9f07a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -393,10 +393,10 @@ static struct ata_port_operations sil24_ops = {
.freeze = sil24_freeze,
.thaw = sil24_thaw,
- .softreset = sil24_softreset,
- .hardreset = sil24_hardreset,
- .pmp_softreset = sil24_softreset,
- .pmp_hardreset = sil24_pmp_hardreset,
+ .reset.softreset = sil24_softreset,
+ .reset.hardreset = sil24_hardreset,
+ .pmp_reset.softreset = sil24_softreset,
+ .pmp_reset.hardreset = sil24_pmp_hardreset,
.error_handler = sil24_error_handler,
.post_internal_cmd = sil24_post_internal_cmd,
.dev_config = sil24_dev_config,
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 598a872f6a08..c5d6aa36c9c3 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -340,8 +340,8 @@ static const struct scsi_host_template k2_sata_sht = {
static struct ata_port_operations k2_sata_ops = {
.inherits = &ata_bmdma_port_ops,
- .softreset = k2_sata_softreset,
- .hardreset = k2_sata_hardreset,
+ .reset.softreset = k2_sata_softreset,
+ .reset.hardreset = k2_sata_hardreset,
.sff_tf_load = k2_sata_tf_load,
.sff_tf_read = k2_sata_tf_read,
.sff_check_status = k2_stat_check_status,
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index f7f5131af937..0986ebd1eb4e 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -241,7 +241,7 @@ static struct ata_port_operations pdc_20621_ops = {
.freeze = pdc_freeze,
.thaw = pdc_thaw,
- .softreset = pdc_softreset,
+ .reset.softreset = pdc_softreset,
.error_handler = pdc_error_handler,
.lost_interrupt = ATA_OP_NULL,
.post_internal_cmd = pdc_post_internal_cmd,
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 52894ff49dcb..44985796cc47 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -67,7 +67,7 @@ static struct ata_port_operations uli_ops = {
.inherits = &ata_bmdma_port_ops,
.scr_read = uli_scr_read,
.scr_write = uli_scr_write,
- .hardreset = ATA_OP_NULL,
+ .reset.hardreset = ATA_OP_NULL,
};
static const struct ata_port_info uli_port_info = {
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 4ecd8f33b082..68e9003ec2d4 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -120,7 +120,7 @@ static struct ata_port_operations svia_base_ops = {
static struct ata_port_operations vt6420_sata_ops = {
.inherits = &svia_base_ops,
.freeze = svia_noop_freeze,
- .prereset = vt6420_prereset,
+ .reset.prereset = vt6420_prereset,
.bmdma_start = vt6420_bmdma_start,
};
@@ -140,7 +140,7 @@ static struct ata_port_operations vt6421_sata_ops = {
static struct ata_port_operations vt8251_ops = {
.inherits = &svia_base_ops,
- .hardreset = sata_std_hardreset,
+ .reset.hardreset = sata_std_hardreset,
.scr_read = vt8251_scr_read,
.scr_write = vt8251_scr_write,
};
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index dba7c8e13a53..44cd3f85b659 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -217,7 +217,7 @@ static int auxiliary_bus_probe(struct device *dev)
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b526e0e0f52d..13ab98e033ea 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -25,6 +25,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>
#include <linux/slab.h>
@@ -552,6 +553,7 @@ static void device_unbind_cleanup(struct device *dev)
dev->dma_range_map = NULL;
device_set_driver(dev, NULL);
dev_set_drvdata(dev, NULL);
+ dev_pm_domain_detach(dev, dev->power.detach_power_off);
if (dev->pm_domain && dev->pm_domain->dismiss)
dev->pm_domain->dismiss(dev);
pm_runtime_reinit(dev);
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 44486b2c7172..6942c62fa59d 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -822,26 +822,6 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name,
{}
#endif
-/*
- * Reject firmware file names with ".." path components.
- * There are drivers that construct firmware file names from device-supplied
- * strings, and we don't want some device to be able to tell us "I would like to
- * be sent my firmware from ../../../etc/shadow, please".
- *
- * Search for ".." surrounded by either '/' or start/end of string.
- *
- * This intentionally only looks at the firmware name, not at the firmware base
- * directory or at symlink contents.
- */
-static bool name_contains_dotdot(const char *name)
-{
- size_t name_len = strlen(name);
-
- return strcmp(name, "..") == 0 || strncmp(name, "../", 3) == 0 ||
- strstr(name, "/../") != NULL ||
- (name_len >= 3 && strcmp(name+name_len-3, "/..") == 0);
-}
-
/* called from request_firmware() and request_firmware_work_func() */
static int
_request_firmware(const struct firmware **firmware_p, const char *name,
@@ -862,6 +842,17 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
+
+ /*
+ * Reject firmware file names with ".." path components.
+ * There are drivers that construct firmware file names from
+ * device-supplied strings, and we don't want some device to be
+ * able to tell us "I would like to be sent my firmware from
+ * ../../../etc/shadow, please".
+ *
+ * This intentionally only looks at the firmware name, not at
+ * the firmware base directory or at symlink contents.
+ */
if (name_contains_dotdot(name)) {
dev_warn(device,
"Firmware load for '%s' refused, path contains '..' component\n",
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 075ec1d1b73a..09450349cf32 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1396,15 +1396,13 @@ static int platform_probe(struct device *_dev)
if (ret < 0)
return ret;
- ret = dev_pm_domain_attach(_dev, true);
+ ret = dev_pm_domain_attach(_dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
goto out;
- if (drv->probe) {
+ if (drv->probe)
ret = drv->probe(dev);
- if (ret)
- dev_pm_domain_detach(_dev, true);
- }
out:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -1422,7 +1420,6 @@ static void platform_remove(struct device *_dev)
if (drv->remove)
drv->remove(dev);
- dev_pm_domain_detach(_dev, true);
}
static void platform_shutdown(struct device *_dev)
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 781968a128ff..6ecf9ce4a4e6 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
/**
* dev_pm_domain_attach - Attach a device to its PM domain.
* @dev: Device to attach.
- * @power_on: Used to indicate whether we should power on the device.
+ * @flags: indicate whether we should power on/off the device on attach/detach
*
* The @dev may only be attached to a single PM domain. By iterating through
* the available alternatives we try to find a valid PM domain for the device.
@@ -100,17 +100,20 @@ EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
* Returns 0 on successfully attached PM domain, or when it is found that the
* device doesn't need a PM domain, else a negative error code.
*/
-int dev_pm_domain_attach(struct device *dev, bool power_on)
+int dev_pm_domain_attach(struct device *dev, u32 flags)
{
int ret;
if (dev->pm_domain)
return 0;
- ret = acpi_dev_pm_attach(dev, power_on);
+ ret = acpi_dev_pm_attach(dev, !!(flags & PD_FLAG_ATTACH_POWER_ON));
if (!ret)
ret = genpd_dev_pm_attach(dev);
+ if (dev->pm_domain)
+ dev->power.detach_power_off = !!(flags & PD_FLAG_DETACH_POWER_OFF);
+
return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
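Buses now express both the attach-time power-on and the detach-time power-off in a single call, and the detach itself is performed by the driver core in device_unbind_cleanup() (see the dd.c hunk above), so bus code no longer calls dev_pm_domain_detach() on probe failure or remove. A sketch of a bus ->probe() using the flags, mirroring the platform and auxiliary conversions above; foo_bus_probe() is hypothetical:

#include <linux/device.h>
#include <linux/pm_domain.h>

/*
 * Sketch only: attach with the device powered on, and let the driver
 * core power it off when it detaches the PM domain at unbind time.
 */
static int foo_bus_probe(struct device *dev)
{
	int ret;

	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
					PD_FLAG_DETACH_POWER_OFF);
	if (ret)
		return ret;

	/* ...match the driver and call its probe routine here... */

	return 0;
}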
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a6ab666ef48a..8aa06d59a2ee 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -647,14 +647,27 @@ static void dpm_async_resume_children(struct device *dev, async_func_t func)
/*
* Start processing "async" children of the device unless it's been
* started already for them.
- *
- * This could have been done for the device's "async" consumers too, but
- * they either need to wait for their parents or the processing has
- * already started for them after their parents were processed.
*/
device_for_each_child(dev, func, dpm_async_with_cleanup);
}
+static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ dpm_async_resume_children(dev, func);
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" consumers. */
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->consumer, func);
+
+ device_links_read_unlock(idx);
+}
+
static void dpm_clear_async_state(struct device *dev)
{
reinit_completion(&dev->power.completion);
@@ -663,7 +676,14 @@ static void dpm_clear_async_state(struct device *dev)
static bool dpm_root_device(struct device *dev)
{
- return !dev->parent;
+ lockdep_assert_held(&dpm_list_mtx);
+
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+	 * suppliers is actually empty before calling it.
+ */
+ return !dev->parent && list_empty(&dev->links.suppliers);
}
static void async_resume_noirq(void *data, async_cookie_t cookie);
@@ -747,12 +767,12 @@ Out:
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
}
- dpm_async_resume_children(dev, async_resume_noirq);
+ dpm_async_resume_subordinate(dev, async_resume_noirq);
}
static void async_resume_noirq(void *data, async_cookie_t cookie)
@@ -804,7 +824,7 @@ static void dpm_noirq_resume_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "noirq");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
@@ -890,12 +910,12 @@ Out:
complete_all(&dev->power.completion);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async early" : " early", error);
}
- dpm_async_resume_children(dev, async_resume_early);
+ dpm_async_resume_subordinate(dev, async_resume_early);
}
static void async_resume_early(void *data, async_cookie_t cookie)
@@ -951,7 +971,7 @@ void dpm_resume_early(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, "early");
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME_EARLY);
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
@@ -1066,12 +1086,12 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
TRACE_RESUME(error);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
- dpm_async_resume_children(dev, async_resume);
+ dpm_async_resume_subordinate(dev, async_resume);
}
static void async_resume(void *data, async_cookie_t cookie)
@@ -1095,7 +1115,6 @@ void dpm_resume(pm_message_t state)
ktime_t starttime = ktime_get();
trace_suspend_resume(TPS("dpm_resume"), state.event, true);
- might_sleep();
pm_transition = state;
async_error = 0;
@@ -1131,7 +1150,7 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, 0, NULL);
- if (async_error)
+ if (READ_ONCE(async_error))
dpm_save_failed_step(SUSPEND_RESUME);
cpufreq_resume();
@@ -1198,7 +1217,6 @@ void dpm_complete(pm_message_t state)
struct list_head list;
trace_suspend_resume(TPS("dpm_complete"), state.event, true);
- might_sleep();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
@@ -1258,10 +1276,15 @@ static bool dpm_leaf_device(struct device *dev)
return false;
}
- return true;
+ /*
+ * Since this function is required to run under dpm_list_mtx, the
+ * list_empty() below will only return true if the device's list of
+ * consumers is actually empty before calling it.
+ */
+ return list_empty(&dev->links.consumers);
}
-static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
+static bool dpm_async_suspend_parent(struct device *dev, async_func_t func)
{
guard(mutex)(&dpm_list_mtx);
@@ -1273,11 +1296,47 @@ static void dpm_async_suspend_parent(struct device *dev, async_func_t func)
* deleted before it.
*/
if (!device_pm_initialized(dev))
- return;
+ return false;
/* Start processing the device's parent if it is "async". */
if (dev->parent)
dpm_async_with_cleanup(dev->parent, func);
+
+ return true;
+}
+
+static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
+{
+ struct device_link *link;
+ int idx;
+
+ if (!dpm_async_suspend_parent(dev, func))
+ return;
+
+ idx = device_links_read_lock();
+
+ /* Start processing the device's "async" suppliers. */
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_async_with_cleanup(link->supplier, func);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_async_suspend_complete_all(struct list_head *device_list)
+{
+ struct device *dev;
+
+ guard(mutex)(&async_wip_mtx);
+
+ list_for_each_entry_reverse(dev, device_list, power.entry) {
+ /*
+ * In case the device is being waited for and async processing
+ * has not started for it yet, let the waiters make progress.
+ */
+ if (!dev->power.work_in_progress)
+ complete_all(&dev->power.completion);
+ }
}
/**
@@ -1328,7 +1387,7 @@ static void async_suspend_noirq(void *data, async_cookie_t cookie);
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1339,7 +1398,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (dev->power.syscore || dev->power.direct_complete)
@@ -1372,7 +1431,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state, bool asy
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
@@ -1398,12 +1457,10 @@ Complete:
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- if (error || async_error)
- return error;
-
- dpm_async_suspend_parent(dev, async_suspend_noirq);
+ if (error || READ_ONCE(async_error))
+ return;
- return 0;
+ dpm_async_suspend_superior(dev, async_suspend_noirq);
}
static void async_suspend_noirq(void *data, async_cookie_t cookie)
@@ -1418,7 +1475,7 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
- int error = 0;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
@@ -1449,13 +1506,14 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state, false);
+ device_suspend_noirq(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error) {
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_late_early_list);
/*
* Move all devices to the target list to resume them
* properly.
@@ -1468,9 +1526,8 @@ static int dpm_noirq_suspend_devices(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
@@ -1525,7 +1582,7 @@ static void async_suspend_late(void *data, async_cookie_t cookie);
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state, bool async)
+static void device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1542,11 +1599,11 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (READ_ONCE(async_error))
goto Complete;
if (pm_wakeup_pending()) {
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
@@ -1580,7 +1637,7 @@ static int device_suspend_late(struct device *dev, pm_message_t state, bool asyn
Run:
error = dpm_run_callback(callback, dev, state, info);
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
goto Complete;
@@ -1594,12 +1651,10 @@ Complete:
TRACE_SUSPEND(error);
complete_all(&dev->power.completion);
- if (error || async_error)
- return error;
-
- dpm_async_suspend_parent(dev, async_suspend_late);
+ if (error || READ_ONCE(async_error))
+ return;
- return 0;
+ dpm_async_suspend_superior(dev, async_suspend_late);
}
static void async_suspend_late(void *data, async_cookie_t cookie)
@@ -1618,7 +1673,7 @@ int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
- int error = 0;
+ int error;
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
@@ -1651,13 +1706,14 @@ int dpm_suspend_late(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state, false);
+ device_suspend_late(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error) {
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_suspended_list);
/*
* Move all devices to the target list to resume them
* properly.
@@ -1670,9 +1726,8 @@ int dpm_suspend_late(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error) {
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
@@ -1761,7 +1816,7 @@ static void async_suspend(void *data, async_cookie_t cookie);
* @state: PM transition of the system being carried out.
* @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state, bool async)
+static void device_suspend(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
const char *info = NULL;
@@ -1773,7 +1828,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error) {
+ if (READ_ONCE(async_error)) {
dev->power.direct_complete = false;
goto Complete;
}
@@ -1793,7 +1848,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
dev->power.direct_complete = false;
- async_error = -EBUSY;
+ WRITE_ONCE(async_error, -EBUSY);
goto Complete;
}
@@ -1877,7 +1932,7 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
if (error) {
- async_error = error;
+ WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async" : "", error);
}
@@ -1885,12 +1940,10 @@ static int device_suspend(struct device *dev, pm_message_t state, bool async)
complete_all(&dev->power.completion);
TRACE_SUSPEND(error);
- if (error || async_error)
- return error;
-
- dpm_async_suspend_parent(dev, async_suspend);
+ if (error || READ_ONCE(async_error))
+ return;
- return 0;
+ dpm_async_suspend_superior(dev, async_suspend);
}
static void async_suspend(void *data, async_cookie_t cookie)
@@ -1909,7 +1962,7 @@ int dpm_suspend(pm_message_t state)
{
ktime_t starttime = ktime_get();
struct device *dev;
- int error = 0;
+ int error;
trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
might_sleep();
@@ -1944,13 +1997,14 @@ int dpm_suspend(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state, false);
+ device_suspend(dev, state, false);
put_device(dev);
mutex_lock(&dpm_list_mtx);
- if (error || async_error) {
+ if (READ_ONCE(async_error)) {
+ dpm_async_suspend_complete_all(&dpm_prepared_list);
/*
* Move all devices to the target list to resume them
* properly.
@@ -1963,9 +2017,8 @@ int dpm_suspend(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- if (!error)
- error = async_error;
+ error = READ_ONCE(async_error);
if (error)
dpm_save_failed_step(SUSPEND_SUSPEND);
@@ -2110,7 +2163,6 @@ int dpm_prepare(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
- might_sleep();
/*
* Give a chance for the known devices to complete their probes, before
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index c55a7c70bc1a..f835b85bd4d9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -19,10 +19,24 @@
typedef int (*pm_callback_t)(struct device *);
+static inline pm_callback_t get_callback_ptr(const void *start, size_t offset)
+{
+ return *(pm_callback_t *)(start + offset);
+}
+
+static pm_callback_t __rpm_get_driver_callback(struct device *dev,
+ size_t cb_offset)
+{
+ if (dev->driver && dev->driver->pm)
+ return get_callback_ptr(dev->driver->pm, cb_offset);
+
+ return NULL;
+}
+
static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
{
- pm_callback_t cb;
const struct dev_pm_ops *ops;
+ pm_callback_t cb = NULL;
if (dev->pm_domain)
ops = &dev->pm_domain->ops;
@@ -36,12 +50,10 @@ static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
ops = NULL;
if (ops)
- cb = *(pm_callback_t *)((void *)ops + cb_offset);
- else
- cb = NULL;
+ cb = get_callback_ptr(ops, cb_offset);
- if (!cb && dev->driver && dev->driver->pm)
- cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
+ if (!cb)
+ cb = __rpm_get_driver_callback(dev, cb_offset);
return cb;
}
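get_callback_ptr() and __rpm_get_driver_callback() look a runtime PM callback up by its byte offset within struct dev_pm_ops, so one helper serves runtime_suspend, runtime_resume and runtime_idle alike. A hedged sketch of how the offset-based lookup composes (illustrative only, not literal kernel code):

/* Fetch and invoke the driver's ->runtime_resume, if any, by offset. */
static int call_driver_runtime_resume(struct device *dev)
{
	pm_callback_t cb = NULL;

	if (dev->driver && dev->driver->pm)
		cb = get_callback_ptr(dev->driver->pm,
				      offsetof(struct dev_pm_ops, runtime_resume));

	return cb ? cb(dev) : 0;
}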
@@ -1191,10 +1203,12 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
*
* Return -EINVAL if runtime PM is disabled for @dev.
*
- * Otherwise, if the runtime PM status of @dev is %RPM_ACTIVE and either
- * @ign_usage_count is %true or the runtime PM usage counter of @dev is not
- * zero, increment the usage counter of @dev and return 1. Otherwise, return 0
- * without changing the usage counter.
+ * Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
+ * is set, or (2) @dev is not ignoring children and its active child count is
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * the usage counter of @dev and return 1.
+ *
+ * Otherwise, return 0 without changing the usage counter.
*
* If @ign_usage_count is %true, this function can be used to prevent suspending
* the device when its runtime PM status is %RPM_ACTIVE.
@@ -1216,7 +1230,8 @@ static int pm_runtime_get_conditional(struct device *dev, bool ign_usage_count)
retval = -EINVAL;
} else if (dev->power.runtime_status != RPM_ACTIVE) {
retval = 0;
- } else if (ign_usage_count) {
+ } else if (ign_usage_count || (!dev->power.ignore_children &&
+ atomic_read(&dev->power.child_count) > 0)) {
retval = 1;
atomic_inc(&dev->power.usage_count);
} else {
@@ -1249,10 +1264,16 @@ EXPORT_SYMBOL_GPL(pm_runtime_get_if_active);
* @dev: Target device.
*
* Increment the runtime PM usage counter of @dev if its runtime PM status is
- * %RPM_ACTIVE and its runtime PM usage counter is greater than 0, in which case
- * it returns 1. If the device is in a different state or its usage_count is 0,
- * 0 is returned. -EINVAL is returned if runtime PM is disabled for the device,
- * in which case also the usage_count will remain unmodified.
+ * %RPM_ACTIVE and either its runtime PM usage counter is greater than 0, or it
+ * is not ignoring children and its active child count is nonzero. 1 is
+ * returned in this case.
+ *
+ * If @dev is in a different state or it is not in use (that is, its usage
+ * counter is 0 and it is either ignoring children or its active child count
+ * is 0), 0 is returned.
+ *
+ * -EINVAL is returned if runtime PM is disabled for the device, in which case
+ * also the usage counter of @dev is not updated.
*/
int pm_runtime_get_if_in_use(struct device *dev)
{
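With the reworded semantics above, the conditional get now also succeeds for a device whose active children keep it in use. Callers typically use it to touch the hardware only while the device is guaranteed to stay powered; a usage sketch (the driver helper is hypothetical):

if (pm_runtime_get_if_in_use(dev) > 0) {
	foo_read_hw_counters(dev);	/* hypothetical: safe, device stays active */
	pm_runtime_put(dev);
} /* otherwise skip; the next runtime resume can pick the work up */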
@@ -1827,7 +1848,7 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
- dev->power.needs_force_resume = 0;
+ dev->power.needs_force_resume = false;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@@ -1854,6 +1875,11 @@ void pm_runtime_reinit(struct device *dev)
pm_runtime_put(dev->parent);
}
}
+ /*
+ * Clear power.needs_force_resume in case it has been set by
+ * pm_runtime_force_suspend() invoked from a driver remove callback.
+ */
+ dev->power.needs_force_resume = false;
}
/**
@@ -1941,13 +1967,23 @@ void pm_runtime_drop_link(struct device_link *link)
pm_request_idle(link->supplier);
}
-bool pm_runtime_need_not_resume(struct device *dev)
+static pm_callback_t get_callback(struct device *dev, size_t cb_offset)
{
- return atomic_read(&dev->power.usage_count) <= 1 &&
- (atomic_read(&dev->power.child_count) == 0 ||
- dev->power.ignore_children);
+ /*
+ * Setting power.strict_midlayer means that the middle layer
+ * code does not want its runtime PM callbacks to be invoked via
+ * pm_runtime_force_suspend() and pm_runtime_force_resume(), so
+ * return a direct pointer to the driver callback in that case.
+ */
+ if (dev_pm_strict_midlayer_is_set(dev))
+ return __rpm_get_driver_callback(dev, cb_offset);
+
+ return __rpm_get_callback(dev, cb_offset);
}
+#define GET_CALLBACK(dev, callback) \
+ get_callback(dev, offsetof(struct dev_pm_ops, callback))
+
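For reference, GET_CALLBACK(dev, runtime_suspend) as used by pm_runtime_force_suspend() below expands to

	get_callback(dev, offsetof(struct dev_pm_ops, runtime_suspend))

so the callback is resolved either through the full __rpm_get_callback() chain or, when power.strict_midlayer is set, straight from the driver's dev_pm_ops.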
/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
@@ -1964,10 +2000,6 @@ bool pm_runtime_need_not_resume(struct device *dev)
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
- *
- * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
- * state where this function has called the ->runtime_suspend callback but the
- * PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
@@ -1975,10 +2007,10 @@ int pm_runtime_force_suspend(struct device *dev)
int ret;
pm_runtime_disable(dev);
- if (pm_runtime_status_suspended(dev))
+ if (pm_runtime_status_suspended(dev) || dev->power.needs_force_resume)
return 0;
- callback = RPM_GET_CALLBACK(dev, runtime_suspend);
+ callback = GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq_check(dev, true);
ret = callback ? callback(dev) : 0;
@@ -1990,15 +2022,16 @@ int pm_runtime_force_suspend(struct device *dev)
/*
* If the device can stay in suspend after the system-wide transition
* to the working state that will follow, drop the children counter of
- * its parent, but set its status to RPM_SUSPENDED anyway in case this
- * function will be called again for it in the meantime.
+ * its parent and the usage counters of its suppliers. Otherwise, set
+ * power.needs_force_resume to let pm_runtime_force_resume() know that
+ * the device needs to be taken care of and to prevent this function
+ * from handling the device again if it is passed to it once more.
*/
- if (pm_runtime_need_not_resume(dev)) {
+ if (pm_runtime_need_not_resume(dev))
pm_runtime_set_suspended(dev);
- } else {
- __update_runtime_status(dev, RPM_SUSPENDED);
- dev->power.needs_force_resume = 1;
- }
+ else
+ dev->power.needs_force_resume = true;
return 0;
@@ -2009,33 +2042,37 @@ err:
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+#ifdef CONFIG_PM_SLEEP
+
/**
* pm_runtime_force_resume - Force a device into resume state if needed.
* @dev: Device to resume.
*
- * Prior invoking this function we expect the user to have brought the device
- * into low power state by a call to pm_runtime_force_suspend(). Here we reverse
- * those actions and bring the device into full power, if it is expected to be
- * used on system resume. In the other case, we defer the resume to be managed
- * via runtime PM.
+ * This function expects that either pm_runtime_force_suspend() has put the
+ * device into a low-power state prior to calling it, or the device had been
+ * runtime-suspended before the preceding system-wide suspend transition and it
+ * was left in suspend during that transition.
*
- * Typically this function may be invoked from a system resume callback.
+ * The actions carried out by pm_runtime_force_suspend(), or by a runtime
+ * suspend in general, are reversed and the device is brought back into full
+ * power if it is expected to be used on system resume, which is the case when
+ * its needs_force_resume flag is set or when its smart_suspend flag is set and
+ * its runtime PM status is "active".
+ *
+ * In other cases, the resume is deferred to be managed via runtime PM.
+ *
+ * Typically, this function may be invoked from a system resume callback.
*/
int pm_runtime_force_resume(struct device *dev)
{
int (*callback)(struct device *);
int ret = 0;
- if (!dev->power.needs_force_resume)
+ if (!dev->power.needs_force_resume && (!dev_pm_smart_suspend(dev) ||
+ pm_runtime_status_suspended(dev)))
goto out;
- /*
- * The value of the parent's children counter is correct already, so
- * just update the status of the device.
- */
- __update_runtime_status(dev, RPM_ACTIVE);
-
- callback = RPM_GET_CALLBACK(dev, runtime_resume);
+ callback = GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq_check(dev, false);
ret = callback ? callback(dev) : 0;
@@ -2046,9 +2083,30 @@ int pm_runtime_force_resume(struct device *dev)
}
pm_runtime_mark_last_busy(dev);
+
out:
- dev->power.needs_force_resume = 0;
+ /*
+ * The smart_suspend flag can be cleared here because it is not going
+ * to be necessary until the next system-wide suspend transition that
+ * will update it again.
+ */
+ dev->power.smart_suspend = false;
+ /*
+ * Also clear needs_force_resume to make this function skip devices that
+ * it has already handled.
+ */
+ dev->power.needs_force_resume = false;
+
pm_runtime_enable(dev);
return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
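This pair of helpers is normally wired up as a driver's system sleep callbacks so that the runtime PM path does the actual device handling; a minimal sketch (the foo_* callbacks are hypothetical):

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};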
+
+bool pm_runtime_need_not_resume(struct device *dev)
+{
+ return atomic_read(&dev->power.usage_count) <= 1 &&
+ (atomic_read(&dev->power.child_count) == 0 ||
+ dev->power.ignore_children);
+}
+
+#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index fb84cda92a75..c9b4c04b1cf6 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -470,10 +470,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_only) {
@@ -486,7 +482,6 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
map->cache_only = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -517,10 +512,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
if (err)
return count;
- err = debugfs_file_get(file->f_path.dentry);
- if (err)
- return err;
-
map->lock(map->lock_arg);
if (new_val && !map->cache_bypass) {
@@ -532,7 +523,6 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
map->cache_bypass = new_val;
map->unlock(map->lock_arg);
- debugfs_file_put(file->f_path.dentry);
return count;
}
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 64ea340950b6..95c5bf2a78ee 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -736,7 +736,7 @@ static void stride(struct kunit *test)
}
}
-static struct regmap_range_cfg test_range = {
+static const struct regmap_range_cfg test_range = {
.selector_reg = 1,
.selector_mask = 0xff,
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f2843f814675..1f3f782a04ba 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1173,6 +1173,8 @@ err_name:
err_map:
kfree(map);
err:
+ if (bus && bus->free_on_exit)
+ kfree(bus);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__regmap_init);
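The added kfree() keeps the err: exit symmetric with the success path when the bus descriptor was handed over with free_on_exit set, i.e. ownership passed to __regmap_init() and must not leak on failure. The general goto-unwind idiom, for reference (generic sketch, not regmap's actual labels):

static int setup_pair(void **pa, void **pb)
{
	*pa = kmalloc(64, GFP_KERNEL);
	if (!*pa)
		goto err;

	*pb = kmalloc(64, GFP_KERNEL);
	if (!*pb)
		goto err_free_a;

	return 0;

err_free_a:
	kfree(*pa);
err:
	return -ENOMEM;		/* everything allocated so far is released */
}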
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 0f70e2374e7f..df38fb364904 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -256,49 +256,6 @@ config BLK_DEV_RAM_SIZE
The default value is 4096 kilobytes. Only change this if you know
what you are doing.
-config CDROM_PKTCDVD
- tristate "Packet writing on CD/DVD media (DEPRECATED)"
- depends on !UML
- depends on SCSI
- select CDROM
- help
- Note: This driver is deprecated and will be removed from the
- kernel in the near future!
-
- If you have a CDROM/DVD drive that supports packet writing, say
- Y to include support. It should work with any MMC/Mt Fuji
- compliant ATAPI or SCSI drive, which is just about any newer
- DVD/CD writer.
-
- Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs
- is possible.
- DVD-RW disks must be in restricted overwrite mode.
-
- See the file <file:Documentation/cdrom/packet-writing.rst>
- for further information on the use of this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called pktcdvd.
-
-config CDROM_PKTCDVD_BUFFERS
- int "Free buffers for data gathering"
- depends on CDROM_PKTCDVD
- default "8"
- help
- This controls the maximum number of active concurrent packets. More
- concurrent packets can increase write performance, but also require
- more memory. Each concurrent packet will require approximately 64Kb
- of non-swappable kernel memory, memory which will be allocated when
- a disc is opened for writing.
-
-config CDROM_PKTCDVD_WCACHE
- bool "Enable write caching"
- depends on CDROM_PKTCDVD
- help
- If enabled, write caching will be set for the CD-R/W device. For now
- this option is dangerous unless the CD-RW media is known good, as we
- don't do deferred write error handling yet.
-
config ATA_OVER_ETH
tristate "ATA over Ethernet support"
depends on NET
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 097707aca725..a695ce74ef22 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_N64CART) += n64cart.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index e5a2e5f7887b..975024cf03c5 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2500,7 +2500,11 @@ static int handle_write_conflicts(struct drbd_device *device,
peer_req->w.cb = superseded ? e_send_superseded :
e_send_retry_write;
list_add_tail(&peer_req->w.list, &device->done_ee);
- queue_work(connection->ack_sender, &peer_req->peer_device->send_acks_work);
+ /* the matching kref_put() is in drbd_send_acks_wf() */
+ kref_get(&device->kref);
+ if (!queue_work(connection->ack_sender,
+ &peer_req->peer_device->send_acks_work))
+ kref_put(&device->kref, drbd_destroy_device);
err = -ENOENT;
goto out;
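The hunk above takes a device reference on behalf of the queued work and immediately drops it if queue_work() returns false (the work was already pending), so exactly one reference reaches drbd_send_acks_wf(). The general shape of the pattern (sketch with made-up object names):

static void obj_kick_work(struct obj *obj)
{
	kref_get(&obj->kref);		/* reference owned by the work item */
	if (!queue_work(obj->wq, &obj->work))
		kref_put(&obj->kref, obj_release);	/* already queued: undo */
}

static void obj_work_fn(struct work_struct *work)
{
	struct obj *obj = container_of(work, struct obj, work);

	do_something(obj);				/* hypothetical */
	kref_put(&obj->kref, obj_release);		/* drop the work's reference */
}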
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e97432032f01..24be0c2c4075 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3411,7 +3411,7 @@ static int fd_locked_ioctl(struct block_device *bdev, blk_mode_t mode,
struct floppy_max_errors max_errors;
struct floppy_drive_params dp;
} inparam; /* parameters coming from user space */
- const void *outparam; /* parameters passed back to user space */
+ const void *outparam = NULL; /* parameters passed back to user space */
/* convert compatibility eject ioctls into floppy eject ioctl.
* We do this in order to provide a means to eject floppy disks before
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 500840e4a74e..1b6ee91f8eb9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -308,14 +308,13 @@ end_io:
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
- struct loop_device *lo = rq->q->queuedata;
if (!atomic_dec_and_test(&cmd->ref))
return;
kfree(cmd->bvec);
cmd->bvec = NULL;
if (req_op(rq) == REQ_OP_WRITE)
- file_end_write(lo->lo_backing_file);
+ kiocb_end_write(&cmd->iocb);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
@@ -391,7 +390,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
}
if (rw == ITER_SOURCE) {
- file_start_write(lo->lo_backing_file);
+ kiocb_start_write(&cmd->iocb);
ret = file->f_op->write_iter(&cmd->iocb, &iter);
} else
ret = file->f_op->read_iter(&cmd->iocb, &iter);
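kiocb_start_write() must be balanced by exactly one kiocb_end_write() on the same kiocb; here the end is moved into the completion path (lo_rw_aio_do_completion() above), which runs only after the last reference to the command is dropped. Roughly, assuming cmd->iocb was set up by lo_rw_aio():

kiocb_start_write(&cmd->iocb);			/* before ->write_iter() */
ret = file->f_op->write_iter(&cmd->iocb, &iter);
/* ... asynchronous completion ... */
kiocb_end_write(&cmd->iocb);			/* once, when the write is done */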
@@ -1432,17 +1431,34 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
return 0;
}
-static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+static int loop_set_block_size(struct loop_device *lo, blk_mode_t mode,
+ struct block_device *bdev, unsigned long arg)
{
struct queue_limits lim;
unsigned int memflags;
int err = 0;
- if (lo->lo_state != Lo_bound)
- return -ENXIO;
+ /*
+ * If we don't hold an exclusive handle on the device, upgrade to one
+ * here to avoid changing the device under its exclusive owner.
+ */
+ if (!(mode & BLK_OPEN_EXCL)) {
+ err = bd_prepare_to_claim(bdev, loop_set_block_size, NULL);
+ if (err)
+ return err;
+ }
+
+ err = mutex_lock_killable(&lo->lo_mutex);
+ if (err)
+ goto abort_claim;
+
+ if (lo->lo_state != Lo_bound) {
+ err = -ENXIO;
+ goto unlock;
+ }
if (lo->lo_queue->limits.logical_block_size == arg)
- return 0;
+ goto unlock;
sync_blockdev(lo->lo_device);
invalidate_bdev(lo->lo_device);
@@ -1455,6 +1471,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
+unlock:
+ mutex_unlock(&lo->lo_mutex);
+abort_claim:
+ if (!(mode & BLK_OPEN_EXCL))
+ bd_abort_claiming(bdev, loop_set_block_size);
return err;
}
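From userspace the ioctl is unchanged; only the in-kernel claiming and locking differ. A minimal usage sketch (standard uapi, error handling trimmed):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int set_loop_block_size(const char *loopdev, unsigned long bs)
{
	int fd = open(loopdev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, LOOP_SET_BLOCK_SIZE, bs);	/* e.g. bs = 4096 */
	close(fd);
	return ret;
}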
@@ -1473,9 +1494,6 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
case LOOP_SET_DIRECT_IO:
err = loop_set_dio(lo, arg);
break;
- case LOOP_SET_BLOCK_SIZE:
- err = loop_set_block_size(lo, arg);
- break;
default:
err = -EINVAL;
}
@@ -1530,9 +1548,12 @@ static int lo_ioctl(struct block_device *bdev, blk_mode_t mode,
break;
case LOOP_GET_STATUS64:
return loop_get_status64(lo, argp);
+ case LOOP_SET_BLOCK_SIZE:
+ if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return loop_set_block_size(lo, mode, bdev, arg);
case LOOP_SET_CAPACITY:
case LOOP_SET_DIRECT_IO:
- case LOOP_SET_BLOCK_SIZE:
if (!(mode & BLK_OPEN_WRITE) && !capable(CAP_SYS_ADMIN))
return -EPERM;
fallthrough;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 66ce6b81c7d9..8fc7761397bd 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -2040,11 +2040,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* @dir Direction (read or write)
*
* return value
- * None
+ * 0 The IO was submitted successfully.
+ * -ENOMEM The DMA mapping failed.
*/
-static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
- struct mtip_cmd *command,
- struct blk_mq_hw_ctx *hctx)
+static int mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
+ struct mtip_cmd *command,
+ struct blk_mq_hw_ctx *hctx)
{
struct mtip_cmd_hdr *hdr =
dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
@@ -2056,12 +2057,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
unsigned int nents;
/* Map the scatter list for DMA access */
- nents = blk_rq_map_sg(rq, command->sg);
- nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
+ command->scatter_ents = blk_rq_map_sg(rq, command->sg);
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ command->scatter_ents, dma_dir);
+ if (!nents)
+ return -ENOMEM;
- prefetch(&port->flags);
- command->scatter_ents = nents;
+ prefetch(&port->flags);
/*
* The number of retries for this command before it is
@@ -2112,11 +2115,13 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) {
set_bit(rq->tag, port->cmds_to_issue);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
- return;
+ return 0;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, rq->tag);
+
+ return 0;
}
/*
@@ -3315,7 +3320,9 @@ static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- mtip_hw_submit_io(dd, rq, cmd, hctx);
+ if (mtip_hw_submit_io(dd, rq, cmd, hctx))
+ return BLK_STS_IOERR;
+
return BLK_STS_OK;
}
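dma_map_sg() returns the number of DMA-mapped entries, or 0 on failure, so the submit path has to be able to report an error. Note that the later dma_unmap_sg() must be passed the original blk_rq_map_sg() count, which is why command->scatter_ents keeps that value. A generic sketch of the shape:

static int map_and_issue(struct device *dma_dev, struct request *rq,
			 struct scatterlist *sglist)
{
	int sg_count = blk_rq_map_sg(rq, sglist);	/* CPU-side entries */
	int nents = dma_map_sg(dma_dev, sglist, sg_count, DMA_TO_DEVICE);

	if (!nents)
		return -ENOMEM;		/* mapping failed, fail the request */

	/* ... issue the hardware command using the nents mapped entries ... */

	dma_unmap_sg(dma_dev, sglist, sg_count, DMA_TO_DEVICE); /* original count */
	return 0;
}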
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2592bd19ebc1..6463d0e8d0ce 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1473,7 +1473,17 @@ static int nbd_start_device(struct nbd_device *nbd)
return -EINVAL;
}
- blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
+retry:
+ mutex_unlock(&nbd->config_lock);
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, num_connections);
+ mutex_lock(&nbd->config_lock);
+
+ /* if another code path updated nr_hw_queues, retry until it succeeds */
+ if (num_connections != config->num_connections) {
+ num_connections = config->num_connections;
+ goto retry;
+ }
+
nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
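blk_mq_update_nr_hw_queues() is called with config_lock dropped here, presumably so the queue update does not nest inside the nbd configuration lock, which means the snapshot of num_connections has to be revalidated once the lock is retaken. The general drop-lock/retry shape (sketch with made-up names):

mutex_lock(&cfg->lock);
for (;;) {
	unsigned int snap = cfg->num_connections;

	mutex_unlock(&cfg->lock);
	update_hw_queues(snap);		/* hypothetical: may sleep, no lock held */
	mutex_lock(&cfg->lock);

	if (snap == cfg->num_connections)
		break;			/* nothing changed while unlocked */
}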
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
deleted file mode 100644
index d5cc7bd2875c..000000000000
--- a/drivers/block/pktcdvd.c
+++ /dev/null
@@ -1,2916 +0,0 @@
-/*
- * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
- * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
- * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
- *
- * May be copied or modified under the terms of the GNU General Public
- * License. See linux/COPYING for more information.
- *
- * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
- * DVD-RAM devices.
- *
- * Theory of operation:
- *
- * At the lowest level, there is the standard driver for the CD/DVD device,
- * such as drivers/scsi/sr.c. This driver can handle read and write requests,
- * but it doesn't know anything about the special restrictions that apply to
- * packet writing. One restriction is that write requests must be aligned to
- * packet boundaries on the physical media, and the size of a write request
- * must be equal to the packet size. Another restriction is that a
- * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
- * command, if the previous command was a write.
- *
- * The purpose of the packet writing driver is to hide these restrictions from
- * higher layers, such as file systems, and present a block device that can be
- * randomly read and written using 2kB-sized blocks.
- *
- * The lowest layer in the packet writing driver is the packet I/O scheduler.
- * Its data is defined by the struct packet_iosched and includes two bio
- * queues with pending read and write requests. These queues are processed
- * by the pkt_iosched_process_queue() function. The write requests in this
- * queue are already properly aligned and sized. This layer is responsible for
- * issuing the flush cache commands and scheduling the I/O in a good order.
- *
- * The next layer transforms unaligned write requests to aligned writes. This
- * transformation requires reading missing pieces of data from the underlying
- * block device, assembling the pieces to full packets and queuing them to the
- * packet I/O scheduler.
- *
- * At the top layer there is a custom ->submit_bio function that forwards
- * read requests directly to the iosched queue and puts write requests in the
- * unaligned write queue. A kernel thread performs the necessary read
- * gathering to convert the unaligned writes to aligned writes and then feeds
- * them to the packet I/O scheduler.
- *
- *************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/backing-dev.h>
-#include <linux/compat.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/kernel.h>
-#include <linux/kthread.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/nospec.h>
-#include <linux/pktcdvd.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_ioctl.h>
-
-#include <linux/unaligned.h>
-
-#define DRIVER_NAME "pktcdvd"
-
-#define MAX_SPEED 0xffff
-
-static DEFINE_MUTEX(pktcdvd_mutex);
-static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
-static struct proc_dir_entry *pkt_proc;
-static int pktdev_major;
-static int write_congestion_on = PKT_WRITE_CONGESTION_ON;
-static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
-static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
-static mempool_t psd_pool;
-static struct bio_set pkt_bio_set;
-
-/* /sys/class/pktcdvd */
-static struct class class_pktcdvd;
-static struct dentry *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
-
-/* forward declaration */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
-static int pkt_remove_dev(dev_t pkt_dev);
-
-static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
-{
- return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
-}
-
-/**********************************************************
- * sysfs interface for pktcdvd
- * by (C) 2006 Thomas Maier <balagi@justmail.de>
-
- /sys/class/pktcdvd/pktcdvd[0-7]/
- stat/reset
- stat/packets_started
- stat/packets_finished
- stat/kb_written
- stat/kb_read
- stat/kb_read_gather
- write_queue/size
- write_queue/congestion_off
- write_queue/congestion_on
- **********************************************************/
-
-static ssize_t packets_started_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
-}
-static DEVICE_ATTR_RO(packets_started);
-
-static ssize_t packets_finished_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
-}
-static DEVICE_ATTR_RO(packets_finished);
-
-static ssize_t kb_written_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
-}
-static DEVICE_ATTR_RO(kb_written);
-
-static ssize_t kb_read_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
-}
-static DEVICE_ATTR_RO(kb_read);
-
-static ssize_t kb_read_gather_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
-}
-static DEVICE_ATTR_RO(kb_read_gather);
-
-static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
-
- if (len > 0) {
- pd->stats.pkt_started = 0;
- pd->stats.pkt_ended = 0;
- pd->stats.secs_w = 0;
- pd->stats.secs_rg = 0;
- pd->stats.secs_r = 0;
- }
- return len;
-}
-static DEVICE_ATTR_WO(reset);
-
-static struct attribute *pkt_stat_attrs[] = {
- &dev_attr_packets_finished.attr,
- &dev_attr_packets_started.attr,
- &dev_attr_kb_read.attr,
- &dev_attr_kb_written.attr,
- &dev_attr_kb_read_gather.attr,
- &dev_attr_reset.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_stat_group = {
- .name = "stat",
- .attrs = pkt_stat_attrs,
-};
-
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
- spin_unlock(&pd->lock);
- return n;
-}
-static DEVICE_ATTR_RO(size);
-
-static void init_write_congestion_marks(int* lo, int* hi)
-{
- if (*hi > 0) {
- *hi = max(*hi, 500);
- *hi = min(*hi, 1000000);
- if (*lo <= 0)
- *lo = *hi - 100;
- else {
- *lo = min(*lo, *hi - 100);
- *lo = max(*lo, 100);
- }
- } else {
- *hi = -1;
- *lo = -1;
- }
-}
-
-static ssize_t congestion_off_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_off_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_off = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_off);
-
-static ssize_t congestion_on_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int n;
-
- spin_lock(&pd->lock);
- n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return n;
-}
-
-static ssize_t congestion_on_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct pktcdvd_device *pd = dev_get_drvdata(dev);
- int val, ret;
-
- ret = kstrtoint(buf, 10, &val);
- if (ret)
- return ret;
-
- spin_lock(&pd->lock);
- pd->write_congestion_on = val;
- init_write_congestion_marks(&pd->write_congestion_off, &pd->write_congestion_on);
- spin_unlock(&pd->lock);
- return len;
-}
-static DEVICE_ATTR_RW(congestion_on);
-
-static struct attribute *pkt_wq_attrs[] = {
- &dev_attr_congestion_on.attr,
- &dev_attr_congestion_off.attr,
- &dev_attr_size.attr,
- NULL,
-};
-
-static const struct attribute_group pkt_wq_group = {
- .name = "write_queue",
- .attrs = pkt_wq_attrs,
-};
-
-static const struct attribute_group *pkt_groups[] = {
- &pkt_stat_group,
- &pkt_wq_group,
- NULL,
-};
-
-static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd)) {
- pd->dev = device_create_with_groups(&class_pktcdvd, NULL,
- MKDEV(0, 0), pd, pkt_groups,
- "%s", pd->disk->disk_name);
- if (IS_ERR(pd->dev))
- pd->dev = NULL;
- }
-}
-
-static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (class_is_registered(&class_pktcdvd))
- device_unregister(pd->dev);
-}
-
-
-/********************************************************************
- /sys/class/pktcdvd/
- add map block device
- remove unmap packet dev
- device_map show mappings
- *******************************************************************/
-
-static ssize_t device_map_show(const struct class *c, const struct class_attribute *attr,
- char *data)
-{
- int n = 0;
- int idx;
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- struct pktcdvd_device *pd = pkt_devs[idx];
- if (!pd)
- continue;
- n += sysfs_emit_at(data, n, "%s %u:%u %u:%u\n",
- pd->disk->disk_name,
- MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
- MAJOR(file_bdev(pd->bdev_file)->bd_dev),
- MINOR(file_bdev(pd->bdev_file)->bd_dev));
- }
- mutex_unlock(&ctl_mutex);
- return n;
-}
-static CLASS_ATTR_RO(device_map);
-
-static ssize_t add_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
-
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- /* pkt_setup_dev() expects caller to hold reference to self */
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
-
- pkt_setup_dev(MKDEV(major, minor), NULL);
-
- module_put(THIS_MODULE);
-
- return count;
- }
-
- return -EINVAL;
-}
-static CLASS_ATTR_WO(add);
-
-static ssize_t remove_store(const struct class *c, const struct class_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int major, minor;
- if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
- pkt_remove_dev(MKDEV(major, minor));
- return count;
- }
- return -EINVAL;
-}
-static CLASS_ATTR_WO(remove);
-
-static struct attribute *class_pktcdvd_attrs[] = {
- &class_attr_add.attr,
- &class_attr_remove.attr,
- &class_attr_device_map.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(class_pktcdvd);
-
-static struct class class_pktcdvd = {
- .name = DRIVER_NAME,
- .class_groups = class_pktcdvd_groups,
-};
-
-static int pkt_sysfs_init(void)
-{
- /*
- * create control files in sysfs
- * /sys/class/pktcdvd/...
- */
- return class_register(&class_pktcdvd);
-}
-
-static void pkt_sysfs_cleanup(void)
-{
- class_unregister(&class_pktcdvd);
-}
-
-/********************************************************************
- entries in debugfs
-
- /sys/kernel/debug/pktcdvd[0-7]/
- info
-
- *******************************************************************/
-
-static void pkt_count_states(struct pktcdvd_device *pd, int *states)
-{
- struct packet_data *pkt;
- int i;
-
- for (i = 0; i < PACKET_NUM_STATES; i++)
- states[i] = 0;
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- states[pkt->state]++;
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-static int pkt_seq_show(struct seq_file *m, void *p)
-{
- struct pktcdvd_device *pd = m->private;
- char *msg;
- int states[PACKET_NUM_STATES];
-
- seq_printf(m, "Writer %s mapped to %pg:\n", pd->disk->disk_name,
- file_bdev(pd->bdev_file));
-
- seq_printf(m, "\nSettings:\n");
- seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
-
- if (pd->settings.write_type == 0)
- msg = "Packet";
- else
- msg = "Unknown";
- seq_printf(m, "\twrite type:\t\t%s\n", msg);
-
- seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
- seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
-
- seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
-
- if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
- msg = "Mode 1";
- else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
- msg = "Mode 2";
- else
- msg = "Unknown";
- seq_printf(m, "\tblock mode:\t\t%s\n", msg);
-
- seq_printf(m, "\nStatistics:\n");
- seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
- seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
- seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
- seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
- seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
-
- seq_printf(m, "\nMisc:\n");
- seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
- seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
- seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
- seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
- seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
- seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
-
- seq_printf(m, "\nQueue state:\n");
- seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
- seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
- seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", pd->current_sector);
-
- pkt_count_states(pd, states);
- seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
- pd->write_congestion_off,
- pd->write_congestion_on);
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(pkt_seq);
-
-static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- pd->dfs_d_root = debugfs_create_dir(pd->disk->disk_name, pkt_debugfs_root);
-
- pd->dfs_f_info = debugfs_create_file("info", 0444, pd->dfs_d_root,
- pd, &pkt_seq_fops);
-}
-
-static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
-{
- if (!pkt_debugfs_root)
- return;
- debugfs_remove(pd->dfs_f_info);
- debugfs_remove(pd->dfs_d_root);
- pd->dfs_f_info = NULL;
- pd->dfs_d_root = NULL;
-}
-
-static void pkt_debugfs_init(void)
-{
- pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
-}
-
-static void pkt_debugfs_cleanup(void)
-{
- debugfs_remove(pkt_debugfs_root);
- pkt_debugfs_root = NULL;
-}
-
-/* ----------------------------------------------------------*/
-
-
-static void pkt_bio_finished(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
- if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
- dev_dbg(ddev, "queue empty\n");
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
- }
-}
-
-/*
- * Allocate a packet_data struct
- */
-static struct packet_data *pkt_alloc_packet_data(int frames)
-{
- int i;
- struct packet_data *pkt;
-
- pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
- if (!pkt)
- goto no_pkt;
-
- pkt->frames = frames;
- pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
- if (!pkt->w_bio)
- goto no_bio;
-
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
- pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
- if (!pkt->pages[i])
- goto no_page;
- }
-
- spin_lock_init(&pkt->lock);
- bio_list_init(&pkt->orig_bios);
-
- for (i = 0; i < frames; i++) {
- pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
- if (!pkt->r_bios[i])
- goto no_rd_bio;
- }
-
- return pkt;
-
-no_rd_bio:
- for (i = 0; i < frames; i++)
- kfree(pkt->r_bios[i]);
-no_page:
- for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
- if (pkt->pages[i])
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
-no_bio:
- kfree(pkt);
-no_pkt:
- return NULL;
-}
-
-/*
- * Free a packet_data struct
- */
-static void pkt_free_packet_data(struct packet_data *pkt)
-{
- int i;
-
- for (i = 0; i < pkt->frames; i++)
- kfree(pkt->r_bios[i]);
- for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
- __free_page(pkt->pages[i]);
- kfree(pkt->w_bio);
- kfree(pkt);
-}
-
-static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
-{
- struct packet_data *pkt, *next;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
-
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
- pkt_free_packet_data(pkt);
- }
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
-}
-
-static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
-{
- struct packet_data *pkt;
-
- BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
-
- while (nr_packets > 0) {
- pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
- if (!pkt) {
- pkt_shrink_pktlist(pd);
- return 0;
- }
- pkt->id = nr_packets;
- pkt->pd = pd;
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- nr_packets--;
- }
- return 1;
-}
-
-static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
-{
- struct rb_node *n = rb_next(&node->rb_node);
- if (!n)
- return NULL;
- return rb_entry(n, struct pkt_rb_node, rb_node);
-}
-
-static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- rb_erase(&node->rb_node, &pd->bio_queue);
- mempool_free(node, &pd->rb_pool);
- pd->bio_queue_size--;
- BUG_ON(pd->bio_queue_size < 0);
-}
-
-/*
- * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
- */
-static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
-{
- struct rb_node *n = pd->bio_queue.rb_node;
- struct rb_node *next;
- struct pkt_rb_node *tmp;
-
- if (!n) {
- BUG_ON(pd->bio_queue_size > 0);
- return NULL;
- }
-
- for (;;) {
- tmp = rb_entry(n, struct pkt_rb_node, rb_node);
- if (s <= tmp->bio->bi_iter.bi_sector)
- next = n->rb_left;
- else
- next = n->rb_right;
- if (!next)
- break;
- n = next;
- }
-
- if (s > tmp->bio->bi_iter.bi_sector) {
- tmp = pkt_rbtree_next(tmp);
- if (!tmp)
- return NULL;
- }
- BUG_ON(s > tmp->bio->bi_iter.bi_sector);
- return tmp;
-}
-
-/*
- * Insert a node into the pd->bio_queue rb tree.
- */
-static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
-{
- struct rb_node **p = &pd->bio_queue.rb_node;
- struct rb_node *parent = NULL;
- sector_t s = node->bio->bi_iter.bi_sector;
- struct pkt_rb_node *tmp;
-
- while (*p) {
- parent = *p;
- tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
- if (s < tmp->bio->bi_iter.bi_sector)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
- rb_link_node(&node->rb_node, parent, p);
- rb_insert_color(&node->rb_node, &pd->bio_queue);
- pd->bio_queue_size++;
-}
-
-/*
- * Send a packet_command to the underlying block device and
- * wait for completion.
- */
-static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- struct request_queue *q = bdev_get_queue(file_bdev(pd->bdev_file));
- struct scsi_cmnd *scmd;
- struct request *rq;
- int ret = 0;
-
- rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- scmd = blk_mq_rq_to_pdu(rq);
-
- if (cgc->buflen) {
- ret = blk_rq_map_kern(rq, cgc->buffer, cgc->buflen,
- GFP_NOIO);
- if (ret)
- goto out;
- }
-
- scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
- memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
-
- rq->timeout = 60*HZ;
- if (cgc->quiet)
- rq->rq_flags |= RQF_QUIET;
-
- blk_execute_rq(rq, false);
- if (scmd->result)
- ret = -EIO;
-out:
- blk_mq_free_request(rq);
- return ret;
-}
-
-static const char *sense_key_string(__u8 index)
-{
- static const char * const info[] = {
- "No sense", "Recovered error", "Not ready",
- "Medium error", "Hardware error", "Illegal request",
- "Unit attention", "Data protect", "Blank check",
- };
-
- return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
-}
-
-/*
- * A generic sense dump / resolve mechanism should be implemented across
- * all ATAPI + SCSI devices.
- */
-static void pkt_dump_sense(struct pktcdvd_device *pd,
- struct packet_command *cgc)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct scsi_sense_hdr *sshdr = cgc->sshdr;
-
- if (sshdr)
- dev_err(ddev, "%*ph - sense %02x.%02x.%02x (%s)\n",
- CDROM_PACKET_SIZE, cgc->cmd,
- sshdr->sense_key, sshdr->asc, sshdr->ascq,
- sense_key_string(sshdr->sense_key));
- else
- dev_err(ddev, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
-}
-
-/*
- * flush the drive cache to media
- */
-static int pkt_flush_cache(struct pktcdvd_device *pd)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_FLUSH_CACHE;
- cgc.quiet = 1;
-
- /*
- * the IMMED bit -- we default to not setting it, although that
- * would allow a much faster close, this is safer
- */
-#if 0
- cgc.cmd[1] = 1 << 1;
-#endif
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * speed is given as the normal factor, e.g. 4 for 4x
- */
-static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
- unsigned write_speed, unsigned read_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_SET_SPEED;
- put_unaligned_be16(read_speed, &cgc.cmd[2]);
- put_unaligned_be16(write_speed, &cgc.cmd[4]);
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
-
- return ret;
-}
-
-/*
- * Queue a bio for processing by the low-level CD device. Must be called
- * from process context.
- */
-static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
-{
- /*
- * Some CDRW drives can not handle writes larger than one packet,
- * even if the size is a multiple of the packet size.
- */
- bio->bi_opf |= REQ_NOMERGE;
-
- spin_lock(&pd->iosched.lock);
- if (bio_data_dir(bio) == READ)
- bio_list_add(&pd->iosched.read_queue, bio);
- else
- bio_list_add(&pd->iosched.write_queue, bio);
- spin_unlock(&pd->iosched.lock);
-
- atomic_set(&pd->iosched.attention, 1);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Process the queued read/write requests. This function handles special
- * requirements for CDRW drives:
- * - A cache flush command must be inserted before a read request if the
- * previous request was a write.
- * - Switching between reading and writing is slow, so don't do it more often
- * than necessary.
- * - Optimize for throughput at the expense of latency. This means that streaming
- * writes will never be interrupted by a read, but if the drive has to seek
- * before the next write, switch to reading instead if there are any pending
- * read requests.
- * - Set the read speed according to current usage pattern. When only reading
- * from the device, it's best to use the highest possible read speed, but
- * when switching often between reading and writing, it's better to have the
- * same read and write speeds.
- */
-static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (atomic_read(&pd->iosched.attention) == 0)
- return;
- atomic_set(&pd->iosched.attention, 0);
-
- for (;;) {
- struct bio *bio;
- int reads_queued, writes_queued;
-
- spin_lock(&pd->iosched.lock);
- reads_queued = !bio_list_empty(&pd->iosched.read_queue);
- writes_queued = !bio_list_empty(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!reads_queued && !writes_queued)
- break;
-
- if (pd->iosched.writing) {
- int need_write_seek = 1;
- spin_lock(&pd->iosched.lock);
- bio = bio_list_peek(&pd->iosched.write_queue);
- spin_unlock(&pd->iosched.lock);
- if (bio && (bio->bi_iter.bi_sector ==
- pd->iosched.last_write))
- need_write_seek = 0;
- if (need_write_seek && reads_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "write, waiting\n");
- break;
- }
- pkt_flush_cache(pd);
- pd->iosched.writing = 0;
- }
- } else {
- if (!reads_queued && writes_queued) {
- if (atomic_read(&pd->cdrw.pending_bios) > 0) {
- dev_dbg(ddev, "read, waiting\n");
- break;
- }
- pd->iosched.writing = 1;
- }
- }
-
- spin_lock(&pd->iosched.lock);
- if (pd->iosched.writing)
- bio = bio_list_pop(&pd->iosched.write_queue);
- else
- bio = bio_list_pop(&pd->iosched.read_queue);
- spin_unlock(&pd->iosched.lock);
-
- if (!bio)
- continue;
-
- if (bio_data_dir(bio) == READ)
- pd->iosched.successive_reads +=
- bio->bi_iter.bi_size >> 10;
- else {
- pd->iosched.successive_reads = 0;
- pd->iosched.last_write = bio_end_sector(bio);
- }
- if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
- if (pd->read_speed == pd->write_speed) {
- pd->read_speed = MAX_SPEED;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- } else {
- if (pd->read_speed != pd->write_speed) {
- pd->read_speed = pd->write_speed;
- pkt_set_speed(pd, pd->write_speed, pd->read_speed);
- }
- }
-
- atomic_inc(&pd->cdrw.pending_bios);
- submit_bio_noacct(bio);
- }
-}
-
-/*
- * Special care is needed if the underlying block device has a small
- * max_phys_segments value.
- */
-static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= queue_max_segments(q)) {
- /*
- * The cdrom device can handle one segment/frame
- */
- clear_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- if ((pd->settings.size << 9) / PAGE_SIZE <= queue_max_segments(q)) {
- /*
- * We can handle this case at the expense of some extra memory
- * copies during write operations
- */
- set_bit(PACKET_MERGE_SEGS, &pd->flags);
- return 0;
- }
-
- dev_err(ddev, "cdrom max_phys_segments too small\n");
- return -EIO;
-}
-
-static void pkt_end_io_read(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "bio=%p sec0=%llx sec=%llx err=%d\n",
- bio, pkt->sector, bio->bi_iter.bi_sector, bio->bi_status);
-
- if (bio->bi_status)
- atomic_inc(&pkt->io_errors);
- bio_uninit(bio);
- if (atomic_dec_and_test(&pkt->io_wait)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- pkt_bio_finished(pd);
-}
-
-static void pkt_end_io_packet_write(struct bio *bio)
-{
- struct packet_data *pkt = bio->bi_private;
- struct pktcdvd_device *pd = pkt->pd;
- BUG_ON(!pd);
-
- dev_dbg(disk_to_dev(pd->disk), "id=%d, err=%d\n", pkt->id, bio->bi_status);
-
- pd->stats.pkt_ended++;
-
- bio_uninit(bio);
- pkt_bio_finished(pd);
- atomic_dec(&pkt->io_wait);
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
-}
-
-/*
- * Schedule reads for the holes in a packet
- */
-static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int frames_read = 0;
- struct bio *bio;
- int f;
- char written[PACKET_MAX_SIZE];
-
- BUG_ON(bio_list_empty(&pkt->orig_bios));
-
- atomic_set(&pkt->io_wait, 0);
- atomic_set(&pkt->io_errors, 0);
-
- /*
- * Figure out which frames we need to read before we can write.
- */
- memset(written, 0, sizeof(written));
- spin_lock(&pkt->lock);
- bio_list_for_each(bio, &pkt->orig_bios) {
- int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
- (CD_FRAMESIZE >> 9);
- int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
- pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
- BUG_ON(first_frame < 0);
- BUG_ON(first_frame + num_frames > pkt->frames);
- for (f = first_frame; f < first_frame + num_frames; f++)
- written[f] = 1;
- }
- spin_unlock(&pkt->lock);
-
- if (pkt->cache_valid) {
- dev_dbg(ddev, "zone %llx cached\n", pkt->sector);
- goto out_account;
- }
-
- /*
- * Schedule reads for missing parts of the packet.
- */
- for (f = 0; f < pkt->frames; f++) {
- int p, offset;
-
- if (written[f])
- continue;
-
- bio = pkt->r_bios[f];
- bio_init(bio, file_bdev(pd->bdev_file), bio->bi_inline_vecs, 1,
- REQ_OP_READ);
- bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_end_io = pkt_end_io_read;
- bio->bi_private = pkt;
-
- p = (f * CD_FRAMESIZE) / PAGE_SIZE;
- offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
- dev_dbg(ddev, "Adding frame %d, page:%p offs:%d\n", f,
- pkt->pages[p], offset);
- if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
- BUG();
-
- atomic_inc(&pkt->io_wait);
- pkt_queue_bio(pd, bio);
- frames_read++;
- }
-
-out_account:
- dev_dbg(ddev, "need %d frames for zone %llx\n", frames_read, pkt->sector);
- pd->stats.pkt_started++;
- pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
-}
-
-/*
- * Find a packet matching zone, or the least recently used packet if
- * there is no match.
- */
-static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
-{
- struct packet_data *pkt;
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
- if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
- list_del_init(&pkt->list);
- if (pkt->sector != zone)
- pkt->cache_valid = 0;
- return pkt;
- }
- }
- BUG();
- return NULL;
-}
-
-static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- if (pkt->cache_valid) {
- list_add(&pkt->list, &pd->cdrw.pkt_free_list);
- } else {
- list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
- }
-}
-
-static inline void pkt_set_state(struct device *ddev, struct packet_data *pkt,
- enum packet_data_state state)
-{
- static const char *state_name[] = {
- "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
- };
- enum packet_data_state old_state = pkt->state;
-
- dev_dbg(ddev, "pkt %2d : s=%6llx %s -> %s\n",
- pkt->id, pkt->sector, state_name[old_state], state_name[state]);
-
- pkt->state = state;
-}
-
-/*
- * Scan the work queue to see if we can start a new packet.
- * returns non-zero if any work was done.
- */
-static int pkt_handle_queue(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *p;
- struct bio *bio = NULL;
- sector_t zone = 0; /* Suppress gcc warning */
- struct pkt_rb_node *node, *first_node;
- struct rb_node *n;
-
- atomic_set(&pd->scan_queue, 0);
-
- if (list_empty(&pd->cdrw.pkt_free_list)) {
- dev_dbg(ddev, "no pkt\n");
- return 0;
- }
-
- /*
- * Try to find a zone we are not already working on.
- */
- spin_lock(&pd->lock);
- first_node = pkt_rbtree_find(pd, pd->current_sector);
- if (!first_node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- first_node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- node = first_node;
- while (node) {
- bio = node->bio;
- zone = get_zone(bio->bi_iter.bi_sector, pd);
- list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
- if (p->sector == zone) {
- bio = NULL;
- goto try_next_bio;
- }
- }
- break;
-try_next_bio:
- node = pkt_rbtree_next(node);
- if (!node) {
- n = rb_first(&pd->bio_queue);
- if (n)
- node = rb_entry(n, struct pkt_rb_node, rb_node);
- }
- if (node == first_node)
- node = NULL;
- }
- spin_unlock(&pd->lock);
- if (!bio) {
- dev_dbg(ddev, "no bio\n");
- return 0;
- }
-
- pkt = pkt_get_packet_data(pd, zone);
-
- pd->current_sector = zone + pd->settings.size;
- pkt->sector = zone;
- BUG_ON(pkt->frames != pd->settings.size >> 2);
- pkt->write_size = 0;
-
- /*
- * Scan work queue for bios in the same zone and link them
- * to this packet.
- */
- spin_lock(&pd->lock);
- dev_dbg(ddev, "looking for zone %llx\n", zone);
- while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
- sector_t tmp = get_zone(node->bio->bi_iter.bi_sector, pd);
-
- bio = node->bio;
- dev_dbg(ddev, "found zone=%llx\n", tmp);
- if (tmp != zone)
- break;
- pkt_rbtree_erase(pd, node);
- spin_lock(&pkt->lock);
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
- spin_unlock(&pkt->lock);
- }
- /* check write congestion marks, and if bio_queue_size is
- * below, wake up any waiters
- */
- if (pd->congested &&
- pd->bio_queue_size <= pd->write_congestion_off) {
- pd->congested = false;
- wake_up_var(&pd->congested);
- }
- spin_unlock(&pd->lock);
-
- pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
- pkt_set_state(ddev, pkt, PACKET_WAITING_STATE);
- atomic_set(&pkt->run_sm, 1);
-
- spin_lock(&pd->cdrw.active_list_lock);
- list_add(&pkt->list, &pd->cdrw.pkt_active_list);
- spin_unlock(&pd->cdrw.active_list_lock);
-
- return 1;
-}
-
-/**
- * bio_list_copy_data - copy contents of data buffers from one chain of bios to
- * another
- * @src: source bio list
- * @dst: destination bio list
- *
- * Stops when it reaches the end of either the @src list or @dst list - that is,
- * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
- * bios).
- */
-static void bio_list_copy_data(struct bio *dst, struct bio *src)
-{
- struct bvec_iter src_iter = src->bi_iter;
- struct bvec_iter dst_iter = dst->bi_iter;
-
- while (1) {
- if (!src_iter.bi_size) {
- src = src->bi_next;
- if (!src)
- break;
-
- src_iter = src->bi_iter;
- }
-
- if (!dst_iter.bi_size) {
- dst = dst->bi_next;
- if (!dst)
- break;
-
- dst_iter = dst->bi_iter;
- }
-
- bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
- }
-}
-
-/*
- * Assemble a bio to write one packet and queue the bio for processing
- * by the underlying block device.
- */
-static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int f;
-
- bio_init(pkt->w_bio, file_bdev(pd->bdev_file), pkt->w_bio->bi_inline_vecs,
- pkt->frames, REQ_OP_WRITE);
- pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
- pkt->w_bio->bi_private = pkt;
-
- /* XXX: locking? */
- for (f = 0; f < pkt->frames; f++) {
- struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
- unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
-
- if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
- BUG();
- }
- dev_dbg(ddev, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
-
- /*
- * Fill-in bvec with data from orig_bios.
- */
- spin_lock(&pkt->lock);
- bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
-
- pkt_set_state(ddev, pkt, PACKET_WRITE_WAIT_STATE);
- spin_unlock(&pkt->lock);
-
- dev_dbg(ddev, "Writing %d frames for zone %llx\n", pkt->write_size, pkt->sector);
-
- if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
- pkt->cache_valid = 1;
- else
- pkt->cache_valid = 0;
-
- /* Start the write request */
- atomic_set(&pkt->io_wait, 1);
- pkt_queue_bio(pd, pkt->w_bio);
-}
-
-static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
-{
- struct bio *bio;
-
- if (status)
- pkt->cache_valid = 0;
-
- /* Finish all bios corresponding to this packet */
- while ((bio = bio_list_pop(&pkt->orig_bios))) {
- bio->bi_status = status;
- bio_endio(bio);
- }
-}
-
-static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- dev_dbg(ddev, "pkt %d\n", pkt->id);
-
- for (;;) {
- switch (pkt->state) {
- case PACKET_WAITING_STATE:
- if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
- return;
-
- pkt->sleep_time = 0;
- pkt_gather_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_READ_WAIT_STATE);
- break;
-
- case PACKET_READ_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (atomic_read(&pkt->io_errors) > 0) {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- } else {
- pkt_start_write(pd, pkt);
- }
- break;
-
- case PACKET_WRITE_WAIT_STATE:
- if (atomic_read(&pkt->io_wait) > 0)
- return;
-
- if (!pkt->w_bio->bi_status) {
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- } else {
- pkt_set_state(ddev, pkt, PACKET_RECOVERY_STATE);
- }
- break;
-
- case PACKET_RECOVERY_STATE:
- dev_dbg(ddev, "No recovery possible\n");
- pkt_set_state(ddev, pkt, PACKET_FINISHED_STATE);
- break;
-
- case PACKET_FINISHED_STATE:
- pkt_finish_packet(pkt, pkt->w_bio->bi_status);
- return;
-
- default:
- BUG();
- break;
- }
- }
-}
-
-static void pkt_handle_packets(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt, *next;
-
- /*
- * Run state machine for active packets
- */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0) {
- atomic_set(&pkt->run_sm, 0);
- pkt_run_state_machine(pd, pkt);
- }
- }
-
- /*
- * Move no longer active packets to the free list
- */
- spin_lock(&pd->cdrw.active_list_lock);
- list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
- if (pkt->state == PACKET_FINISHED_STATE) {
- list_del(&pkt->list);
- pkt_put_packet_data(pd, pkt);
- pkt_set_state(ddev, pkt, PACKET_IDLE_STATE);
- atomic_set(&pd->scan_queue, 1);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-}
-
-/*
- * kcdrwd is woken up when writes have been queued for one of our
- * registered devices
- */
-static int kcdrwd(void *foobar)
-{
- struct pktcdvd_device *pd = foobar;
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_data *pkt;
- int states[PACKET_NUM_STATES];
- long min_sleep_time, residue;
-
- set_user_nice(current, MIN_NICE);
- set_freezable();
-
- for (;;) {
- DECLARE_WAITQUEUE(wait, current);
-
- /*
- * Wait until there is something to do
- */
- add_wait_queue(&pd->wqueue, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
-
- /* Check if we need to run pkt_handle_queue */
- if (atomic_read(&pd->scan_queue) > 0)
- goto work_to_do;
-
- /* Check if we need to run the state machine for some packet */
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (atomic_read(&pkt->run_sm) > 0)
- goto work_to_do;
- }
-
- /* Check if we need to process the iosched queues */
- if (atomic_read(&pd->iosched.attention) != 0)
- goto work_to_do;
-
- /* Otherwise, go to sleep */
- pkt_count_states(pd, states);
- dev_dbg(ddev, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
- states[0], states[1], states[2], states[3], states[4], states[5]);
-
- min_sleep_time = MAX_SCHEDULE_TIMEOUT;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
- min_sleep_time = pkt->sleep_time;
- }
-
- dev_dbg(ddev, "sleeping\n");
- residue = schedule_timeout(min_sleep_time);
- dev_dbg(ddev, "wake up\n");
-
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (!pkt->sleep_time)
- continue;
- pkt->sleep_time -= min_sleep_time - residue;
- if (pkt->sleep_time <= 0) {
- pkt->sleep_time = 0;
- atomic_inc(&pkt->run_sm);
- }
- }
-
- if (kthread_should_stop())
- break;
- }
-work_to_do:
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&pd->wqueue, &wait);
-
- if (kthread_should_stop())
- break;
-
- /*
- * if pkt_handle_queue returns true, we can queue
- * another request.
- */
- while (pkt_handle_queue(pd))
- ;
-
- /*
- * Handle packet state machine
- */
- pkt_handle_packets(pd);
-
- /*
- * Handle iosched queues
- */
- pkt_iosched_process_queue(pd);
- }
-
- return 0;
-}
-
-static void pkt_print_settings(struct pktcdvd_device *pd)
-{
- dev_info(disk_to_dev(pd->disk), "%s packets, %u blocks, Mode-%c disc\n",
- pd->settings.fp ? "Fixed" : "Variable",
- pd->settings.size >> 2,
- pd->settings.block_mode == 8 ? '1' : '2');
-}
-
-static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
-
- cgc->cmd[0] = GPCMD_MODE_SENSE_10;
- cgc->cmd[2] = page_code | (page_control << 6);
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_READ;
- return pkt_generic_packet(pd, cgc);
-}
-
-static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
-{
- memset(cgc->cmd, 0, sizeof(cgc->cmd));
- memset(cgc->buffer, 0, 2);
- cgc->cmd[0] = GPCMD_MODE_SELECT_10;
- cgc->cmd[1] = 0x10; /* PF */
- put_unaligned_be16(cgc->buflen, &cgc->cmd[7]);
- cgc->data_direction = CGC_DATA_WRITE;
- return pkt_generic_packet(pd, cgc);
-}
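
Both helpers build 10-byte MMC CDBs by hand: MODE SENSE(10) carries the page code and page-control bits in byte 2 and a big-endian allocation length in bytes 7-8, while MODE SELECT(10) sets the PF bit in byte 1 and the parameter-list length in the same bytes. A stand-alone sketch of that byte layout (opcode values and field positions as used above; the page code and length are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define GPCMD_MODE_SENSE_10	0x5a
#define GPCMD_MODE_SELECT_10	0x55

static void put_be16(uint16_t v, uint8_t *p)
{
	p[0] = v >> 8;
	p[1] = v & 0xff;
}

/* Fill a MODE SENSE(10) CDB, mirroring pkt_mode_sense(). */
static void mode_sense_cdb(uint8_t cdb[12], int page_code, int page_control,
			   uint16_t alloc_len)
{
	memset(cdb, 0, 12);
	cdb[0] = GPCMD_MODE_SENSE_10;
	cdb[2] = page_code | (page_control << 6);
	put_be16(alloc_len, &cdb[7]);
}

int main(void)
{
	uint8_t cdb[12];
	int i;

	mode_sense_cdb(cdb, 0x05 /* write parameters page */, 0, 128);
	for (i = 0; i < 12; i++)
		printf("%02x ", cdb[i]);
	printf("\n");	/* 5a 00 05 00 00 00 00 00 80 00 00 00 */
	return 0;
}
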
-
-static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
-{
- struct packet_command cgc;
- int ret;
-
- /* set up command and get the disc info */
- init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_DISC_INFO;
- cgc.cmd[8] = cgc.buflen = 2;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- /* not all drives have the same disc_info length, so requeue
- * packet with the length the drive tells us it can supply
- */
- cgc.buflen = be16_to_cpu(di->disc_information_length) +
- sizeof(di->disc_information_length);
-
- if (cgc.buflen > sizeof(disc_information))
- cgc.buflen = sizeof(disc_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
-{
- struct packet_command cgc;
- int ret;
-
- init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
- cgc.cmd[1] = type & 3;
- put_unaligned_be16(track, &cgc.cmd[4]);
- cgc.cmd[8] = 8;
- cgc.quiet = 1;
-
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- return ret;
-
- cgc.buflen = be16_to_cpu(ti->track_information_length) +
- sizeof(ti->track_information_length);
-
- if (cgc.buflen > sizeof(track_information))
- cgc.buflen = sizeof(track_information);
-
- cgc.cmd[8] = cgc.buflen;
- return pkt_generic_packet(pd, &cgc);
-}
-
-static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
- long *last_written)
-{
- disc_information di;
- track_information ti;
- __u32 last_track;
- int ret;
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret)
- return ret;
-
- last_track = (di.last_track_msb << 8) | di.last_track_lsb;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
-
- /* if this track is blank, try the previous. */
- if (ti.blank) {
- last_track--;
- ret = pkt_get_track_info(pd, last_track, 1, &ti);
- if (ret)
- return ret;
- }
-
- /* if last recorded field is valid, return it. */
- if (ti.lra_v) {
- *last_written = be32_to_cpu(ti.last_rec_address);
- } else {
- /* make it up instead */
- *last_written = be32_to_cpu(ti.track_start) +
- be32_to_cpu(ti.track_size);
- if (ti.free_blocks)
- *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
- }
- return 0;
-}
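
When the drive does not report a valid last-recorded address, the fallback above estimates it as track start plus track size, minus the free blocks and a 7-block allowance. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* illustrative values only, in 2048-byte blocks */
	long track_start = 150, track_size = 10000, free_blocks = 2000;
	long last_written = track_start + track_size - (free_blocks + 7);

	printf("estimated last written address: %ld\n", last_written);	/* 8143 */
	return 0;
}
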
-
-/*
- * write mode select package based on pd->settings
- */
-static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- write_param_page *wp;
- char buffer[128];
- int ret, size;
-
- /* doesn't apply to DVD+RW or DVD-RAM */
- if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
- return 0;
-
- memset(buffer, 0, sizeof(buffer));
- init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- size = 2 + get_unaligned_be16(&buffer[0]);
- pd->mode_offset = get_unaligned_be16(&buffer[6]);
- if (size > sizeof(buffer))
- size = sizeof(buffer);
-
- /*
- * now get it all
- */
- init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- /*
- * write page is offset header + block descriptor length
- */
- wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
-
- wp->fp = pd->settings.fp;
- wp->track_mode = pd->settings.track_mode;
- wp->write_type = pd->settings.write_type;
- wp->data_block_type = pd->settings.block_mode;
-
- wp->multi_session = 0;
-
-#ifdef PACKET_USE_LS
- wp->link_size = 7;
- wp->ls_v = 1;
-#endif
-
- if (wp->data_block_type == PACKET_BLOCK_MODE1) {
- wp->session_format = 0;
- wp->subhdr2 = 0x20;
- } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
- wp->session_format = 0x20;
- wp->subhdr2 = 8;
-#if 0
- wp->mcn[0] = 0x80;
- memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
-#endif
- } else {
- /*
- * paranoia
- */
- dev_err(ddev, "write mode wrong %d\n", wp->data_block_type);
- return 1;
- }
- wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
-
- cgc.buflen = cgc.cmd[8] = size;
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- pkt_print_settings(pd);
- return 0;
-}
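
The helper above reads the mode data in two passes: bytes 0-1 of the MODE SENSE(10) reply give the mode data length (so the full transfer size is that value plus 2), bytes 6-7 give the block-descriptor length (saved as mode_offset), and the write-parameters page then starts right after the 8-byte header plus that offset. A small stand-alone parser for the same header fields (the buffer contents are invented for illustration):

#include <stdio.h>
#include <stdint.h>

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	/* First 8 bytes of a MODE SENSE(10) reply (illustrative values). */
	uint8_t hdr[8] = { 0x00, 0x46, 0, 0, 0, 0, 0x00, 0x08 };
	unsigned size = 2 + get_be16(&hdr[0]);		/* total mode data */
	unsigned mode_offset = get_be16(&hdr[6]);	/* block descriptor length */

	printf("mode data size %u, page starts at byte %u\n",
	       size, 8 + mode_offset);			/* 72, 16 */
	return 0;
}
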
-
-/*
- * 1 -- we can write to this track, 0 -- we can't
- */
-static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- /* The track is always writable on DVD+RW/DVD-RAM */
- return 1;
- default:
- break;
- }
-
- if (!ti->packet || !ti->fp)
- return 0;
-
- /*
- * "good" settings as per Mt Fuji.
- */
- if (ti->rt == 0 && ti->blank == 0)
- return 1;
-
- if (ti->rt == 0 && ti->blank == 1)
- return 1;
-
- if (ti->rt == 1 && ti->blank == 0)
- return 1;
-
- dev_err(ddev, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
- return 0;
-}
-
-/*
- * 1 -- we can write to this disc, 0 -- we can't
- */
-static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- switch (pd->mmc3_profile) {
- case 0x0a: /* CD-RW */
- case 0xffff: /* MMC3 not supported */
- break;
- case 0x1a: /* DVD+RW */
- case 0x13: /* DVD-RW */
- case 0x12: /* DVD-RAM */
- return 1;
- default:
- dev_dbg(ddev, "Wrong disc profile (%x)\n", pd->mmc3_profile);
- return 0;
- }
-
- /*
-	 * for disc type 0xff we should probably reserve a new track,
-	 * but it is not clear whether that is better left to user-space apps.
- */
- if (di->disc_type == 0xff) {
- dev_notice(ddev, "unknown disc - no track?\n");
- return 0;
- }
-
- if (di->disc_type != 0x20 && di->disc_type != 0) {
- dev_err(ddev, "wrong disc type (%x)\n", di->disc_type);
- return 0;
- }
-
- if (di->erasable == 0) {
- dev_err(ddev, "disc not erasable\n");
- return 0;
- }
-
- if (di->border_status == PACKET_SESSION_RESERVED) {
- dev_err(ddev, "can't write to last track (reserved)\n");
- return 0;
- }
-
- return 1;
-}
-
-static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- unsigned char buf[12];
- disc_information di;
- track_information ti;
- int ret, track;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
- cgc.cmd[8] = 8;
- ret = pkt_generic_packet(pd, &cgc);
- pd->mmc3_profile = ret ? 0xffff : get_unaligned_be16(&buf[6]);
-
- memset(&di, 0, sizeof(disc_information));
- memset(&ti, 0, sizeof(track_information));
-
- ret = pkt_get_disc_info(pd, &di);
- if (ret) {
- dev_err(ddev, "failed get_disc\n");
- return ret;
- }
-
- if (!pkt_writable_disc(pd, &di))
- return -EROFS;
-
- pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
-
- track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
- ret = pkt_get_track_info(pd, track, 1, &ti);
- if (ret) {
- dev_err(ddev, "failed get_track\n");
- return ret;
- }
-
- if (!pkt_writable_track(pd, &ti)) {
- dev_err(ddev, "can't write to this track\n");
- return -EROFS;
- }
-
- /*
- * we keep packet size in 512 byte units, makes it easier to
- * deal with request calculations.
- */
- pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
- if (pd->settings.size == 0) {
- dev_notice(ddev, "detected zero packet size!\n");
- return -ENXIO;
- }
- if (pd->settings.size > PACKET_MAX_SECTORS) {
- dev_err(ddev, "packet size is too big\n");
- return -EROFS;
- }
- pd->settings.fp = ti.fp;
- pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
-
- if (ti.nwa_v) {
- pd->nwa = be32_to_cpu(ti.next_writable);
- set_bit(PACKET_NWA_VALID, &pd->flags);
- }
-
- /*
- * in theory we could use lra on -RW media as well and just zero
- * blocks that haven't been written yet, but in practice that
- * is just a no-go. we'll use that for -R, naturally.
- */
- if (ti.lra_v) {
- pd->lra = be32_to_cpu(ti.last_rec_address);
- set_bit(PACKET_LRA_VALID, &pd->flags);
- } else {
- pd->lra = 0xffffffff;
- set_bit(PACKET_LRA_VALID, &pd->flags);
- }
-
- /*
- * fine for now
- */
- pd->settings.link_loss = 7;
- pd->settings.write_type = 0; /* packet */
- pd->settings.track_mode = ti.track_mode;
-
- /*
- * mode1 or mode2 disc
- */
- switch (ti.data_mode) {
- case PACKET_MODE1:
- pd->settings.block_mode = PACKET_BLOCK_MODE1;
- break;
- case PACKET_MODE2:
- pd->settings.block_mode = PACKET_BLOCK_MODE2;
- break;
- default:
- dev_err(ddev, "unknown data mode\n");
- return -EROFS;
- }
- return 0;
-}
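
As the comment above notes, the packet size is kept in 512-byte sectors: the drive reports fixed_packet_size in 2048-byte frames, and shifting left by two converts frames to sectors. For example, a 32-frame packet becomes 128 sectors, i.e. 64 KiB per packet (illustrative arithmetic only):

#include <stdio.h>

int main(void)
{
	unsigned frames = 32;			/* fixed_packet_size from the drive */
	unsigned sectors = frames << 2;		/* 2048-byte frames -> 512-byte sectors */

	printf("%u frames = %u sectors = %u bytes\n",
	       frames, sectors, sectors * 512);	/* 32, 128, 65536 */
	return 0;
}
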
-
-/*
- * enable/disable write caching on drive
- */
-static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- bool set = IS_ENABLED(CONFIG_CDROM_PKTCDVD_WCACHE);
- int ret;
-
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.buflen = pd->mode_offset + 12;
-
- /*
- * caching mode page might not be there, so quiet this command
- */
- cgc.quiet = 1;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
- if (ret)
- return ret;
-
- /*
- * use drive write caching -- we need deferred error handling to be
- * able to successfully recover with this option (drive will return good
- * status as soon as the cdb is validated).
- */
- buf[pd->mode_offset + 10] |= (set << 2);
-
- cgc.buflen = cgc.cmd[8] = 2 + get_unaligned_be16(&buf[0]);
- ret = pkt_mode_select(pd, &cgc);
- if (ret) {
- dev_err(ddev, "write caching control failed\n");
- pkt_dump_sense(pd, &cgc);
- } else if (!ret && set)
- dev_notice(ddev, "enabled write caching\n");
- return ret;
-}
-
-static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
-{
- struct packet_command cgc;
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
- cgc.cmd[4] = lockflag ? 1 : 0;
- return pkt_generic_packet(pd, &cgc);
-}
-
-/*
- * Returns drive maximum write speed
- */
-static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
- unsigned *write_speed)
-{
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[256+18];
- unsigned char *cap_buf;
- int ret, offset;
-
- cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
- init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
- cgc.sshdr = &sshdr;
-
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
- sizeof(struct mode_page_header);
- ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- }
-
- offset = 20; /* Obsoleted field, used by older drives */
- if (cap_buf[1] >= 28)
- offset = 28; /* Current write speed selected */
- if (cap_buf[1] >= 30) {
- /* If the drive reports at least one "Logical Unit Write
- * Speed Performance Descriptor Block", use the information
- * in the first block. (contains the highest speed)
- */
- int num_spdb = get_unaligned_be16(&cap_buf[30]);
- if (num_spdb > 0)
- offset = 34;
- }
-
- *write_speed = get_unaligned_be16(&cap_buf[offset]);
- return 0;
-}
-
-/* These tables from cdrecord - I don't have orange book */
-/* standard speed CD-RW (1-4x) */
-static char clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* high speed CD-RW (-10x) */
-static char hs_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-/* ultra high speed CD-RW */
-static char us_clv_to_speed[16] = {
- /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 */
- 0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
-};
-
-/*
- * reads the maximum media speed from ATIP
- */
-static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
- unsigned *speed)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- unsigned char buf[64];
- unsigned int size, st, sp;
- int ret;
-
- init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4; /* READ ATIP */
- cgc.cmd[8] = 2;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
- size = 2 + get_unaligned_be16(&buf[0]);
- if (size > sizeof(buf))
- size = sizeof(buf);
-
- init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
- cgc.sshdr = &sshdr;
- cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
- cgc.cmd[1] = 2;
- cgc.cmd[2] = 4;
- cgc.cmd[8] = size;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret) {
- pkt_dump_sense(pd, &cgc);
- return ret;
- }
-
- if (!(buf[6] & 0x40)) {
- dev_notice(ddev, "disc type is not CD-RW\n");
- return 1;
- }
- if (!(buf[6] & 0x4)) {
- dev_notice(ddev, "A1 values on media are not valid, maybe not CDRW?\n");
- return 1;
- }
-
- st = (buf[6] >> 3) & 0x7; /* disc sub-type */
-
- sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
-
- /* Info from cdrecord */
- switch (st) {
- case 0: /* standard speed */
- *speed = clv_to_speed[sp];
- break;
- case 1: /* high speed */
- *speed = hs_clv_to_speed[sp];
- break;
- case 2: /* ultra high speed */
- *speed = us_clv_to_speed[sp];
- break;
- default:
- dev_notice(ddev, "unknown disc sub-type %d\n", st);
- return 1;
- }
- if (*speed) {
- dev_info(ddev, "maximum media speed: %d\n", *speed);
- return 0;
- } else {
- dev_notice(ddev, "unknown speed %d for sub-type %d\n", sp, st);
- return 1;
- }
-}
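
The ATIP A1 field yields a 4-bit speed code and the disc sub-type selects which of the three tables above applies; the result is a CD speed multiplier, which pkt_open_write() later converts to kB/s at roughly 177 kB/s per 1x. A small stand-alone version of that lookup (tables copied from above; the drive data is invented):

#include <stdio.h>

static const char clv_to_speed[16]    = { 0, 2, 4, 6,  8, 0,  0, 0,  0,  0,  0,  0, 0, 0, 0, 0 };
static const char hs_clv_to_speed[16] = { 0, 2, 4, 6, 10, 0,  0, 0,  0,  0,  0,  0, 0, 0, 0, 0 };
static const char us_clv_to_speed[16] = { 0, 2, 4, 8,  0, 0, 16, 0, 24, 32, 40, 48, 0, 0, 0, 0 };

int main(void)
{
	unsigned st = 2;	/* disc sub-type: ultra-high-speed CD-RW */
	unsigned sp = 9;	/* speed code from the ATIP A1 field */
	const char *tbl[3] = { clv_to_speed, hs_clv_to_speed, us_clv_to_speed };
	unsigned speed = tbl[st][sp];

	printf("%ux -> about %u kB/s\n", speed, speed * 177);	/* 32x -> 5664 kB/s */
	return 0;
}
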
-
-static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- struct packet_command cgc;
- struct scsi_sense_hdr sshdr;
- int ret;
-
- dev_dbg(ddev, "Performing OPC\n");
-
- init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
- cgc.sshdr = &sshdr;
- cgc.timeout = 60*HZ;
- cgc.cmd[0] = GPCMD_SEND_OPC;
- cgc.cmd[1] = 1;
- ret = pkt_generic_packet(pd, &cgc);
- if (ret)
- pkt_dump_sense(pd, &cgc);
- return ret;
-}
-
-static int pkt_open_write(struct pktcdvd_device *pd)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- unsigned int write_speed, media_write_speed, read_speed;
-
- ret = pkt_probe_settings(pd);
- if (ret) {
- dev_dbg(ddev, "failed probe\n");
- return ret;
- }
-
- ret = pkt_set_write_settings(pd);
- if (ret) {
- dev_notice(ddev, "failed saving write settings\n");
- return -EIO;
- }
-
- pkt_write_caching(pd);
-
- ret = pkt_get_max_speed(pd, &write_speed);
- if (ret)
- write_speed = 16 * 177;
- switch (pd->mmc3_profile) {
- case 0x13: /* DVD-RW */
- case 0x1a: /* DVD+RW */
- case 0x12: /* DVD-RAM */
- dev_notice(ddev, "write speed %ukB/s\n", write_speed);
- break;
- default:
- ret = pkt_media_speed(pd, &media_write_speed);
- if (ret)
- media_write_speed = 16;
- write_speed = min(write_speed, media_write_speed * 177);
- dev_notice(ddev, "write speed %ux\n", write_speed / 176);
- break;
- }
- read_speed = write_speed;
-
- ret = pkt_set_speed(pd, write_speed, read_speed);
- if (ret) {
- dev_notice(ddev, "couldn't set write speed\n");
- return -EIO;
- }
- pd->write_speed = write_speed;
- pd->read_speed = read_speed;
-
- ret = pkt_perform_opc(pd);
- if (ret)
- dev_notice(ddev, "Optimum Power Calibration failed\n");
-
- return 0;
-}
-
-/*
- * called at open time.
- */
-static int pkt_open_dev(struct pktcdvd_device *pd, bool write)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
- long lba;
- struct request_queue *q;
- struct file *bdev_file;
-
- /*
- * We need to re-open the cdrom device without O_NONBLOCK to be able
- * to read/write from/to it. It is already opened in O_NONBLOCK mode
- * so open should not fail.
- */
- bdev_file = bdev_file_open_by_dev(file_bdev(pd->bdev_file)->bd_dev,
- BLK_OPEN_READ, pd, NULL);
- if (IS_ERR(bdev_file)) {
- ret = PTR_ERR(bdev_file);
- goto out;
- }
- pd->f_open_bdev = bdev_file;
-
- ret = pkt_get_last_written(pd, &lba);
- if (ret) {
- dev_err(ddev, "pkt_get_last_written failed\n");
- goto out_putdev;
- }
-
- set_capacity(pd->disk, lba << 2);
- set_capacity_and_notify(file_bdev(pd->bdev_file)->bd_disk, lba << 2);
-
- q = bdev_get_queue(file_bdev(pd->bdev_file));
- if (write) {
- ret = pkt_open_write(pd);
- if (ret)
- goto out_putdev;
- set_bit(PACKET_WRITABLE, &pd->flags);
- } else {
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- clear_bit(PACKET_WRITABLE, &pd->flags);
- }
-
- ret = pkt_set_segment_merging(pd, q);
- if (ret)
- goto out_putdev;
-
- if (write) {
- if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
- dev_err(ddev, "not enough memory for buffers\n");
- ret = -ENOMEM;
- goto out_putdev;
- }
- dev_info(ddev, "%lukB available on disc\n", lba << 1);
- }
- set_blocksize(bdev_file, CD_FRAMESIZE);
-
- return 0;
-
-out_putdev:
- fput(bdev_file);
-out:
- return ret;
-}
-
-/*
- * called when the device is closed. makes sure that the device flushes
- * the internal cache before we close.
- */
-static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
-{
- struct device *ddev = disk_to_dev(pd->disk);
-
- if (flush && pkt_flush_cache(pd))
- dev_notice(ddev, "not flushing cache\n");
-
- pkt_lock_door(pd, 0);
-
- pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
- fput(pd->f_open_bdev);
- pd->f_open_bdev = NULL;
-
- pkt_shrink_pktlist(pd);
-}
-
-static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
-{
- if (dev_minor >= MAX_WRITERS)
- return NULL;
-
- dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
- return pkt_devs[dev_minor];
-}
-
-static int pkt_open(struct gendisk *disk, blk_mode_t mode)
-{
- struct pktcdvd_device *pd = NULL;
- int ret;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd = pkt_find_dev_from_minor(disk->first_minor);
- if (!pd) {
- ret = -ENODEV;
- goto out;
- }
- BUG_ON(pd->refcnt < 0);
-
- pd->refcnt++;
- if (pd->refcnt > 1) {
- if ((mode & BLK_OPEN_WRITE) &&
- !test_bit(PACKET_WRITABLE, &pd->flags)) {
- ret = -EBUSY;
- goto out_dec;
- }
- } else {
- ret = pkt_open_dev(pd, mode & BLK_OPEN_WRITE);
- if (ret)
- goto out_dec;
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return 0;
-
-out_dec:
- pd->refcnt--;
-out:
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
- return ret;
-}
-
-static void pkt_release(struct gendisk *disk)
-{
- struct pktcdvd_device *pd = disk->private_data;
-
- mutex_lock(&pktcdvd_mutex);
- mutex_lock(&ctl_mutex);
- pd->refcnt--;
- BUG_ON(pd->refcnt < 0);
- if (pd->refcnt == 0) {
- int flush = test_bit(PACKET_WRITABLE, &pd->flags);
- pkt_release_dev(pd, flush);
- }
- mutex_unlock(&ctl_mutex);
- mutex_unlock(&pktcdvd_mutex);
-}
-
-
-static void pkt_end_io_read_cloned(struct bio *bio)
-{
- struct packet_stacked_data *psd = bio->bi_private;
- struct pktcdvd_device *pd = psd->pd;
-
- psd->bio->bi_status = bio->bi_status;
- bio_put(bio);
- bio_endio(psd->bio);
- mempool_free(psd, &psd_pool);
- pkt_bio_finished(pd);
-}
-
-static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
-{
- struct bio *cloned_bio = bio_alloc_clone(file_bdev(pd->bdev_file), bio,
- GFP_NOIO, &pkt_bio_set);
- struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
-
- psd->pd = pd;
- psd->bio = bio;
- cloned_bio->bi_private = psd;
- cloned_bio->bi_end_io = pkt_end_io_read_cloned;
- pd->stats.secs_r += bio_sectors(bio);
- pkt_queue_bio(pd, cloned_bio);
-}
-
-static void pkt_make_request_write(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- sector_t zone;
- struct packet_data *pkt;
- int was_empty, blocked_bio;
- struct pkt_rb_node *node;
-
- zone = get_zone(bio->bi_iter.bi_sector, pd);
-
- /*
- * If we find a matching packet in state WAITING or READ_WAIT, we can
- * just append this bio to that packet.
- */
- spin_lock(&pd->cdrw.active_list_lock);
- blocked_bio = 0;
- list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
- if (pkt->sector == zone) {
- spin_lock(&pkt->lock);
- if ((pkt->state == PACKET_WAITING_STATE) ||
- (pkt->state == PACKET_READ_WAIT_STATE)) {
- bio_list_add(&pkt->orig_bios, bio);
- pkt->write_size +=
- bio->bi_iter.bi_size / CD_FRAMESIZE;
- if ((pkt->write_size >= pkt->frames) &&
- (pkt->state == PACKET_WAITING_STATE)) {
- atomic_inc(&pkt->run_sm);
- wake_up(&pd->wqueue);
- }
- spin_unlock(&pkt->lock);
- spin_unlock(&pd->cdrw.active_list_lock);
- return;
- } else {
- blocked_bio = 1;
- }
- spin_unlock(&pkt->lock);
- }
- }
- spin_unlock(&pd->cdrw.active_list_lock);
-
- /*
-	 * If the bio work queue has reached the congestion-on mark
-	 * (queue size >= write_congestion_on), there is no room left;
-	 * wait until the queue size drops below the congestion-off mark.
- */
- spin_lock(&pd->lock);
- if (pd->write_congestion_on > 0
- && pd->bio_queue_size >= pd->write_congestion_on) {
- struct wait_bit_queue_entry wqe;
-
- init_wait_var_entry(&wqe, &pd->congested, 0);
- for (;;) {
- prepare_to_wait_event(__var_waitqueue(&pd->congested),
- &wqe.wq_entry,
- TASK_UNINTERRUPTIBLE);
- if (pd->bio_queue_size <= pd->write_congestion_off)
- break;
- pd->congested = true;
- spin_unlock(&pd->lock);
- schedule();
- spin_lock(&pd->lock);
- }
- }
- spin_unlock(&pd->lock);
-
- /*
- * No matching packet found. Store the bio in the work queue.
- */
- node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
- node->bio = bio;
- spin_lock(&pd->lock);
- BUG_ON(pd->bio_queue_size < 0);
- was_empty = (pd->bio_queue_size == 0);
- pkt_rbtree_insert(pd, node);
- spin_unlock(&pd->lock);
-
- /*
- * Wake up the worker thread.
- */
- atomic_set(&pd->scan_queue, 1);
- if (was_empty) {
- /* This wake_up is required for correct operation */
- wake_up(&pd->wqueue);
- } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
- /*
- * This wake up is not required for correct operation,
- * but improves performance in some cases.
- */
- wake_up(&pd->wqueue);
- }
-}
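
The congestion logic above pairs with the worker-side check in pkt_handle_queue(): a writer that finds the queue at or above the congestion-on mark marks the device congested and sleeps, and the worker clears the flag and wakes it once the queue has drained below the congestion-off mark. A rough user-space model of that on/off hysteresis with a mutex and condition variable (thresholds and names are illustrative, not the kernel's wait_var_event machinery):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int  queue_size;
static bool congested;

enum { CONGESTION_ON = 10, CONGESTION_OFF = 4 };

static void submit_write(void)			/* pkt_make_request_write() side */
{
	pthread_mutex_lock(&lock);
	if (queue_size >= CONGESTION_ON) {
		congested = true;
		while (queue_size > CONGESTION_OFF)
			pthread_cond_wait(&cond, &lock);
	}
	queue_size++;
	pthread_mutex_unlock(&lock);
}

static void worker_drain(int n)			/* pkt_handle_queue() side */
{
	pthread_mutex_lock(&lock);
	queue_size -= n;
	if (congested && queue_size <= CONGESTION_OFF) {
		congested = false;
		pthread_cond_broadcast(&cond);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)			/* stays under the on-mark */
		submit_write();
	worker_drain(6);
	printf("queue_size=%d congested=%d\n", queue_size, congested);
	return 0;
}
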
-
-static void pkt_submit_bio(struct bio *bio)
-{
- struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- struct bio *split;
-
- bio = bio_split_to_limits(bio);
- if (!bio)
- return;
-
- dev_dbg(ddev, "start = %6llx stop = %6llx\n",
- bio->bi_iter.bi_sector, bio_end_sector(bio));
-
- /*
- * Clone READ bios so we can have our own bi_end_io callback.
- */
- if (bio_data_dir(bio) == READ) {
- pkt_make_request_read(pd, bio);
- return;
- }
-
- if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
- dev_notice(ddev, "WRITE for ro device (%llu)\n", bio->bi_iter.bi_sector);
- goto end_io;
- }
-
- if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
- dev_err(ddev, "wrong bio size\n");
- goto end_io;
- }
-
- do {
- sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
- sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-
- if (last_zone != zone) {
- BUG_ON(last_zone != zone + pd->settings.size);
-
- split = bio_split(bio, last_zone -
- bio->bi_iter.bi_sector,
- GFP_NOIO, &pkt_bio_set);
- bio_chain(split, bio);
- } else {
- split = bio;
- }
-
- pkt_make_request_write(split);
- } while (split != bio);
-
- return;
-end_io:
- bio_io_error(bio);
-}
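
The write path above never lets a bio span two packets: if the first and last sectors map to different zones, the bio is split at the zone boundary and the pieces are handed to pkt_make_request_write() one zone at a time. A small model of that boundary arithmetic, assuming get_zone() rounds a sector down to a multiple of the fixed packet size (which is what the BUG_ON on adjacent zones implies; names and numbers are illustrative):

#include <stdio.h>

#define ZONE_SECTORS 128u	/* pd->settings.size: a 64 KiB packet */

static unsigned long get_zone(unsigned long sector)
{
	return sector & ~(unsigned long)(ZONE_SECTORS - 1);
}

int main(void)
{
	unsigned long start = 1000, sectors = 300;	/* crosses zone boundaries */

	while (sectors) {
		unsigned long zone = get_zone(start);
		unsigned long end  = zone + ZONE_SECTORS;	/* next boundary */
		unsigned long n    = end - start;

		if (n > sectors)
			n = sectors;
		printf("zone %5lu: sectors %lu..%lu\n", zone, start, start + n - 1);
		start += n;
		sectors -= n;
	}
	return 0;
}
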
-
-static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
-{
- struct device *ddev = disk_to_dev(pd->disk);
- int i;
- struct file *bdev_file;
- struct scsi_device *sdev;
-
- if (pd->pkt_dev == dev) {
- dev_err(ddev, "recursive setup not allowed\n");
- return -EBUSY;
- }
- for (i = 0; i < MAX_WRITERS; i++) {
- struct pktcdvd_device *pd2 = pkt_devs[i];
- if (!pd2)
- continue;
- if (file_bdev(pd2->bdev_file)->bd_dev == dev) {
- dev_err(ddev, "%pg already setup\n",
- file_bdev(pd2->bdev_file));
- return -EBUSY;
- }
- if (pd2->pkt_dev == dev) {
- dev_err(ddev, "can't chain pktcdvd devices\n");
- return -EBUSY;
- }
- }
-
- bdev_file = bdev_file_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_NDELAY,
- NULL, NULL);
- if (IS_ERR(bdev_file))
- return PTR_ERR(bdev_file);
- sdev = scsi_device_from_queue(file_bdev(bdev_file)->bd_disk->queue);
- if (!sdev) {
- fput(bdev_file);
- return -EINVAL;
- }
- put_device(&sdev->sdev_gendev);
-
- /* This is safe, since we have a reference from open(). */
- __module_get(THIS_MODULE);
-
- pd->bdev_file = bdev_file;
-
- atomic_set(&pd->cdrw.pending_bios, 0);
- pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->disk->disk_name);
- if (IS_ERR(pd->cdrw.thread)) {
- dev_err(ddev, "can't start kernel thread\n");
- goto out_mem;
- }
-
- proc_create_single_data(pd->disk->disk_name, 0, pkt_proc, pkt_seq_show, pd);
- dev_notice(ddev, "writer mapped to %pg\n", file_bdev(bdev_file));
- return 0;
-
-out_mem:
- fput(bdev_file);
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
- return -ENOMEM;
-}
-
-static int pkt_ioctl(struct block_device *bdev, blk_mode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct pktcdvd_device *pd = bdev->bd_disk->private_data;
- struct device *ddev = disk_to_dev(pd->disk);
- int ret;
-
- dev_dbg(ddev, "cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
-
- mutex_lock(&pktcdvd_mutex);
- switch (cmd) {
- case CDROMEJECT:
- /*
- * The door gets locked when the device is opened, so we
- * have to unlock it or else the eject command fails.
- */
- if (pd->refcnt == 1)
- pkt_lock_door(pd, 0);
- fallthrough;
- /*
- * forward selected CDROM ioctls to CD-ROM, for UDF
- */
- case CDROMMULTISESSION:
- case CDROMREADTOCENTRY:
- case CDROM_LAST_WRITTEN:
- case CDROM_SEND_PACKET:
- case SCSI_IOCTL_SEND_COMMAND:
- if (!bdev->bd_disk->fops->ioctl)
- ret = -ENOTTY;
- else
- ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
- break;
- default:
- dev_dbg(ddev, "Unknown ioctl (%x)\n", cmd);
- ret = -ENOTTY;
- }
- mutex_unlock(&pktcdvd_mutex);
-
- return ret;
-}
-
-static unsigned int pkt_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct pktcdvd_device *pd = disk->private_data;
- struct gendisk *attached_disk;
-
- if (!pd)
- return 0;
- if (!pd->bdev_file)
- return 0;
- attached_disk = file_bdev(pd->bdev_file)->bd_disk;
- if (!attached_disk || !attached_disk->fops->check_events)
- return 0;
- return attached_disk->fops->check_events(attached_disk, clearing);
-}
-
-static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
-}
-
-static const struct block_device_operations pktcdvd_ops = {
- .owner = THIS_MODULE,
- .submit_bio = pkt_submit_bio,
- .open = pkt_open,
- .release = pkt_release,
- .ioctl = pkt_ioctl,
- .compat_ioctl = blkdev_compat_ptr_ioctl,
- .check_events = pkt_check_events,
- .devnode = pkt_devnode,
-};
-
-/*
- * Set up mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
-{
- struct queue_limits lim = {
- .max_hw_sectors = PACKET_MAX_SECTORS,
- .logical_block_size = CD_FRAMESIZE,
- .features = BLK_FEAT_ROTATIONAL,
- };
- int idx;
- int ret = -ENOMEM;
- struct pktcdvd_device *pd;
- struct gendisk *disk;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++)
- if (!pkt_devs[idx])
- break;
- if (idx == MAX_WRITERS) {
- pr_err("max %d writers supported\n", MAX_WRITERS);
- ret = -EBUSY;
- goto out_mutex;
- }
-
- pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
- if (!pd)
- goto out_mutex;
-
- ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
- sizeof(struct pkt_rb_node));
- if (ret)
- goto out_mem;
-
- INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
- INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
- spin_lock_init(&pd->cdrw.active_list_lock);
-
- spin_lock_init(&pd->lock);
- spin_lock_init(&pd->iosched.lock);
- bio_list_init(&pd->iosched.read_queue);
- bio_list_init(&pd->iosched.write_queue);
- init_waitqueue_head(&pd->wqueue);
- pd->bio_queue = RB_ROOT;
-
- pd->write_congestion_on = write_congestion_on;
- pd->write_congestion_off = write_congestion_off;
-
- disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_mem;
- }
- pd->disk = disk;
- disk->major = pktdev_major;
- disk->first_minor = idx;
- disk->minors = 1;
- disk->fops = &pktcdvd_ops;
- disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
- snprintf(disk->disk_name, sizeof(disk->disk_name), DRIVER_NAME"%d", idx);
- disk->private_data = pd;
-
- pd->pkt_dev = MKDEV(pktdev_major, idx);
- ret = pkt_new_dev(pd, dev);
- if (ret)
- goto out_mem2;
-
- /* inherit events of the host device */
- disk->events = file_bdev(pd->bdev_file)->bd_disk->events;
-
- ret = add_disk(disk);
- if (ret)
- goto out_mem2;
-
- pkt_sysfs_dev_new(pd);
- pkt_debugfs_dev_new(pd);
-
- pkt_devs[idx] = pd;
- if (pkt_dev)
- *pkt_dev = pd->pkt_dev;
-
- mutex_unlock(&ctl_mutex);
- return 0;
-
-out_mem2:
- put_disk(disk);
-out_mem:
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-out_mutex:
- mutex_unlock(&ctl_mutex);
- pr_err("setup of pktcdvd device failed\n");
- return ret;
-}
-
-/*
- * Tear down mapping from pktcdvd device to CD-ROM device.
- */
-static int pkt_remove_dev(dev_t pkt_dev)
-{
- struct pktcdvd_device *pd;
- struct device *ddev;
- int idx;
- int ret = 0;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- for (idx = 0; idx < MAX_WRITERS; idx++) {
- pd = pkt_devs[idx];
- if (pd && (pd->pkt_dev == pkt_dev))
- break;
- }
- if (idx == MAX_WRITERS) {
- pr_debug("dev not setup\n");
- ret = -ENXIO;
- goto out;
- }
-
- if (pd->refcnt > 0) {
- ret = -EBUSY;
- goto out;
- }
-
- ddev = disk_to_dev(pd->disk);
-
- if (!IS_ERR(pd->cdrw.thread))
- kthread_stop(pd->cdrw.thread);
-
- pkt_devs[idx] = NULL;
-
- pkt_debugfs_dev_remove(pd);
- pkt_sysfs_dev_remove(pd);
-
- fput(pd->bdev_file);
-
- remove_proc_entry(pd->disk->disk_name, pkt_proc);
- dev_notice(ddev, "writer unmapped\n");
-
- del_gendisk(pd->disk);
- put_disk(pd->disk);
-
- mempool_exit(&pd->rb_pool);
- kfree(pd);
-
- /* This is safe: open() is still holding a reference. */
- module_put(THIS_MODULE);
-
-out:
- mutex_unlock(&ctl_mutex);
- return ret;
-}
-
-static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
-{
- struct pktcdvd_device *pd;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
- if (pd) {
- ctrl_cmd->dev = new_encode_dev(file_bdev(pd->bdev_file)->bd_dev);
- ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
- } else {
- ctrl_cmd->dev = 0;
- ctrl_cmd->pkt_dev = 0;
- }
- ctrl_cmd->num_devices = MAX_WRITERS;
-
- mutex_unlock(&ctl_mutex);
-}
-
-static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- struct pkt_ctrl_command ctrl_cmd;
- int ret = 0;
- dev_t pkt_dev = 0;
-
- if (cmd != PACKET_CTRL_CMD)
- return -ENOTTY;
-
- if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
-
- switch (ctrl_cmd.command) {
- case PKT_CTRL_CMD_SETUP:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
- ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
- break;
- case PKT_CTRL_CMD_TEARDOWN:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
- break;
- case PKT_CTRL_CMD_STATUS:
- pkt_get_status(&ctrl_cmd);
- break;
- default:
- return -ENOTTY;
- }
-
- if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
- return -EFAULT;
- return ret;
-}
-
-#ifdef CONFIG_COMPAT
-static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct file_operations pkt_ctl_fops = {
- .open = nonseekable_open,
- .unlocked_ioctl = pkt_ctl_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = pkt_ctl_compat_ioctl,
-#endif
- .owner = THIS_MODULE,
-};
-
-static struct miscdevice pkt_misc = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = DRIVER_NAME,
- .nodename = "pktcdvd/control",
- .fops = &pkt_ctl_fops
-};
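
User space drives this control node with a single ioctl: it fills a struct pkt_ctrl_command with PKT_CTRL_CMD_SETUP (or TEARDOWN/STATUS) and the device number of the CD/DVD drive, and for SETUP the driver returns the newly allocated pktcdvd device number in pkt_dev. A minimal sketch of what a pktsetup-style tool might do, run as root since SETUP needs CAP_SYS_ADMIN (error handling trimmed; assumes the UAPI definitions from <linux/pktcdvd.h> and that the drive is /dev/sr0 with a minor number below 256):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <unistd.h>
#include <linux/pktcdvd.h>	/* struct pkt_ctrl_command, PACKET_CTRL_CMD */

int main(void)
{
	struct pkt_ctrl_command c;
	struct stat st;
	int fd;

	if (stat("/dev/sr0", &st) < 0)
		return 1;

	memset(&c, 0, sizeof(c));
	c.command = PKT_CTRL_CMD_SETUP;
	/* pack major/minor the way new_decode_dev() expects (minor < 256) */
	c.dev = (major(st.st_rdev) << 8) | minor(st.st_rdev);

	fd = open("/dev/pktcdvd/control", O_RDONLY);
	if (fd < 0 || ioctl(fd, PACKET_CTRL_CMD, &c) < 0) {
		perror("PKT_CTRL_CMD_SETUP");
		return 1;
	}
	printf("mapped to pktcdvd device %#x\n", c.pkt_dev);
	close(fd);
	return 0;
}
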
-
-static int __init pkt_init(void)
-{
- int ret;
-
- mutex_init(&ctl_mutex);
-
- ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
- sizeof(struct packet_stacked_data));
- if (ret)
- return ret;
- ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
- if (ret) {
- mempool_exit(&psd_pool);
- return ret;
- }
-
- ret = register_blkdev(pktdev_major, DRIVER_NAME);
- if (ret < 0) {
- pr_err("unable to register block device\n");
- goto out2;
- }
- if (!pktdev_major)
- pktdev_major = ret;
-
- ret = pkt_sysfs_init();
- if (ret)
- goto out;
-
- pkt_debugfs_init();
-
- ret = misc_register(&pkt_misc);
- if (ret) {
- pr_err("unable to register misc device\n");
- goto out_misc;
- }
-
- pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
-
- return 0;
-
-out_misc:
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-out:
- unregister_blkdev(pktdev_major, DRIVER_NAME);
-out2:
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
- return ret;
-}
-
-static void __exit pkt_exit(void)
-{
- remove_proc_entry("driver/"DRIVER_NAME, NULL);
- misc_deregister(&pkt_misc);
-
- pkt_debugfs_cleanup();
- pkt_sysfs_cleanup();
-
- unregister_blkdev(pktdev_major, DRIVER_NAME);
- mempool_exit(&psd_pool);
- bioset_exit(&pkt_bio_set);
-}
-
-MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
-MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
-MODULE_LICENSE("GPL");
-
-module_init(pkt_init);
-module_exit(pkt_exit);
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index b5727dea15bd..7af21fe67671 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -957,8 +957,10 @@ static bool vdc_port_mpgroup_check(struct vio_dev *vdev)
dev = device_find_child(vdev->dev.parent, &port_data,
vdc_device_probed);
- if (dev)
+ if (dev) {
+ put_device(dev);
return true;
+ }
return false;
}
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 9fd284fa76dc..6561d2a561fa 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -48,6 +48,8 @@
#define UBLK_MINORS (1U << MINORBITS)
+#define UBLK_INVALID_BUF_IDX ((u16)-1)
+
/* private ioctl command mirror */
#define UBLK_CMD_DEL_DEV_ASYNC _IOC_NR(UBLK_U_CMD_DEL_DEV_ASYNC)
#define UBLK_CMD_UPDATE_SIZE _IOC_NR(UBLK_U_CMD_UPDATE_SIZE)
@@ -70,7 +72,8 @@
| UBLK_F_UPDATE_SIZE \
| UBLK_F_AUTO_BUF_REG \
| UBLK_F_QUIESCE \
- | UBLK_F_PER_IO_DAEMON)
+ | UBLK_F_PER_IO_DAEMON \
+ | UBLK_F_BUF_REG_OFF_DAEMON)
#define UBLK_F_ALL_RECOVERY_FLAGS (UBLK_F_USER_RECOVERY \
| UBLK_F_USER_RECOVERY_REISSUE \
@@ -82,14 +85,6 @@
UBLK_PARAM_TYPE_DEVT | UBLK_PARAM_TYPE_ZONED | \
UBLK_PARAM_TYPE_DMA_ALIGN | UBLK_PARAM_TYPE_SEGMENT)
-struct ublk_rq_data {
- refcount_t ref;
-
- /* for auto-unregister buffer in case of UBLK_F_AUTO_BUF_REG */
- u16 buf_index;
- void *buf_ctx_handle;
-};
-
struct ublk_uring_cmd_pdu {
/*
* Store requests in same batch temporarily for queuing them to
@@ -110,8 +105,6 @@ struct ublk_uring_cmd_pdu {
*/
struct ublk_queue *ubq;
- struct ublk_auto_buf_reg buf;
-
u16 tag;
};
@@ -155,9 +148,19 @@ struct ublk_uring_cmd_pdu {
/* atomic RW with ubq->cancel_lock */
#define UBLK_IO_FLAG_CANCELED 0x80000000
+/*
+ * Initialize refcount to a large number to include any registered buffers.
+ * UBLK_IO_COMMIT_AND_FETCH_REQ will release these references minus those for
+ * any buffers registered on the io daemon task.
+ */
+#define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
+
struct ublk_io {
/* userspace buffer address from io cmd */
- __u64 addr;
+ union {
+ __u64 addr;
+ struct ublk_auto_buf_reg buf;
+ };
unsigned int flags;
int res;
@@ -169,7 +172,24 @@ struct ublk_io {
};
struct task_struct *task;
-};
+
+ /*
+ * The number of uses of this I/O by the ublk server
+ * if user copy or zero copy are enabled:
+ * - UBLK_REFCOUNT_INIT from dispatch to the server
+ * until UBLK_IO_COMMIT_AND_FETCH_REQ
+ * - 1 for each inflight ublk_ch_{read,write}_iter() call
+ * - 1 for each io_uring registered buffer not registered on task
+ * The I/O can only be completed once all references are dropped.
+ * User copy and buffer registration operations are only permitted
+ * if the reference count is nonzero.
+ */
+ refcount_t ref;
+ /* Count of buffers registered on task and not yet unregistered */
+ unsigned task_registered_buffers;
+
+ void *buf_ctx_handle;
+} ____cacheline_aligned_in_smp;
struct ublk_queue {
int q_id;
@@ -216,6 +236,9 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
+ struct mutex cancel_mutex;
+ bool canceling;
+ pid_t ublksrv_tgid;
};
/* header of ublk_params */
@@ -228,7 +251,8 @@ static void ublk_io_release(void *priv);
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, int tag, size_t offset);
+ const struct ublk_queue *ubq, struct ublk_io *io,
+ size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *
@@ -673,38 +697,29 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
}
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+ struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
- refcount_set(&data->ref, 1);
- }
+ if (ublk_need_req_ref(ubq))
+ refcount_set(&io->ref, UBLK_REFCOUNT_INIT);
}
-static inline bool ublk_get_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_get_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
- return refcount_inc_not_zero(&data->ref);
- }
+ return refcount_inc_not_zero(&io->ref);
+}
- return true;
+static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
+{
+ if (refcount_dec_and_test(&io->ref))
+ __ublk_complete_rq(req);
}
-static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
- struct request *req)
+static inline bool ublk_sub_req_ref(struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq)) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ unsigned sub_refs = UBLK_REFCOUNT_INIT - io->task_registered_buffers;
- if (refcount_dec_and_test(&data->ref))
- __ublk_complete_rq(req);
- } else {
- __ublk_complete_rq(req);
- }
+ io->task_registered_buffers = 0;
+ return refcount_sub_and_test(sub_refs, &io->ref);
}
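
The replacement scheme keeps the request's reference count huge (UBLK_REFCOUNT_INIT) while the I/O is owned by the server, counts buffers registered on the daemon task in a plain per-io field, and at COMMIT_AND_FETCH subtracts everything except those still-registered buffers; the request completes only when the count finally reaches zero. A rough user-space model of that arithmetic (a tiny INIT value and all names chosen only for illustration):

#include <stdatomic.h>
#include <stdio.h>

#define REF_INIT 1000u			/* stands in for UBLK_REFCOUNT_INIT */

static atomic_uint ref;
static unsigned task_registered_buffers;	/* touched only by the daemon task */

static void drop_refs(unsigned dropped)
{
	/* complete the request when the count hits zero */
	if (atomic_fetch_sub(&ref, dropped) == dropped)
		printf("request completed\n");
}

int main(void)
{
	atomic_store(&ref, REF_INIT);		/* ublk_init_req_ref() */

	task_registered_buffers = 2;		/* two buffers registered on task */
	atomic_fetch_add(&ref, 1);		/* one buffer registered off task */

	/* COMMIT_AND_FETCH: drop everything except on-task registrations. */
	drop_refs(REF_INIT - task_registered_buffers);
	task_registered_buffers = 0;

	drop_refs(1);				/* on-task buffer #1 released */
	drop_refs(1);				/* on-task buffer #2 released */
	drop_refs(1);				/* off-task buffer released -> zero */
	return 0;
}
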
static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
@@ -981,7 +996,7 @@ static inline bool ublk_need_unmap_req(const struct request *req)
}
static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
- struct ublk_io *io)
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
@@ -1005,7 +1020,7 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
static int ublk_unmap_io(const struct ublk_queue *ubq,
const struct request *req,
- struct ublk_io *io)
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
@@ -1140,7 +1155,7 @@ static inline void __ublk_complete_rq(struct request *req)
if (blk_update_request(req, BLK_STS_OK, io->res))
blk_mq_requeue_request(req, true);
- else
+ else if (likely(!blk_should_fake_timeout(req->q)))
__blk_mq_end_request(req, BLK_STS_OK);
return;
@@ -1188,39 +1203,33 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
blk_mq_end_request(rq, BLK_STS_IOERR);
}
-static void ublk_auto_buf_reg_fallback(struct request *req)
+static void
+ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io)
{
- const struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+ unsigned tag = io - ubq->ios;
+ struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
- refcount_set(&data->ref, 1);
}
-static bool ublk_auto_buf_reg(struct request *req, struct ublk_io *io,
- unsigned int issue_flags)
+static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, unsigned int issue_flags)
{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(io->cmd);
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
int ret;
ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
- pdu->buf.index, issue_flags);
+ io->buf.index, issue_flags);
if (ret) {
- if (pdu->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
- ublk_auto_buf_reg_fallback(req);
+ if (io->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(ubq, io);
return true;
}
blk_mq_end_request(req, BLK_STS_IOERR);
return false;
}
- /* one extra reference is dropped by ublk_io_release */
- refcount_set(&data->ref, 2);
- data->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
- /* store buffer index in request payload */
- data->buf_index = pdu->buf.index;
+ io->task_registered_buffers = 1;
+ io->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
return true;
}
@@ -1229,10 +1238,10 @@ static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
struct request *req, struct ublk_io *io,
unsigned int issue_flags)
{
+ ublk_init_req_ref(ubq, io);
if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
- return ublk_auto_buf_reg(req, io, issue_flags);
+ return ublk_auto_buf_reg(ubq, req, io, issue_flags);
- ublk_init_req_ref(ubq, req);
return true;
}
@@ -1356,14 +1365,23 @@ static void ublk_queue_cmd_list(struct ublk_io *io, struct rq_list *l)
static enum blk_eh_timer_return ublk_timeout(struct request *rq)
{
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[rq->tag];
+ pid_t tgid = ubq->dev->ublksrv_tgid;
+ struct task_struct *p;
+ struct pid *pid;
- if (ubq->flags & UBLK_F_UNPRIVILEGED_DEV) {
- send_sig(SIGKILL, io->task, 0);
- return BLK_EH_DONE;
- }
+ if (!(ubq->flags & UBLK_F_UNPRIVILEGED_DEV))
+ return BLK_EH_RESET_TIMER;
- return BLK_EH_RESET_TIMER;
+ if (unlikely(!tgid))
+ return BLK_EH_RESET_TIMER;
+
+ rcu_read_lock();
+ pid = find_vpid(tgid);
+ p = pid_task(pid, PIDTYPE_PID);
+ if (p)
+ send_sig(SIGKILL, p, 0);
+ rcu_read_unlock();
+ return BLK_EH_DONE;
}
static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
@@ -1504,6 +1522,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
put_task_struct(io->task);
io->task = NULL;
}
+
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
}
}
@@ -1515,6 +1536,7 @@ static int ublk_ch_open(struct inode *inode, struct file *filp)
if (test_and_set_bit(UB_STATE_OPEN, &ub->state))
return -EBUSY;
filp->private_data = ub;
+ ub->ublksrv_tgid = current->tgid;
return 0;
}
@@ -1529,6 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
ub->mm = NULL;
ub->nr_queues_ready = 0;
ub->nr_privileged_daemon = 0;
+ ub->ublksrv_tgid = -1;
}
static struct gendisk *ublk_get_disk(struct ublk_device *ub)
@@ -1550,6 +1573,27 @@ static void ublk_put_disk(struct gendisk *disk)
put_device(disk_to_dev(disk));
}
+/*
+ * Use this function to ensure that ->canceling is consistently set for
+ * the device and all queues. Do not set these flags directly.
+ *
+ * Caller must ensure that:
+ * - cancel_mutex is held. This ensures that there is no concurrent
+ * access to ub->canceling and no concurrent writes to ubq->canceling.
+ * - there are no concurrent reads of ubq->canceling from the queue_rq
+ * path. This can be done by quiescing the queue, or through other
+ * means.
+ */
+static void ublk_set_canceling(struct ublk_device *ub, bool canceling)
+ __must_hold(&ub->cancel_mutex)
+{
+ int i;
+
+ ub->canceling = canceling;
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_get_queue(ub, i)->canceling = canceling;
+}
+
static int ublk_ch_release(struct inode *inode, struct file *filp)
{
struct ublk_device *ub = filp->private_data;
@@ -1578,12 +1622,11 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* All requests may be inflight, so ->canceling may not be set, set
* it now.
*/
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- struct ublk_queue *ubq = ublk_get_queue(ub, i);
-
- ubq->canceling = true;
- ublk_abort_queue(ub, ubq);
- }
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, true);
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
+ ublk_abort_queue(ub, ublk_get_queue(ub, i));
+ mutex_unlock(&ub->cancel_mutex);
blk_mq_kick_requeue_list(disk->queue);
/*
@@ -1706,23 +1749,17 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
-/* Must be called when queue is frozen */
-static void ublk_mark_queue_canceling(struct ublk_queue *ubq)
+static void ublk_start_cancel(struct ublk_device *ub)
{
- spin_lock(&ubq->cancel_lock);
- if (!ubq->canceling)
- ubq->canceling = true;
- spin_unlock(&ubq->cancel_lock);
-}
-
-static void ublk_start_cancel(struct ublk_queue *ubq)
-{
- struct ublk_device *ub = ubq->dev;
struct gendisk *disk = ublk_get_disk(ub);
/* Our disk has been dead */
if (!disk)
return;
+
+ mutex_lock(&ub->cancel_mutex);
+ if (ub->canceling)
+ goto out;
/*
* Now we are serialized with ublk_queue_rq()
*
@@ -1731,8 +1768,10 @@ static void ublk_start_cancel(struct ublk_queue *ubq)
* touch completed uring_cmd
*/
blk_mq_quiesce_queue(disk->queue);
- ublk_mark_queue_canceling(ubq);
+ ublk_set_canceling(ub, true);
blk_mq_unquiesce_queue(disk->queue);
+out:
+ mutex_unlock(&ub->cancel_mutex);
ublk_put_disk(disk);
}
@@ -1805,8 +1844,7 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
if (WARN_ON_ONCE(task && task != io->task))
return;
- if (!ubq->canceling)
- ublk_start_cancel(ubq);
+ ublk_start_cancel(ubq->dev);
WARN_ON_ONCE(io->cmd != cmd);
ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
@@ -1930,9 +1968,11 @@ static void ublk_reset_io_flags(struct ublk_device *ub)
for (j = 0; j < ubq->q_depth; j++)
ubq->ios[j].flags &= ~UBLK_IO_FLAG_CANCELED;
spin_unlock(&ubq->cancel_lock);
- ubq->canceling = false;
ubq->fail_io = false;
}
+ mutex_lock(&ub->cancel_mutex);
+ ublk_set_canceling(ub, false);
+ mutex_unlock(&ub->cancel_mutex);
}
/* device can only be started after all IOs are ready */
@@ -1967,12 +2007,66 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
return 0;
}
-static inline void ublk_fill_io_cmd(struct ublk_io *io,
- struct io_uring_cmd *cmd, unsigned long buf_addr)
+static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd)
+{
+ io->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+
+ if (io->buf.reserved0 || io->buf.reserved1)
+ return -EINVAL;
+
+ if (io->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+static int ublk_handle_auto_buf_reg(struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ u16 *buf_idx)
{
+ if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
+ io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
+
+ /*
+ * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
+	 * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from the same
+	 * `io_ring_ctx`.
+	 *
+	 * If this uring_cmd's io_ring_ctx isn't the same as the one
+	 * used to register the buffer, it is the ublk server's
+	 * responsibility to unregister the buffer; otherwise this
+	 * ublk request gets stuck.
+ */
+ if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
+ *buf_idx = io->buf.index;
+ }
+
+ return ublk_set_auto_buf_reg(io, cmd);
+}
+
+/* Once we return, `io->req` can't be used any more */
+static inline struct request *
+ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd)
+{
+ struct request *req = io->req;
+
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
+ /* now this cmd slot is owned by ublk driver */
+ io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+
+ return req;
+}
+
+static inline int
+ublk_config_io_buf(const struct ublk_queue *ubq, struct ublk_io *io,
+ struct io_uring_cmd *cmd, unsigned long buf_addr,
+ u16 *buf_idx)
+{
+ if (ublk_support_auto_buf_reg(ubq))
+ return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
+
io->addr = buf_addr;
+ return 0;
}
static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
@@ -1990,30 +2084,25 @@ static inline void ublk_prep_cancel(struct io_uring_cmd *cmd,
io_uring_cmd_mark_cancelable(cmd, issue_flags);
}
-static inline int ublk_set_auto_buf_reg(struct io_uring_cmd *cmd)
-{
- struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
-
- pdu->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
-
- if (pdu->buf.reserved0 || pdu->buf.reserved1)
- return -EINVAL;
-
- if (pdu->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
- return -EINVAL;
- return 0;
-}
-
static void ublk_io_release(void *priv)
{
struct request *rq = priv;
struct ublk_queue *ubq = rq->mq_hctx->driver_data;
+ struct ublk_io *io = &ubq->ios[rq->tag];
- ublk_put_req_ref(ubq, rq);
+ /*
+	 * task_registered_buffers may be 0 if buffers were registered off task
+	 * but unregistered on task, or after UBLK_IO_COMMIT_AND_FETCH_REQ.
+ */
+ if (current == io->task && io->task_registered_buffers)
+ io->task_registered_buffers--;
+ else
+ ublk_put_req_ref(io, rq);
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq, unsigned int tag,
+ const struct ublk_queue *ubq,
+ struct ublk_io *io,
unsigned int index, unsigned int issue_flags)
{
struct ublk_device *ub = cmd->file->private_data;
@@ -2023,30 +2112,75 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
if (!ublk_support_zero_copy(ubq))
return -EINVAL;
- req = __ublk_check_and_get_req(ub, ubq, tag, 0);
+ req = __ublk_check_and_get_req(ub, ubq, io, 0);
if (!req)
return -EINVAL;
ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
issue_flags);
if (ret) {
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
return 0;
}
+static int
+ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
+ const struct ublk_queue *ubq, struct ublk_io *io,
+ unsigned index, unsigned issue_flags)
+{
+ unsigned new_registered_buffers;
+ struct request *req = io->req;
+ int ret;
+
+ /*
+ * Ensure there are still references for ublk_sub_req_ref() to release.
+ * If not, fall back on the thread-safe buffer registration.
+ */
+ new_registered_buffers = io->task_registered_buffers + 1;
+ if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
+ return ublk_register_io_buf(cmd, ubq, io, index, issue_flags);
+
+ if (!ublk_support_zero_copy(ubq) || !ublk_rq_has_data(req))
+ return -EINVAL;
+
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
+ issue_flags);
+ if (ret)
+ return ret;
+
+ io->task_registered_buffers = new_registered_buffers;
+ return 0;
+}
+
static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq,
+ const struct ublk_device *ub,
unsigned int index, unsigned int issue_flags)
{
- if (!ublk_support_zero_copy(ubq))
+ if (!(ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY))
return -EINVAL;
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
+static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr)
+{
+ if (ublk_need_map_io(ubq)) {
+ /*
+ * FETCH_RQ has to provide IO buffer if NEED GET
+ * DATA is not enabled
+ */
+ if (!buf_addr && !ublk_need_get_data(ubq))
+ return -EINVAL;
+ } else if (buf_addr) {
+ /* User copy requires addr to be unset */
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
struct ublk_io *io, __u64 buf_addr)
{
@@ -2073,26 +2207,11 @@ static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
- if (ublk_need_map_io(ubq)) {
- /*
- * FETCH_RQ has to provide IO buffer if NEED GET
- * DATA is not enabled
- */
- if (!buf_addr && !ublk_need_get_data(ubq))
- goto out;
- } else if (buf_addr) {
- /* User copy requires addr to be unset */
- ret = -EINVAL;
+ ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ubq, io, cmd, buf_addr, NULL);
+ if (ret)
goto out;
- }
- if (ublk_support_auto_buf_reg(ubq)) {
- ret = ublk_set_auto_buf_reg(cmd);
- if (ret)
- goto out;
- }
-
- ublk_fill_io_cmd(io, cmd, buf_addr);
WRITE_ONCE(io->task, get_task_struct(current));
ublk_mark_io_ready(ub, ubq);
out:
@@ -2100,10 +2219,8 @@ out:
return ret;
}
-static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
- struct ublk_io *io, struct io_uring_cmd *cmd,
- const struct ublksrv_io_cmd *ub_cmd,
- unsigned int issue_flags)
+static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
+ struct ublk_io *io, __u64 buf_addr)
{
struct request *req = io->req;
@@ -2112,10 +2229,10 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
*/
- if (!ub_cmd->addr && (!ublk_need_get_data(ubq) ||
+ if (!buf_addr && (!ublk_need_get_data(ubq) ||
req_op(req) == REQ_OP_READ))
return -EINVAL;
- } else if (req_op(req) != REQ_OP_ZONE_APPEND && ub_cmd->addr) {
+ } else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) {
/*
* User copy requires addr to be unset when command is
* not zone append
@@ -2123,48 +2240,17 @@ static int ublk_commit_and_fetch(const struct ublk_queue *ubq,
return -EINVAL;
}
- if (ublk_support_auto_buf_reg(ubq)) {
- int ret;
-
- /*
- * `UBLK_F_AUTO_BUF_REG` only works iff `UBLK_IO_FETCH_REQ`
- * and `UBLK_IO_COMMIT_AND_FETCH_REQ` are issued from same
- * `io_ring_ctx`.
- *
- * If this uring_cmd's io_ring_ctx isn't same with the
- * one for registering the buffer, it is ublk server's
- * responsibility for unregistering the buffer, otherwise
- * this ublk request gets stuck.
- */
- if (io->flags & UBLK_IO_FLAG_AUTO_BUF_REG) {
- struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
-
- if (data->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
- io_buffer_unregister_bvec(cmd, data->buf_index,
- issue_flags);
- io->flags &= ~UBLK_IO_FLAG_AUTO_BUF_REG;
- }
-
- ret = ublk_set_auto_buf_reg(cmd);
- if (ret)
- return ret;
- }
-
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
-
- /* now this cmd slot is owned by ublk driver */
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
- io->res = ub_cmd->result;
-
- if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
-
- if (likely(!blk_should_fake_timeout(req->q)))
- ublk_put_req_ref(ubq, req);
-
return 0;
}
+static bool ublk_need_complete_req(const struct ublk_queue *ubq,
+ struct ublk_io *io)
+{
+ if (ublk_need_req_ref(ubq))
+ return ublk_sub_req_ref(io);
+ return true;
+}
+
static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
struct request *req)
{
@@ -2187,19 +2273,33 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags,
const struct ublksrv_io_cmd *ub_cmd)
{
+ u16 buf_idx = UBLK_INVALID_BUF_IDX;
struct ublk_device *ub = cmd->file->private_data;
- struct task_struct *task;
struct ublk_queue *ubq;
struct ublk_io *io;
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
- int ret = -EINVAL;
struct request *req;
+ int ret;
+ bool compl;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
ub_cmd->result);
+ ret = ublk_check_cmd_op(cmd_op);
+ if (ret)
+ goto out;
+
+ /*
+ * io_buffer_unregister_bvec() doesn't access the ubq or io,
+ * so no need to validate the q_id, tag, or task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
+ return ublk_unregister_io_buf(cmd, ub, ub_cmd->addr,
+ issue_flags);
+
+ ret = -EINVAL;
if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
goto out;
@@ -2209,21 +2309,37 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
goto out;
io = &ubq->ios[tag];
- task = READ_ONCE(io->task);
- if (task && task != current)
+ /* UBLK_IO_FETCH_REQ can be handled on any task; its handler sets io->task */
+ if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
+ ret = ublk_check_fetch_buf(ubq, ub_cmd->addr);
+ if (ret)
+ goto out;
+ ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ if (ret)
+ goto out;
+
+ ublk_prep_cancel(cmd, issue_flags, ubq, tag);
+ return -EIOCBQUEUED;
+ }
+
+ if (READ_ONCE(io->task) != current) {
+ /*
+ * ublk_register_io_buf() accesses only the io's refcount,
+ * so it can be handled on any task
+ */
+ if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
+ return ublk_register_io_buf(cmd, ubq, io, ub_cmd->addr,
+ issue_flags);
+
goto out;
+ }
/* there is pending io cmd, something must be wrong */
- if (io->flags & UBLK_IO_FLAG_ACTIVE) {
+ if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)) {
ret = -EBUSY;
goto out;
}
- /* only UBLK_IO_FETCH_REQ is allowed if io is not OWNED_BY_SRV */
- if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) &&
- _IOC_NR(cmd_op) != UBLK_IO_FETCH_REQ)
- goto out;
-
/*
* ensure that the user issues UBLK_IO_NEED_GET_DATA
* iff the driver has set the UBLK_IO_FLAG_NEED_GET_DATA.
@@ -2232,23 +2348,27 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
^ (_IOC_NR(cmd_op) == UBLK_IO_NEED_GET_DATA))
goto out;
- ret = ublk_check_cmd_op(cmd_op);
- if (ret)
- goto out;
-
- ret = -EINVAL;
switch (_IOC_NR(cmd_op)) {
case UBLK_IO_REGISTER_IO_BUF:
- return ublk_register_io_buf(cmd, ubq, tag, ub_cmd->addr, issue_flags);
- case UBLK_IO_UNREGISTER_IO_BUF:
- return ublk_unregister_io_buf(cmd, ubq, ub_cmd->addr, issue_flags);
- case UBLK_IO_FETCH_REQ:
- ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ return ublk_daemon_register_io_buf(cmd, ubq, io, ub_cmd->addr,
+ issue_flags);
+ case UBLK_IO_COMMIT_AND_FETCH_REQ:
+ ret = ublk_check_commit_and_fetch(ubq, io, ub_cmd->addr);
if (ret)
goto out;
- break;
- case UBLK_IO_COMMIT_AND_FETCH_REQ:
- ret = ublk_commit_and_fetch(ubq, io, cmd, ub_cmd, issue_flags);
+ io->res = ub_cmd->result;
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, &buf_idx);
+ compl = ublk_need_complete_req(ubq, io);
+
+ /* can't touch 'ublk_io' any more */
+ if (buf_idx != UBLK_INVALID_BUF_IDX)
+ io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ req->__sector = ub_cmd->zone_append_lba;
+ if (compl)
+ __ublk_complete_rq(req);
+
if (ret)
goto out;
break;
@@ -2258,9 +2378,9 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* uring_cmd active first and prepare for handling new requeued
* request
*/
- req = io->req;
- ublk_fill_io_cmd(io, cmd, ub_cmd->addr);
- io->flags &= ~UBLK_IO_FLAG_OWNED_BY_SRV;
+ req = ublk_fill_io_cmd(io, cmd);
+ ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, NULL);
+ WARN_ON_ONCE(ret);
if (likely(ublk_get_data(ubq, io, req))) {
__ublk_prep_compl_io_cmd(io, req);
return UBLK_IO_RES_OK;
@@ -2279,15 +2399,20 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, int tag, size_t offset)
+ const struct ublk_queue *ubq, struct ublk_io *io, size_t offset)
{
+ unsigned tag = io - ubq->ios;
struct request *req;
+ /*
+ * can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
+ * which would overwrite it with io->cmd
+ */
req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
if (!req)
return NULL;
- if (!ublk_get_req_ref(ubq, req))
+ if (!ublk_get_req_ref(io))
return NULL;
if (unlikely(!blk_mq_request_started(req) || req->tag != tag))
@@ -2301,7 +2426,7 @@ static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
return req;
fail_put:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return NULL;
}
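
__ublk_check_and_get_req() follows a take-reference-first pattern: grab a reference, then validate the request, and drop the reference on every failure path so callers only ever see a referenced, started request. Here is a compact illustration with invented types; the real code uses refcount_inc_not_zero() semantics, which the sketch approximates with a compare-and-swap loop.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct req_demo {
	int tag;
	bool started;
	atomic_uint ref;
};

/* refcount_inc_not_zero()-style acquire: only succeed while ref > 0 */
static bool get_ref_demo(struct req_demo *req)
{
	unsigned int old = atomic_load(&req->ref);

	while (old) {
		if (atomic_compare_exchange_weak(&req->ref, &old, old + 1))
			return true;
	}
	return false;
}

static struct req_demo *check_and_get_demo(struct req_demo *req, int tag)
{
	if (!req || !get_ref_demo(req))
		return NULL;
	if (!req->started || req->tag != tag) {
		atomic_fetch_sub(&req->ref, 1);	/* fail_put */
		return NULL;
	}
	return req;
}
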
@@ -2368,7 +2493,8 @@ static inline bool ublk_check_ubuf_dir(const struct request *req,
}
static struct request *ublk_check_and_get_req(struct kiocb *iocb,
- struct iov_iter *iter, size_t *off, int dir)
+ struct iov_iter *iter, size_t *off, int dir,
+ struct ublk_io **io)
{
struct ublk_device *ub = iocb->ki_filp->private_data;
struct ublk_queue *ubq;
@@ -2402,7 +2528,8 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
if (tag >= ubq->q_depth)
return ERR_PTR(-EINVAL);
- req = __ublk_check_and_get_req(ub, ubq, tag, buf_off);
+ *io = &ubq->ios[tag];
+ req = __ublk_check_and_get_req(ub, ubq, *io, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
@@ -2415,42 +2542,40 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
*off = buf_off;
return req;
fail:
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(*io, req);
return ERR_PTR(-EACCES);
}
static ssize_t ublk_ch_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST);
+ req = ublk_check_and_get_req(iocb, to, &buf_off, ITER_DEST, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, to, ITER_DEST);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
static ssize_t ublk_ch_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
- struct ublk_queue *ubq;
struct request *req;
+ struct ublk_io *io;
size_t buf_off;
size_t ret;
- req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE);
+ req = ublk_check_and_get_req(iocb, from, &buf_off, ITER_SOURCE, &io);
if (IS_ERR(req))
return PTR_ERR(req);
ret = ublk_copy_user_pages(req, buf_off, from, ITER_SOURCE);
- ubq = req->mq_hctx->driver_data;
- ublk_put_req_ref(ubq, req);
+ ublk_put_req_ref(io, req);
return ret;
}
@@ -2475,6 +2600,8 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
struct ublk_io *io = &ubq->ios[i];
if (io->task)
put_task_struct(io->task);
+ WARN_ON_ONCE(refcount_read(&io->ref));
+ WARN_ON_ONCE(io->task_registered_buffers);
}
if (ubq->io_cmd_buf)
@@ -2513,7 +2640,7 @@ static void ublk_deinit_queues(struct ublk_device *ub)
for (i = 0; i < nr_queues; i++)
ublk_deinit_queue(ub, i);
- kfree(ub->__queues);
+ kvfree(ub->__queues);
}
static int ublk_init_queues(struct ublk_device *ub)
@@ -2524,7 +2651,7 @@ static int ublk_init_queues(struct ublk_device *ub)
int i, ret = -ENOMEM;
ub->queue_size = ubq_size;
- ub->__queues = kcalloc(nr_queues, ubq_size, GFP_KERNEL);
+ ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
if (!ub->__queues)
return ret;
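
The allocation switch above (kcalloc() to kvcalloc(), kfree() to kvfree()) lets a large queue array fall back to vmalloc when physically contiguous pages are unavailable; the two calls must stay paired. A minimal sketch of the pattern, with invented helper names:

#include <linux/mm.h>
#include <linux/slab.h>

static void *alloc_queues_demo(unsigned int nr_queues, size_t ubq_size)
{
	/* kvcalloc() zeroes the array and may fall back to vmalloc */
	return kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
}

static void free_queues_demo(void *queues)
{
	kvfree(queues);	/* handles both kmalloc- and vmalloc-backed memory */
}
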
@@ -2580,6 +2707,7 @@ static void ublk_cdev_rel(struct device *dev)
ublk_deinit_queues(ub);
ublk_free_dev_number(ub);
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
}
@@ -2627,7 +2755,6 @@ static int ublk_add_tag_set(struct ublk_device *ub)
ub->tag_set.nr_hw_queues = ub->dev_info.nr_hw_queues;
ub->tag_set.queue_depth = ub->dev_info.queue_depth;
ub->tag_set.numa_node = NUMA_NO_NODE;
- ub->tag_set.cmd_size = sizeof(struct ublk_rq_data);
ub->tag_set.driver_data = ub;
return blk_mq_alloc_tag_set(&ub->tag_set);
}
@@ -2729,6 +2856,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
if (wait_for_completion_interruptible(&ub->completion) != 0)
return -EINTR;
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
+
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE ||
test_bit(UB_STATE_USED, &ub->state)) {
@@ -2933,6 +3063,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
goto out_unlock;
mutex_init(&ub->mutex);
spin_lock_init(&ub->lock);
+ mutex_init(&ub->cancel_mutex);
ret = ublk_alloc_dev_number(ub, header->dev_id);
if (ret < 0)
@@ -2953,7 +3084,8 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ub->dev_info.flags |= UBLK_F_CMD_IOCTL_ENCODE |
UBLK_F_URING_CMD_COMP_IN_TASK |
- UBLK_F_PER_IO_DAEMON;
+ UBLK_F_PER_IO_DAEMON |
+ UBLK_F_BUF_REG_OFF_DAEMON;
/* GET_DATA isn't needed any more with USER_COPY or ZERO COPY */
if (ub->dev_info.flags & (UBLK_F_USER_COPY | UBLK_F_SUPPORT_ZERO_COPY |
@@ -3003,6 +3135,7 @@ out_free_dev_number:
ublk_free_dev_number(ub);
out_free_ub:
mutex_destroy(&ub->mutex);
+ mutex_destroy(&ub->cancel_mutex);
kfree(ub);
out_unlock:
mutex_unlock(&ublk_ctl_mutex);
@@ -3227,6 +3360,9 @@ static int ublk_ctrl_end_recovery(struct ublk_device *ub,
pr_devel("%s: All FETCH_REQs received, dev id %d\n", __func__,
header->dev_id);
+ if (ub->ublksrv_tgid != ublksrv_pid)
+ return -EINVAL;
+
mutex_lock(&ub->mutex);
if (ublk_nosrv_should_stop_dev(ub))
goto out_unlock;
@@ -3340,7 +3476,7 @@ static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
/* zero means wait forever */
u64 timeout_ms = header->data[0];
struct gendisk *disk;
- int i, ret = -ENODEV;
+ int ret = -ENODEV;
if (!(ub->dev_info.flags & UBLK_F_QUIESCE))
return -EOPNOTSUPP;
@@ -3357,14 +3493,12 @@ static int ublk_ctrl_quiesce_dev(struct ublk_device *ub,
if (ub->dev_info.state != UBLK_S_DEV_LIVE)
goto put_disk;
- /* Mark all queues as canceling */
+ /* Mark the device as canceling */
+ mutex_lock(&ub->cancel_mutex);
blk_mq_quiesce_queue(disk->queue);
- for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
- struct ublk_queue *ubq = ublk_get_queue(ub, i);
-
- ubq->canceling = true;
- }
+ ublk_set_canceling(ub, true);
blk_mq_unquiesce_queue(disk->queue);
+ mutex_unlock(&ub->cancel_mutex);
if (!timeout_ms)
timeout_ms = UINT_MAX;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 30bca8cb7106..e649fa67bac1 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -976,9 +976,8 @@ static int init_vq(struct virtio_blk *vblk)
return -EINVAL;
}
- num_vqs = min_t(unsigned int,
- min_not_zero(num_request_queues, nr_cpu_ids),
- num_vqs);
+ num_vqs = blk_mq_num_possible_queues(
+ min_not_zero(num_request_queues, num_vqs));
num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
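
The init_vq() change above replaces the open-coded min against nr_cpu_ids with blk_mq_num_possible_queues(), letting the block layer decide how many virtqueues can actually be useful. The snippet below is a conceptual stand-in for that clamp (it is not the block-layer helper itself), under the assumption that more hardware queues than possible CPUs buys nothing:

#include <linux/cpumask.h>
#include <linux/minmax.h>

static unsigned int clamp_queues_demo(unsigned int requested)
{
	unsigned int cpus = num_possible_cpus();

	return requested ? min(requested, cpus) : cpus;
}
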
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index d26a58c67e95..b1bd1daa0060 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/cpuhotplug.h>
#include <linux/vmalloc.h>
+#include <linux/sysfs.h>
#include "zcomp.h"
@@ -89,23 +90,21 @@ bool zcomp_available_algorithm(const char *comp)
}
/* show available compressors */
-ssize_t zcomp_available_show(const char *comp, char *buf)
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at)
{
- ssize_t sz = 0;
int i;
for (i = 0; i < ARRAY_SIZE(backends) - 1; i++) {
if (!strcmp(comp, backends[i]->name)) {
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "[%s] ", backends[i]->name);
+ at += sysfs_emit_at(buf, at, "[%s] ",
+ backends[i]->name);
} else {
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
- "%s ", backends[i]->name);
+ at += sysfs_emit_at(buf, at, "%s ", backends[i]->name);
}
}
- sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
- return sz;
+ at += sysfs_emit_at(buf, at, "\n");
+ return at;
}
struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
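
The zcomp_available_show() rewrite above drops the hand-maintained "PAGE_SIZE - sz - 2" arithmetic in favour of sysfs_emit_at(), which tracks the offset and enforces the PAGE_SIZE bound itself. A hedged sketch of a sysfs show() callback using the same pattern; the attribute and the name list are invented:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static const char * const demo_names[] = { "lzo", "lz4", "zstd" };

static ssize_t demo_algos_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	ssize_t at = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_names); i++)
		at += sysfs_emit_at(buf, at, "%s ", demo_names[i]);
	at += sysfs_emit_at(buf, at, "\n");

	return at;	/* bytes written, never past PAGE_SIZE */
}
static DEVICE_ATTR_RO(demo_algos);
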
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 4acffe671a5e..eacfd3f7d61d 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -79,7 +79,7 @@ struct zcomp {
int zcomp_cpu_up_prepare(unsigned int cpu, struct hlist_node *node);
int zcomp_cpu_dead(unsigned int cpu, struct hlist_node *node);
-ssize_t zcomp_available_show(const char *comp, char *buf);
+ssize_t zcomp_available_show(const char *comp, char *buf, ssize_t at);
bool zcomp_available_algorithm(const char *comp);
struct zcomp *zcomp_create(const char *alg, struct zcomp_params *params);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 54c57103715f..8acad3cc6e6e 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -373,7 +373,7 @@ static ssize_t initstate_show(struct device *dev,
val = init_done(zram);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
@@ -381,7 +381,7 @@ static ssize_t disksize_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
+ return sysfs_emit(buf, "%llu\n", zram->disksize);
}
static ssize_t mem_limit_store(struct device *dev,
@@ -532,7 +532,7 @@ static ssize_t writeback_limit_enable_show(struct device *dev,
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+ return sysfs_emit(buf, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
@@ -567,7 +567,7 @@ static ssize_t writeback_limit_show(struct device *dev,
spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
- return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+ return sysfs_emit(buf, "%llu\n", val);
}
static void reset_bdev(struct zram *zram)
@@ -1225,12 +1225,13 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
zram->comp_algs[prio] = alg;
}
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio, char *buf)
+static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio,
+ char *buf, ssize_t at)
{
ssize_t sz;
down_read(&zram->init_lock);
- sz = zcomp_available_show(zram->comp_algs[prio], buf);
+ sz = zcomp_available_show(zram->comp_algs[prio], buf, at);
up_read(&zram->init_lock);
return sz;
@@ -1387,7 +1388,7 @@ static ssize_t comp_algorithm_show(struct device *dev,
{
struct zram *zram = dev_to_zram(dev);
- return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf);
+ return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf, 0);
}
static ssize_t comp_algorithm_store(struct device *dev,
@@ -1415,8 +1416,8 @@ static ssize_t recomp_algorithm_show(struct device *dev,
if (!zram->comp_algs[prio])
continue;
- sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2, "#%d: ", prio);
- sz += __comp_algorithm_show(zram, prio, buf + sz);
+ sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
+ sz += __comp_algorithm_show(zram, prio, buf, sz);
}
return sz;
@@ -1488,7 +1489,7 @@ static ssize_t io_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu 0 %8llu\n",
(u64)atomic64_read(&zram->stats.failed_reads),
(u64)atomic64_read(&zram->stats.failed_writes),
@@ -1518,7 +1519,7 @@ static ssize_t mm_stat_show(struct device *dev,
orig_size = atomic64_read(&zram->stats.pages_stored);
max_used = atomic_long_read(&zram->stats.max_used_pages);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu\n",
orig_size << PAGE_SHIFT,
(u64)atomic64_read(&zram->stats.compr_data_size),
@@ -1543,8 +1544,8 @@ static ssize_t bd_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
- "%8llu %8llu %8llu\n",
+ ret = sysfs_emit(buf,
+ "%8llu %8llu %8llu\n",
FOUR_K((u64)atomic64_read(&zram->stats.bd_count)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_reads)),
FOUR_K((u64)atomic64_read(&zram->stats.bd_writes)));
@@ -1562,7 +1563,7 @@ static ssize_t debug_stat_show(struct device *dev,
ssize_t ret;
down_read(&zram->init_lock);
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"version: %d\n0 %8llu\n",
version,
(u64)atomic64_read(&zram->stats.miss_free));
@@ -2810,7 +2811,7 @@ static ssize_t hot_add_show(const struct class *class,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
/* This attribute must be set to 0400, so CLASS_ATTR_RO() can not be used */
static struct class_attribute class_attr_hot_add =
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 0d6ad50da046..8df310983bf6 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -670,7 +670,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
hdev->flush = bfusb_flush;
hdev->send = bfusb_send_frame;
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
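
From here on, the Bluetooth drivers stop poking hdev->quirks with set_bit()/clear_bit()/test_bit() directly and use the hci_set_quirk()/hci_clear_quirk()/hci_test_quirk() accessors instead. The sketch below is an assumption about what such wrappers look like (a thin layer over the bitmap helpers, on a demo structure rather than the real struct hci_dev), not a copy of the Bluetooth core's definitions:

#include <linux/bitmap.h>
#include <linux/bitops.h>

/* demo storage standing in for whatever bitmap the HCI core keeps */
struct hci_dev_demo {
	DECLARE_BITMAP(quirk_flags, 64);
};

static inline void hci_set_quirk_demo(struct hci_dev_demo *hdev, int quirk)
{
	set_bit(quirk, hdev->quirk_flags);
}

static inline void hci_clear_quirk_demo(struct hci_dev_demo *hdev, int quirk)
{
	clear_bit(quirk, hdev->quirk_flags);
}

static inline bool hci_test_quirk_demo(struct hci_dev_demo *hdev, int quirk)
{
	return test_bit(quirk, hdev->quirk_flags);
}
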
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 1fa58c059cbf..8b43dfc755de 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -398,7 +398,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hdev->send = bpa10x_send_frame;
hdev->set_diag = bpa10x_set_diag;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 0a60660fc8ce..3a3a56ddbb06 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -135,7 +135,7 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
if (btbcm_set_bdaddr_from_efi(hdev) != 0) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
@@ -467,7 +467,7 @@ static int btbcm_print_controller_features(struct hci_dev *hdev)
/* Read DMI and disable broken Read LE Min/Max Tx Power */
if (dmi_first_match(disable_broken_read_transmit_power))
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
return 0;
}
@@ -706,7 +706,7 @@ int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_m
btbcm_check_bdaddr(hdev);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
@@ -769,7 +769,7 @@ int btbcm_setup_apple(struct hci_dev *hdev)
kfree_skb(skb);
}
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
return 0;
}
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 55cc1652bfe4..06016ac3965c 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -88,7 +88,7 @@ int btintel_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) {
bt_dev_err(hdev, "Found Intel default device address (%pMR)",
&bda->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
kfree_skb(skb);
@@ -2027,7 +2027,7 @@ static int btintel_download_fw(struct hci_dev *hdev,
*/
if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
download:
@@ -2295,7 +2295,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev,
*/
if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
}
@@ -2670,7 +2670,7 @@ static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
* Distinguish ISO data packets from ACL data packets
* based on their connection handle value range.
*/
- if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
+ if (iso_capable(hdev) && hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) {
__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE)
@@ -3435,9 +3435,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -3475,8 +3475,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
if (!btintel_test_flag(hdev,
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
@@ -3491,11 +3491,11 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3571,10 +3571,10 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All Legacy bootloader devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* These variants don't seem to support LE Coded PHY */
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -3600,7 +3600,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index e1c688dd2d45..f4e3fb54fe76 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -2081,9 +2081,9 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
}
/* Apply the common HCI quirks for Intel device */
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_DIAG);
/* Set up the quality report callback for Intel devices */
hdev->set_quality_report = btintel_set_quality_report;
@@ -2123,7 +2123,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
*
* All TLV based devices support WBS
*/
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev,
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index c16a3518b8ff..4fc673640bfc 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -1141,7 +1141,7 @@ static int btmtksdio_setup(struct hci_dev *hdev)
}
/* Enable WBS with mSBC codec */
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* Enable GPIO reset mechanism */
if (bdev->reset) {
@@ -1384,7 +1384,7 @@ static int btmtksdio_probe(struct sdio_func *func,
SET_HCIDEV_DEV(hdev, &func->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
sdio_set_drvdata(func, bdev);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index c97e260fcb0c..51400a891f6e 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -872,7 +872,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
SET_HCIDEV_DEV(hdev, &serdev->dev);
hdev->manufacturer = 70;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
if (btmtkuart_is_standalone(bdev)) {
err = clk_prepare_enable(bdev->osc);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 1088db6056a4..24f9b52605a1 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1807,7 +1807,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
"local-bd-address",
(u8 *)&ba, sizeof(ba));
if (bacmp(&ba, BDADDR_ANY))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
if (hci_register_dev(hdev) < 0) {
dev_err(&serdev->dev, "Can't register HCI device\n");
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index edefb9dc76aa..7c958d6065be 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -739,7 +739,7 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bacmp(&bda->bdaddr, &config->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index c0eb71d6ffd3..d2e13fcb6bab 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -117,7 +117,7 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
/* Devices do not have persistent storage for BD address. Retrieve
* it from the firmware node property.
*/
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
return 0;
}
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 7838c89e529e..4d182cf6e037 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -1287,7 +1287,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Enable central-peripheral role (able to create new connections with
* an existing connection in slave role).
@@ -1301,7 +1301,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8851B:
case CHIP_ID_8922A:
case CHIP_ID_8852BT:
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* RTL8852C needs to transmit mSBC data continuously without
* the zero length of USB packets for the ALT 6 supported chips
@@ -1312,7 +1312,8 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
if (btrtl_dev->project_id == CHIP_ID_8852A ||
btrtl_dev->project_id == CHIP_ID_8852B ||
btrtl_dev->project_id == CHIP_ID_8852C)
- set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER);
hci_set_aosp_capable(hdev);
break;
@@ -1331,8 +1332,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
* but it doesn't support any features from page 2 -
* it either responds with garbage or with error status
*/
- set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
- &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2);
break;
default:
break;
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index a69feb08486a..8325655ce6aa 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -327,7 +327,7 @@ static int btsdio_probe(struct sdio_func *func,
hdev->send = btsdio_send_frame;
if (func->vendor == 0x0104 && func->device == 0x00c5)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 9ab661d2d1e6..f9eeec0aed57 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2472,18 +2472,18 @@ static int btusb_setup_csr(struct hci_dev *hdev)
* Probably will need to be expanded in the future;
* without these the controller will lock up.
*/
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_VOICE_SETTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_VOICE_SETTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE);
/* Clear the reset quirk since this is not an actual
* early Bluetooth 1.1 device from CSR.
*/
- clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
- clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_clear_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
+ hci_clear_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/*
* Special workaround for these BT 4.0 chip clones, and potentially more:
@@ -3192,6 +3192,32 @@ static const struct qca_device_info qca_devices_table[] = {
{ 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */
};
+static u16 qca_extract_board_id(const struct qca_version *ver)
+{
+ u16 flag = le16_to_cpu(ver->flag);
+ u16 board_id = 0;
+
+ if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
+ /* The board_id is split into two bytes:
+ * the 1st byte is the chip ID and the 2nd byte is the platform ID.
+ * For example, in board ID 0x010A, 0x01 is the platform ID and 0x0A is the chip ID.
+ * We have several platforms, and platform IDs are continuously added.
+ * Platform ID:
+ * 0x00 is for Mobile
+ * 0x01 is for X86
+ * 0x02 is for Automotive
+ * 0x03 is for Consumer electronic
+ */
+ board_id = (ver->chip_id << 8) + ver->platform_id;
+ }
+
+ /* Take 0xffff as invalid board ID */
+ if (board_id == 0xffff)
+ board_id = 0;
+
+ return board_id;
+}
+
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
void *data, u16 size)
{
@@ -3348,44 +3374,28 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size,
const struct qca_version *ver)
{
u32 rom_version = le32_to_cpu(ver->rom_version);
- u16 flag = le16_to_cpu(ver->flag);
+ const char *variant;
+ int len;
+ u16 board_id;
- if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) {
- /* The board_id should be split into two bytes
- * The 1st byte is chip ID, and the 2nd byte is platform ID
- * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID
- * we have several platforms, and platform IDs are continuously added
- * Platform ID:
- * 0x00 is for Mobile
- * 0x01 is for X86
- * 0x02 is for Automotive
- * 0x03 is for Consumer electronic
- */
- u16 board_id = (ver->chip_id << 8) + ver->platform_id;
- const char *variant;
+ board_id = qca_extract_board_id(ver);
- switch (le32_to_cpu(ver->ram_version)) {
- case WCN6855_2_0_RAM_VERSION_GF:
- case WCN6855_2_1_RAM_VERSION_GF:
- variant = "_gf";
- break;
- default:
- variant = "";
- break;
- }
-
- if (board_id == 0) {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin",
- rom_version, variant);
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin",
- rom_version, variant, board_id);
- }
- } else {
- snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin",
- rom_version);
+ switch (le32_to_cpu(ver->ram_version)) {
+ case WCN6855_2_0_RAM_VERSION_GF:
+ case WCN6855_2_1_RAM_VERSION_GF:
+ variant = "_gf";
+ break;
+ default:
+ variant = NULL;
+ break;
}
+ len = snprintf(fwname, max_size, "qca/nvm_usb_%08x", rom_version);
+ if (variant)
+ len += snprintf(fwname + len, max_size - len, "%s", variant);
+ if (board_id)
+ len += snprintf(fwname + len, max_size - len, "_%04x", board_id);
+ len += snprintf(fwname + len, max_size - len, ".bin");
}
static int btusb_setup_qca_load_nvm(struct hci_dev *hdev,
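
btusb_generate_qca_nvm_name() now builds the firmware name incrementally: a fixed "qca/nvm_usb_%08x" base, then optional "_gf" and "_%04x" board suffixes, then ".bin", instead of keeping one format string per combination. A small userspace sketch of the same construction, with invented input values:

#include <stdio.h>

static void build_nvm_name_demo(char *out, size_t max, unsigned int rom_version,
				const char *variant, unsigned int board_id)
{
	int len = snprintf(out, max, "qca/nvm_usb_%08x", rom_version);

	if (variant)
		len += snprintf(out + len, max - len, "%s", variant);
	if (board_id)
		len += snprintf(out + len, max - len, "_%04x", board_id);
	snprintf(out + len, max - len, ".bin");
}

int main(void)
{
	char name[64];

	build_nvm_name_demo(name, sizeof(name), 0x00130201, "_gf", 0x010a);
	puts(name);	/* qca/nvm_usb_00130201_gf_010a.bin */
	return 0;
}
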
@@ -3494,7 +3504,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
/* Mark HCI_OP_ENHANCED_SETUP_SYNC_CONN as broken as it doesn't seem to
* work with the likes of HSP/HFP mSBC.
*/
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
return 0;
}
@@ -4008,10 +4018,10 @@ static int btusb_probe(struct usb_interface *intf,
}
#endif
if (id->driver_info & BTUSB_CW6622)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM2045)
- set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_STORED_LINK_KEY);
if (id->driver_info & BTUSB_BCM92035)
hdev->setup = btusb_setup_bcm92035;
@@ -4068,8 +4078,8 @@ static int btusb_probe(struct usb_interface *intf,
hdev->reset = btmtk_reset_sync;
hdev->set_bdaddr = btmtk_set_bdaddr;
hdev->send = btusb_send_frame_mtk;
- set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks);
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
data->recv_acl = btmtk_usb_recv_acl;
data->suspend = btmtk_usb_suspend;
data->resume = btmtk_usb_resume;
@@ -4077,20 +4087,20 @@ static int btusb_probe(struct usb_interface *intf,
}
if (id->driver_info & BTUSB_SWAVE) {
- set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_INQUIRY_MODE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LOCAL_COMMANDS);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
hdev->manufacturer = 2;
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_ATH3012) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
}
if (id->driver_info & BTUSB_QCA_ROME) {
@@ -4098,7 +4108,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
hdev->reset = btusb_qca_reset;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
btusb_check_needs_reset_resume(intf);
}
@@ -4112,7 +4122,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
hdev->reset = btusb_qca_reset;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
hci_set_msft_opcode(hdev, 0xFD70);
}
@@ -4140,35 +4150,35 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_ACTIONS_SEMI) {
/* Support is advertised, but not implemented */
- set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &hdev->quirks);
- set_bit(HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_ERR_DATA_REPORTING);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_CREATE_CONN);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT);
}
if (!reset)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
- set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_BUFFER_SIZE);
}
if (id->driver_info & BTUSB_BROKEN_ISOC)
data->isoc = NULL;
if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
if (id->driver_info & BTUSB_INVALID_LE_STATES)
- set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
}
if (id->driver_info & BTUSB_CSR) {
@@ -4177,10 +4187,10 @@ static int btusb_probe(struct usb_interface *intf,
/* Old firmware would otherwise execute USB reset */
if (bcdDevice < 0x117)
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* This must be set first in case we disable it for fakes */
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
/* Fake CSR devices with broken commands */
if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 &&
@@ -4193,7 +4203,7 @@ static int btusb_probe(struct usb_interface *intf,
/* New sniffer firmware has crippled HCI interface */
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
}
if (id->driver_info & BTUSB_INTEL_BOOT) {
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index 1394c575aa6d..707e90f80130 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -424,7 +424,7 @@ static int aml_check_bdaddr(struct hci_dev *hdev)
if (!bacmp(&paddr->bdaddr, AML_BDADDR_DEFAULT)) {
bt_dev_info(hdev, "amlbt using default bdaddr (%pM)", &paddr->bdaddr);
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
exit:
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 9684eb16059b..f96617b85d87 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -643,8 +643,8 @@ static int bcm_setup(struct hci_uart *hu)
* Allow the bootloader to set a valid address through the
* device tree.
*/
- if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks);
+ if (hci_test_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR))
+ hci_set_quirk(hu->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
if (!bcm_request_irq(bcm))
err = bcm_setup_sleep(hu);
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 9bce53e49cfa..8a9aa33776b0 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -1435,7 +1435,7 @@ static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
bda = (struct hci_rp_read_bd_addr *)skb->data;
if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
- set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &bcm4377->hdev->quirks);
+ hci_set_quirk(bcm4377->hdev, HCI_QUIRK_USE_BDADDR_PROPERTY);
kfree_skb(skb);
return 0;
@@ -2389,13 +2389,13 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hdev->setup = bcm4377_hci_setup;
if (bcm4377->hw->broken_mws_transport_config)
- set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG);
if (bcm4377->hw->broken_ext_scan)
- set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_EXT_SCAN);
if (bcm4377->hw->broken_le_coded)
- set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_CODED);
if (bcm4377->hw->broken_le_ext_adv_report_phy)
- set_bit(HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY);
pci_set_drvdata(pdev, bcm4377);
hci_set_drvdata(hdev, bcm4377);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 811f33701f84..d22fbb7f9fc5 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -660,7 +660,7 @@ static int intel_setup(struct hci_uart *hu)
*/
if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) {
bt_dev_info(hdev, "No device address configured");
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* With this Intel bootloader only the hardware variant and device
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index acba83156de9..d0adae3267b4 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -667,13 +667,13 @@ static int hci_uart_register_dev(struct hci_uart *hu)
SET_HCIDEV_DEV(hdev, hu->tty->dev);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RESET_ON_CLOSE);
/* Only call open() for the protocol after hdev is fully initialized as
* open() (or a timer/workqueue it starts) may attempt to reference it.
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index e19e9bd49555..7044c86325ce 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -649,11 +649,11 @@ static int ll_setup(struct hci_uart *hu)
/* This means that there was an error getting the BD address
* during probe, so mark the device as having a bad address.
*/
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
} else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) {
err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr);
if (err)
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
}
/* Operational speed if any */
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 9fc10a16fd96..cd7575c20f65 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -439,7 +439,7 @@ static int nokia_setup(struct hci_uart *hu)
if (btdev->man_id == NOKIA_ID_BCM2048) {
hu->hdev->set_bdaddr = btbcm_set_bdaddr;
- set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks);
+ hci_set_quirk(hu->hdev, HCI_QUIRK_INVALID_BDADDR);
dev_dbg(dev, "bcm2048 has invalid bluetooth address!");
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 3ec0be496820..33c43503714b 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1892,7 +1892,7 @@ static int qca_setup(struct hci_uart *hu)
/* Enable controller to do both LE scan and BR/EDR inquiry
* simultaneously.
*/
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
switch (soc_type) {
case QCA_QCA2066:
@@ -1944,7 +1944,7 @@ retry:
case QCA_WCN7850:
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
- set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BDADDR_PROPERTY_BROKEN);
hci_set_aosp_capable(hdev);
@@ -2487,7 +2487,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
hdev = qcadev->serdev_hu.hdev;
if (power_ctrl_enabled) {
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
hdev->shutdown = qca_power_off;
}
@@ -2496,11 +2496,11 @@ static int qca_serdev_probe(struct serdev_device *serdev)
* be queried via hci. Same with the valid le states quirk.
*/
if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
- &hdev->quirks);
+ hci_set_quirk(hdev,
+ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
if (!(data->capabilities & QCA_CAP_VALID_LE_STATES))
- set_bit(HCI_QUIRK_BROKEN_LE_STATES, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_BROKEN_LE_STATES);
}
return 0;
@@ -2550,7 +2550,7 @@ static void qca_serdev_shutdown(struct device *dev)
* invoked and the SOC is already in the initial state, so
* don't also need to send the VSC.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP) ||
hci_dev_test_flag(hdev, HCI_SETUP))
return;
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 89a22e9b3253..593d9cefbbf9 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -152,7 +152,7 @@ static int hci_uart_close(struct hci_dev *hdev)
* BT SOC is completely powered OFF during BT OFF, holding port
* open may drain the battery.
*/
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ if (hci_test_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP)) {
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
@@ -358,13 +358,13 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
SET_HCIDEV_DEV(hdev, &hu->serdev->dev);
if (test_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &hu->flags))
- set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NO_SUSPEND_NOTIFIER);
if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 59f4d7bdffdc..f7d8c3c00655 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -415,16 +415,16 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->get_codec_config_data = vhci_get_codec_config_data;
hdev->wakeup = vhci_wakeup;
hdev->setup = vhci_setup;
- set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
- set_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP);
+ hci_set_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED);
/* bit 6 is for external configuration */
if (opcode & 0x40)
- set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG);
/* bit 7 is for raw device */
if (opcode & 0x80)
- set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE);
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 756f292df9e8..6f1a37e85c6a 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -327,17 +327,17 @@ static int virtbt_probe(struct virtio_device *vdev)
hdev->setup = virtbt_setup_intel;
hdev->shutdown = virtbt_shutdown_generic;
hdev->set_bdaddr = virtbt_set_bdaddr_intel;
- set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_STRICT_DUPLICATE_FILTER);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
case VIRTIO_BT_CONFIG_VENDOR_REALTEK:
hdev->manufacturer = 93;
hdev->setup = virtbt_setup_realtek;
hdev->shutdown = virtbt_shutdown_generic;
- set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
- set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_quirk(hdev, HCI_QUIRK_SIMULTANEOUS_DISCOVERY);
+ hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
break;
}
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 7671bd158545..c1c0a4759c7e 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -943,6 +943,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
struct fsl_mc_obj_desc endpoint_desc = {{ 0 }};
struct dprc_endpoint endpoint1 = {{ 0 }};
struct dprc_endpoint endpoint2 = {{ 0 }};
+ struct fsl_mc_bus *mc_bus;
int state, err;
mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
@@ -966,6 +967,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
strcpy(endpoint_desc.type, endpoint2.type);
endpoint_desc.id = endpoint2.id;
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
+ if (endpoint)
+ return endpoint;
/*
* We know that the device has an endpoint because we verified by
@@ -973,17 +976,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev,
* yet discovered by the fsl-mc bus, thus the lookup returned NULL.
* Force a rescan of the devices in this container and retry the lookup.
*/
- if (!endpoint) {
- struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
-
- if (mutex_trylock(&mc_bus->scan_mutex)) {
- err = dprc_scan_objects(mc_bus_dev, true);
- mutex_unlock(&mc_bus->scan_mutex);
- }
-
- if (err < 0)
- return ERR_PTR(err);
+ mc_bus = to_fsl_mc_bus(mc_bus_dev);
+ if (mutex_trylock(&mc_bus->scan_mutex)) {
+ err = dprc_scan_objects(mc_bus_dev, true);
+ mutex_unlock(&mc_bus->scan_mutex);
}
+ if (err < 0)
+ return ERR_PTR(err);
endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev);
/*
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 21a10552da61..31ba1f8c1f78 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -624,9 +624,6 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
if (check_media_type == 1)
cdi->options |= (int) CDO_CHECK_TYPE;
- if (CDROM_CAN(CDC_MRW_W))
- cdi->exit = cdrom_mrw_exit;
-
if (cdi->ops->read_cdda_bpc)
cdi->cdda_method = CDDA_BPC_FULL;
else
@@ -651,9 +648,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
list_del(&cdi->list);
mutex_unlock(&cdrom_mutex);
- if (cdi->exit)
- cdi->exit(cdi);
-
cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
}
EXPORT_SYMBOL(unregister_cdrom);
@@ -1264,6 +1258,8 @@ void cdrom_release(struct cdrom_device_info *cdi)
cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n",
cdi->name);
cdrom_dvd_rw_close_write(cdi);
+ if (CDROM_CAN(CDC_MRW_W))
+ cdrom_mrw_exit(cdi);
if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cd_dbg(CD_CLOSE, "Unlocking door!\n");
diff --git a/drivers/char/tpm/eventlog/common.c b/drivers/char/tpm/eventlog/common.c
index 4c0bbba64ee5..691813d2a5a2 100644
--- a/drivers/char/tpm/eventlog/common.c
+++ b/drivers/char/tpm/eventlog/common.c
@@ -32,7 +32,7 @@ static int tpm_bios_measurements_open(struct inode *inode,
struct tpm_chip *chip;
inode_lock(inode);
- if (!inode->i_private) {
+ if (!inode->i_nlink) {
inode_unlock(inode);
return -ENODEV;
}
@@ -105,7 +105,7 @@ static int tpm_read_log(struct tpm_chip *chip)
void tpm_bios_log_setup(struct tpm_chip *chip)
{
const char *name = dev_name(&chip->dev);
- unsigned int cnt;
+ struct dentry *dentry;
int log_version;
int rc = 0;
@@ -117,14 +117,12 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
return;
log_version = rc;
- cnt = 0;
- chip->bios_dir[cnt] = securityfs_create_dir(name, NULL);
+ chip->bios_dir = securityfs_create_dir(name, NULL);
/* NOTE: securityfs_create_dir can return ENODEV if securityfs is
* compiled out. The caller should ignore the ENODEV return code.
*/
- if (IS_ERR(chip->bios_dir[cnt]))
- goto err;
- cnt++;
+ if (IS_ERR(chip->bios_dir))
+ return;
chip->bin_log_seqops.chip = chip;
if (log_version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
@@ -135,14 +133,13 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
&tpm1_binary_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("binary_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->bin_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
@@ -150,42 +147,23 @@ void tpm_bios_log_setup(struct tpm_chip *chip)
chip->ascii_log_seqops.seqops =
&tpm1_ascii_b_measurements_seqops;
- chip->bios_dir[cnt] =
+ dentry =
securityfs_create_file("ascii_bios_measurements",
- 0440, chip->bios_dir[0],
+ 0440, chip->bios_dir,
(void *)&chip->ascii_log_seqops,
&tpm_bios_measurements_ops);
- if (IS_ERR(chip->bios_dir[cnt]))
+ if (IS_ERR(dentry))
goto err;
- cnt++;
}
return;
err:
- chip->bios_dir[cnt] = NULL;
tpm_bios_log_teardown(chip);
return;
}
void tpm_bios_log_teardown(struct tpm_chip *chip)
{
- int i;
- struct inode *inode;
-
- /* securityfs_remove currently doesn't take care of handling sync
- * between removal and opening of pseudo files. To handle this, a
- * workaround is added by making i_private = NULL here during removal
- * and to check it during open(), both within inode_lock()/unlock().
- * This design ensures that open() either safely gets kref or fails.
- */
- for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) {
- if (chip->bios_dir[i]) {
- inode = d_inode(chip->bios_dir[i]);
- inode_lock(inode);
- inode->i_private = NULL;
- inode_unlock(inode);
- securityfs_remove(chip->bios_dir[i]);
- }
- }
+ securityfs_remove(chip->bios_dir);
}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index 930fe43d5daf..92cec9722ee4 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -24,16 +24,10 @@
static int tpm_read_log_memory_region(struct tpm_chip *chip)
{
- struct device_node *node;
struct resource res;
int rc;
- node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
- if (!node)
- return -ENODEV;
-
- rc = of_address_to_resource(node, 0, &res);
- of_node_put(node);
+ rc = of_reserved_mem_region_to_resource(chip->dev.parent->of_node, 0, &res);
if (rc)
return rc;
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index c0771980bc2f..2ed7815e4899 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -300,7 +300,7 @@ static irqreturn_t tpm_ioserirq_handler(int irq, void *dev_id)
* send TPM commands through the I2C bus.
*/
static int st33zp24_send(struct tpm_chip *chip, unsigned char *buf,
- size_t len)
+ size_t bufsiz, size_t len)
{
struct st33zp24_dev *tpm_dev = dev_get_drvdata(&chip->dev);
u32 status, i, size, ordinal;
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 8d7e4da6ed53..b71725827743 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -82,6 +82,13 @@ static bool tpm_chip_req_canceled(struct tpm_chip *chip, u8 status)
return chip->ops->req_canceled(chip, status);
}
+static bool tpm_transmit_completed(u8 status, struct tpm_chip *chip)
+{
+ u8 status_masked = status & chip->ops->req_complete_mask;
+
+ return status_masked == chip->ops->req_complete_val;
+}
+
static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
{
struct tpm_header *header = buf;
@@ -106,7 +113,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return -E2BIG;
}
- rc = chip->ops->send(chip, buf, count);
+ rc = chip->ops->send(chip, buf, bufsiz, count);
if (rc < 0) {
if (rc != -EPIPE)
dev_err(&chip->dev,
@@ -114,8 +121,19 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
return rc;
}
- /* A sanity check. send() should just return zero on success e.g.
- * not the command length.
+ /*
+ * Synchronous devices return the response directly during the send()
+ * call in the same buffer.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_SYNC) {
+ len = rc;
+ rc = 0;
+ goto out_sync;
+ }
+
+ /*
+ * A sanity check. send() of asynchronous devices should just return
+ * zero on success, i.e. not the command length.
*/
if (rc > 0) {
dev_warn(&chip->dev,
@@ -129,8 +147,7 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
stop = jiffies + tpm_calc_ordinal_duration(chip, ordinal);
do {
u8 status = tpm_chip_status(chip);
- if ((status & chip->ops->req_complete_mask) ==
- chip->ops->req_complete_val)
+ if (tpm_transmit_completed(status, chip))
goto out_recv;
if (tpm_chip_req_canceled(chip, status)) {
@@ -142,6 +159,13 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip, void *buf, size_t bufsiz)
rmb();
} while (time_before(jiffies, stop));
+ /*
+ * Check for completion one more time, just in case the device reported
+ * it while the driver was sleeping in the busy loop above.
+ */
+ if (tpm_transmit_completed(tpm_chip_status(chip), chip))
+ goto out_recv;
+
tpm_chip_cancel(chip);
dev_err(&chip->dev, "Operation Timed out\n");
return -ETIME;
@@ -151,7 +175,10 @@ out_recv:
if (len < 0) {
rc = len;
dev_err(&chip->dev, "tpm_transmit: tpm_recv: error %d\n", rc);
- } else if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
+ return rc;
+ }
+out_sync:
+ if (len < TPM_HEADER_SIZE || len != be32_to_cpu(header->length))
rc = -EFAULT;
return rc ? rc : len;
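The tpm-interface.c change above factors the completion test into tpm_transmit_completed() and re-checks it once after the polling loop expires, so a command that completes just as the timeout fires is not misreported as -ETIME. Below is a minimal user-space sketch of that poll-then-recheck shape; device_status(), the mask values and the 10 ms poll interval are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Illustrative stand-ins for chip->ops->req_complete_mask/val. */
#define REQ_COMPLETE_MASK 0xC0
#define REQ_COMPLETE_VAL  0xC0

static unsigned char device_status(void)
{
	/* Pretend the device completes after a few polls. */
	static int calls;
	return ++calls > 3 ? 0xC0 : 0x00;
}

static bool transmit_completed(unsigned char status)
{
	return (status & REQ_COMPLETE_MASK) == REQ_COMPLETE_VAL;
}

static int wait_for_completion_ms(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (transmit_completed(device_status()))
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			break;
		usleep(10000);	/* poll every 10 ms */
	}

	/* One last check: the device may have completed while we slept. */
	if (transmit_completed(device_status()))
		return 0;

	return -1;		/* would be -ETIME in the driver */
}

int main(void)
{
	printf("result: %d\n", wait_for_completion_ms(100));
	return 0;
}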
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index 7b5049b3d476..bdb119453dfb 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -390,7 +390,7 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
* on every operation, so we weld the hmac init and final functions in
* here to give it the same usage characteristics as a regular hash
*/
-static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len)
+static void tpm2_hmac_init(struct sha256_ctx *sctx, u8 *key, u32 key_len)
{
u8 pad[SHA256_BLOCK_SIZE];
int i;
@@ -406,7 +406,7 @@ static void tpm2_hmac_init(struct sha256_state *sctx, u8 *key, u32 key_len)
sha256_update(sctx, pad, sizeof(pad));
}
-static void tpm2_hmac_final(struct sha256_state *sctx, u8 *key, u32 key_len,
+static void tpm2_hmac_final(struct sha256_ctx *sctx, u8 *key, u32 key_len,
u8 *out)
{
u8 pad[SHA256_BLOCK_SIZE];
@@ -440,7 +440,7 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
const __be32 bits = cpu_to_be32(bytes * 8);
while (bytes > 0) {
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
__be32 c = cpu_to_be32(counter);
tpm2_hmac_init(&sctx, key, key_len);
@@ -467,7 +467,7 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
static void tpm2_KDFe(u8 z[EC_PT_SZ], const char *str, u8 *pt_u, u8 *pt_v,
u8 *out)
{
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
/*
* this should be an iterative counter, but because we know
* we're only taking 32 bytes for the point using a sha256
@@ -592,7 +592,7 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u8 *hmac = NULL;
u32 attrs;
u8 cphash[SHA256_DIGEST_SIZE];
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
if (!auth)
return;
@@ -750,7 +750,7 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
off_t offset_s, offset_p;
u8 rphash[SHA256_DIGEST_SIZE];
u32 attrs, cc;
- struct sha256_state sctx;
+ struct sha256_ctx sctx;
u16 tag = be16_to_cpu(head->tag);
int parm_len, len, i, handles;
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index 54a0360a3c95..f25faf468bba 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -148,7 +148,8 @@ static int tpm_atml_recv(struct tpm_chip *chip, u8 *buf, size_t count)
return size;
}
-static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_atml_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_atmel_priv *priv = dev_get_drvdata(&chip->dev);
int i;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 876edf2705ab..ed97344f2324 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -426,7 +426,7 @@ static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
}
#endif
-static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
{
struct crb_priv *priv = dev_get_drvdata(&chip->dev);
int rc = 0;
diff --git a/drivers/char/tpm/tpm_crb_ffa.c b/drivers/char/tpm/tpm_crb_ffa.c
index 4ead61f01299..755b77b32ea4 100644
--- a/drivers/char/tpm/tpm_crb_ffa.c
+++ b/drivers/char/tpm/tpm_crb_ffa.c
@@ -10,8 +10,16 @@
#define pr_fmt(fmt) "CRB_FFA: " fmt
#include <linux/arm_ffa.h>
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include "tpm_crb_ffa.h"
+static unsigned int busy_timeout_ms = 2000;
+
+module_param(busy_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(busy_timeout_ms,
+ "Maximum time in ms to retry before giving up on busy");
+
/* TPM service function status codes */
#define CRB_FFA_OK 0x05000001
#define CRB_FFA_OK_RESULTS_RETURNED 0x05000002
@@ -115,6 +123,7 @@ struct tpm_crb_ffa {
};
static struct tpm_crb_ffa *tpm_crb_ffa;
+static struct ffa_driver tpm_crb_ffa_driver;
static int tpm_crb_ffa_to_linux_errno(int errno)
{
@@ -168,50 +177,51 @@ static int tpm_crb_ffa_to_linux_errno(int errno)
*/
int tpm_crb_ffa_init(void)
{
+ int ret = 0;
+
+ if (!IS_MODULE(CONFIG_TCG_ARM_CRB_FFA)) {
+ ret = ffa_register(&tpm_crb_ffa_driver);
+ if (ret) {
+ tpm_crb_ffa = ERR_PTR(-ENODEV);
+ return ret;
+ }
+ }
+
if (!tpm_crb_ffa)
- return -ENOENT;
+ ret = -ENOENT;
if (IS_ERR_VALUE(tpm_crb_ffa))
- return -ENODEV;
+ ret = -ENODEV;
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(tpm_crb_ffa_init);
-static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
- unsigned long a0,
- unsigned long a1,
- unsigned long a2)
+static int __tpm_crb_ffa_try_send_receive(unsigned long func_id,
+ unsigned long a0, unsigned long a1,
+ unsigned long a2)
{
const struct ffa_msg_ops *msg_ops;
int ret;
- if (!tpm_crb_ffa)
- return -ENOENT;
-
msg_ops = tpm_crb_ffa->ffa_dev->ops->msg_ops;
if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
- memset(&tpm_crb_ffa->direct_msg_data2, 0x00,
- sizeof(struct ffa_send_direct_data2));
-
- tpm_crb_ffa->direct_msg_data2.data[0] = func_id;
- tpm_crb_ffa->direct_msg_data2.data[1] = a0;
- tpm_crb_ffa->direct_msg_data2.data[2] = a1;
- tpm_crb_ffa->direct_msg_data2.data[3] = a2;
+ tpm_crb_ffa->direct_msg_data2 = (struct ffa_send_direct_data2){
+ .data = { func_id, a0, a1, a2 },
+ };
ret = msg_ops->sync_send_receive2(tpm_crb_ffa->ffa_dev,
&tpm_crb_ffa->direct_msg_data2);
if (!ret)
ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data2.data[0]);
} else {
- memset(&tpm_crb_ffa->direct_msg_data, 0x00,
- sizeof(struct ffa_send_direct_data));
-
- tpm_crb_ffa->direct_msg_data.data1 = func_id;
- tpm_crb_ffa->direct_msg_data.data2 = a0;
- tpm_crb_ffa->direct_msg_data.data3 = a1;
- tpm_crb_ffa->direct_msg_data.data4 = a2;
+ tpm_crb_ffa->direct_msg_data = (struct ffa_send_direct_data){
+ .data1 = func_id,
+ .data2 = a0,
+ .data3 = a1,
+ .data4 = a2,
+ };
ret = msg_ops->sync_send_receive(tpm_crb_ffa->ffa_dev,
&tpm_crb_ffa->direct_msg_data);
@@ -219,6 +229,33 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
ret = tpm_crb_ffa_to_linux_errno(tpm_crb_ffa->direct_msg_data.data1);
}
+ return ret;
+}
+
+static int __tpm_crb_ffa_send_receive(unsigned long func_id, unsigned long a0,
+ unsigned long a1, unsigned long a2)
+{
+ ktime_t start, stop;
+ int ret;
+
+ if (!tpm_crb_ffa)
+ return -ENOENT;
+
+ start = ktime_get();
+ stop = ktime_add(start, ms_to_ktime(busy_timeout_ms));
+
+ for (;;) {
+ ret = __tpm_crb_ffa_try_send_receive(func_id, a0, a1, a2);
+ if (ret != -EBUSY)
+ break;
+
+ usleep_range(50, 100);
+ if (ktime_after(ktime_get(), stop)) {
+ dev_warn(&tpm_crb_ffa->ffa_dev->dev,
+ "Busy retry timed out\n");
+ break;
+ }
+ }
return ret;
}
@@ -236,7 +273,7 @@ static int __tpm_crb_ffa_send_recieve(unsigned long func_id,
*
* Return: 0 on success, negative error code on failure.
*/
-int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
+static int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
{
int rc;
@@ -251,7 +288,7 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
guard(mutex)(&tpm_crb_ffa->msg_data_lock);
- rc = __tpm_crb_ffa_send_recieve(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
+ rc = __tpm_crb_ffa_send_receive(CRB_FFA_GET_INTERFACE_VERSION, 0x00, 0x00, 0x00);
if (!rc) {
if (ffa_partition_supports_direct_req2_recv(tpm_crb_ffa->ffa_dev)) {
*major = CRB_FFA_MAJOR_VERSION(tpm_crb_ffa->direct_msg_data2.data[1]);
@@ -264,7 +301,6 @@ int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor)
return rc;
}
-EXPORT_SYMBOL_GPL(tpm_crb_ffa_get_interface_version);
/**
* tpm_crb_ffa_start() - signals the TPM that a field has changed in the CRB
@@ -289,7 +325,7 @@ int tpm_crb_ffa_start(int request_type, int locality)
guard(mutex)(&tpm_crb_ffa->msg_data_lock);
- return __tpm_crb_ffa_send_recieve(CRB_FFA_START, request_type, locality, 0x00);
+ return __tpm_crb_ffa_send_receive(CRB_FFA_START, request_type, locality, 0x00);
}
EXPORT_SYMBOL_GPL(tpm_crb_ffa_start);
@@ -369,7 +405,9 @@ static struct ffa_driver tpm_crb_ffa_driver = {
.id_table = tpm_crb_ffa_device_id,
};
+#ifdef MODULE
module_ffa_driver(tpm_crb_ffa_driver);
+#endif
MODULE_AUTHOR("Arm");
MODULE_DESCRIPTION("TPM CRB FFA driver");
diff --git a/drivers/char/tpm/tpm_crb_ffa.h b/drivers/char/tpm/tpm_crb_ffa.h
index 645c41ede10e..d7e1344ea003 100644
--- a/drivers/char/tpm/tpm_crb_ffa.h
+++ b/drivers/char/tpm/tpm_crb_ffa.h
@@ -11,11 +11,9 @@
#if IS_REACHABLE(CONFIG_TCG_ARM_CRB_FFA)
int tpm_crb_ffa_init(void);
-int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor);
int tpm_crb_ffa_start(int request_type, int locality);
#else
static inline int tpm_crb_ffa_init(void) { return 0; }
-static inline int tpm_crb_ffa_get_interface_version(u16 *major, u16 *minor) { return 0; }
static inline int tpm_crb_ffa_start(int request_type, int locality) { return 0; }
#endif
diff --git a/drivers/char/tpm/tpm_ftpm_tee.c b/drivers/char/tpm/tpm_ftpm_tee.c
index 53ba28ccd5d3..4e63c30aeaf1 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.c
+++ b/drivers/char/tpm/tpm_ftpm_tee.c
@@ -31,45 +31,19 @@ static const uuid_t ftpm_ta_uuid =
0x82, 0xCB, 0x34, 0x3F, 0xB7, 0xF3, 0x78, 0x96);
/**
- * ftpm_tee_tpm_op_recv() - retrieve fTPM response.
- * @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h.
- * @buf: the buffer to store data.
- * @count: the number of bytes to read.
- *
- * Return:
- * In case of success the number of bytes received.
- * On failure, -errno.
- */
-static int ftpm_tee_tpm_op_recv(struct tpm_chip *chip, u8 *buf, size_t count)
-{
- struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
- size_t len;
-
- len = pvt_data->resp_len;
- if (count < len) {
- dev_err(&chip->dev,
- "%s: Invalid size in recv: count=%zd, resp_len=%zd\n",
- __func__, count, len);
- return -EIO;
- }
-
- memcpy(buf, pvt_data->resp_buf, len);
- pvt_data->resp_len = 0;
-
- return len;
-}
-
-/**
- * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory.
+ * ftpm_tee_tpm_op_send() - send TPM commands through the TEE shared memory
+ * and retrieve the response.
* @chip: the tpm_chip description as specified in driver/char/tpm/tpm.h
- * @buf: the buffer to send.
- * @len: the number of bytes to send.
+ * @buf: the buffer to send and to store the response.
+ * @bufsiz: the size of the buffer.
+ * @cmd_len: the number of bytes to send.
*
* Return:
- * In case of success, returns 0.
+ * In case of success, returns the number of bytes received.
* On failure, -errno
*/
-static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
{
struct ftpm_tee_private *pvt_data = dev_get_drvdata(chip->dev.parent);
size_t resp_len;
@@ -80,16 +54,15 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct tee_param command_params[4];
struct tee_shm *shm = pvt_data->shm;
- if (len > MAX_COMMAND_SIZE) {
+ if (cmd_len > MAX_COMMAND_SIZE) {
dev_err(&chip->dev,
"%s: len=%zd exceeds MAX_COMMAND_SIZE supported by fTPM TA\n",
- __func__, len);
+ __func__, cmd_len);
return -EIO;
}
memset(&transceive_args, 0, sizeof(transceive_args));
memset(command_params, 0, sizeof(command_params));
- pvt_data->resp_len = 0;
/* Invoke FTPM_OPTEE_TA_SUBMIT_COMMAND function of fTPM TA */
transceive_args = (struct tee_ioctl_invoke_arg) {
@@ -103,7 +76,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT,
.u.memref = {
.shm = shm,
- .size = len,
+ .size = cmd_len,
.shm_offs = 0,
},
};
@@ -115,7 +88,7 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
return PTR_ERR(temp_buf);
}
memset(temp_buf, 0, (MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE));
- memcpy(temp_buf, buf, len);
+ memcpy(temp_buf, buf, cmd_len);
command_params[1] = (struct tee_param) {
.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
@@ -156,17 +129,20 @@ static int ftpm_tee_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t len)
__func__, resp_len);
return -EIO;
}
+ if (resp_len > bufsiz) {
+ dev_err(&chip->dev,
+ "%s: resp_len=%zd exceeds bufsiz=%zd\n",
+ __func__, resp_len, bufsiz);
+ return -EIO;
+ }
- /* sanity checks look good, cache the response */
- memcpy(pvt_data->resp_buf, temp_buf, resp_len);
- pvt_data->resp_len = resp_len;
+ memcpy(buf, temp_buf, resp_len);
- return 0;
+ return resp_len;
}
static const struct tpm_class_ops ftpm_tee_tpm_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
- .recv = ftpm_tee_tpm_op_recv,
.send = ftpm_tee_tpm_op_send,
};
@@ -251,7 +227,7 @@ static int ftpm_tee_probe(struct device *dev)
}
pvt_data->chip = chip;
- pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2;
+ pvt_data->chip->flags |= TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_SYNC;
/* Create a character device for the fTPM */
rc = tpm_chip_register(pvt_data->chip);
diff --git a/drivers/char/tpm/tpm_ftpm_tee.h b/drivers/char/tpm/tpm_ftpm_tee.h
index e39903b7ea07..8d5c3f0d2879 100644
--- a/drivers/char/tpm/tpm_ftpm_tee.h
+++ b/drivers/char/tpm/tpm_ftpm_tee.h
@@ -22,16 +22,12 @@
* struct ftpm_tee_private - fTPM's private data
* @chip: struct tpm_chip instance registered with tpm framework.
* @session: fTPM TA session identifier.
- * @resp_len: cached response buffer length.
- * @resp_buf: cached response buffer.
* @ctx: TEE context handler.
* @shm: Memory pool shared with fTPM TA in TEE.
*/
struct ftpm_tee_private {
struct tpm_chip *chip;
u32 session;
- size_t resp_len;
- u8 resp_buf[MAX_RESPONSE_SIZE];
struct tee_context *ctx;
struct tee_shm *shm;
};
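With TPM_CHIP_FLAG_SYNC set, the fTPM driver drops the cached response buffer and the separate .recv() path: ftpm_tee_tpm_op_send() checks the response against the caller's buffer size, copies it back in place and returns its length. A minimal sketch of that in-place request/response contract, where tee_exchange() is a made-up stand-in for the TEE invocation:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_COMMAND_SIZE  4096
#define MAX_RESPONSE_SIZE 4096

static uint8_t shared[MAX_COMMAND_SIZE + MAX_RESPONSE_SIZE];

/* Stand-in for the TEE call: echoes the command back as the "response". */
static size_t tee_exchange(size_t cmd_len)
{
	return cmd_len;
}

/* Send cmd_len bytes from buf, place the response back into buf. */
static int sync_send(uint8_t *buf, size_t bufsiz, size_t cmd_len)
{
	size_t resp_len;

	if (cmd_len > MAX_COMMAND_SIZE)
		return -EIO;

	memset(shared, 0, sizeof(shared));
	memcpy(shared, buf, cmd_len);

	resp_len = tee_exchange(cmd_len);
	if (resp_len > MAX_RESPONSE_SIZE || resp_len > bufsiz)
		return -EIO;	/* response would overflow the caller's buffer */

	memcpy(buf, shared, resp_len);
	return (int)resp_len;	/* >0: response length, as the SYNC contract expects */
}

int main(void)
{
	uint8_t buf[64] = { 0x80, 0x01, 0x00, 0x00, 0x00, 0x0c };

	printf("resp_len = %d\n", sync_send(buf, sizeof(buf), 12));
	return 0;
}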
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index d1d27fdfe523..4f229656a8e2 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -37,7 +37,8 @@ struct priv_data {
u8 buffer[sizeof(struct tpm_header) + 25];
};
-static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct i2c_client *client = to_i2c_client(chip->dev.parent);
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 81d8a78dc655..bdf1f329a679 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -514,7 +514,8 @@ out:
return size;
}
-static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, status;
ssize_t burstcnt;
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 3c3ee5f551db..d44903b29929 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -350,7 +350,8 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
struct priv_data *priv = dev_get_drvdata(&chip->dev);
struct device *dev = chip->dev.parent;
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 76d048f63d55..4734a69406ce 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -191,13 +191,15 @@ static int tpm_ibmvtpm_resume(struct device *dev)
* tpm_ibmvtpm_send() - Send a TPM command
* @chip: tpm chip struct
* @buf: buffer contains data to send
- * @count: size of buffer
+ * @bufsiz: size of the buffer
+ * @count: length of the command
*
* Return:
* 0 on success,
* -errno on error
*/
-static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
bool retry = true;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 2d2ae37153ba..7638b65b851b 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -312,7 +312,8 @@ recv_begin:
return -EIO;
}
-static int tpm_inf_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_inf_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
int i;
int ret;
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 0f62bbc940da..879ac88f5783 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -178,7 +178,8 @@ static int tpm_nsc_recv(struct tpm_chip *chip, u8 * buf, size_t count)
return size;
}
-static int tpm_nsc_send(struct tpm_chip *chip, u8 * buf, size_t count)
+static int tpm_nsc_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_nsc_priv *priv = dev_get_drvdata(&chip->dev);
u8 data;
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index bc7b1b4501b3..d53fce1c9d6f 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -52,7 +52,7 @@ static ssize_t tpm_show_ppi_version(struct device *dev,
{
struct tpm_chip *chip = to_tpm_chip(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", chip->ppi_version);
+ return sysfs_emit(buf, "%s\n", chip->ppi_version);
}
static ssize_t tpm_show_ppi_request(struct device *dev,
@@ -87,12 +87,10 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
else {
req = obj->package.elements[1].integer.value;
if (tpm_ppi_req_has_parameter(req))
- size = scnprintf(buf, PAGE_SIZE,
- "%llu %llu\n", req,
- obj->package.elements[2].integer.value);
+ size = sysfs_emit(buf, "%llu %llu\n", req,
+ obj->package.elements[2].integer.value);
else
- size = scnprintf(buf, PAGE_SIZE,
- "%llu\n", req);
+ size = sysfs_emit(buf, "%llu\n", req);
}
} else if (obj->package.count == 2 &&
obj->package.elements[0].type == ACPI_TYPE_INTEGER &&
@@ -100,8 +98,8 @@ static ssize_t tpm_show_ppi_request(struct device *dev,
if (obj->package.elements[0].integer.value)
size = -EFAULT;
else
- size = scnprintf(buf, PAGE_SIZE, "%llu\n",
- obj->package.elements[1].integer.value);
+ size = sysfs_emit(buf, "%llu\n",
+ obj->package.elements[1].integer.value);
}
ACPI_FREE(obj);
@@ -211,10 +209,10 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
}
if (ret < ARRAY_SIZE(info) - 1)
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret, info[ret]);
+ status = sysfs_emit(buf, "%d: %s\n", ret, info[ret]);
else
- status = scnprintf(buf, PAGE_SIZE, "%d: %s\n", ret,
- info[ARRAY_SIZE(info)-1]);
+ status = sysfs_emit(buf, "%d: %s\n", ret,
+ info[ARRAY_SIZE(info) - 1]);
return status;
}
@@ -255,23 +253,23 @@ static ssize_t tpm_show_ppi_response(struct device *dev,
res = ret_obj[2].integer.value;
if (req) {
if (res == 0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0: Success");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0: Success");
else if (res == 0xFFFFFFF0)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF0: User Abort");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF0: User Abort");
else if (res == 0xFFFFFFF1)
- status = scnprintf(buf, PAGE_SIZE, "%llu %s\n", req,
- "0xFFFFFFF1: BIOS Failure");
+ status = sysfs_emit(buf, "%llu %s\n", req,
+ "0xFFFFFFF1: BIOS Failure");
else if (res >= 1 && res <= 0x00000FFF)
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Corresponding TPM error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Corresponding TPM error");
else
- status = scnprintf(buf, PAGE_SIZE, "%llu %llu: %s\n",
- req, res, "Error");
+ status = sysfs_emit(buf, "%llu %llu: %s\n",
+ req, res, "Error");
} else {
- status = scnprintf(buf, PAGE_SIZE, "%llu: %s\n",
- req, "No Recent Request");
+ status = sysfs_emit(buf, "%llu: %s\n",
+ req, "No Recent Request");
}
cleanup:
@@ -284,7 +282,7 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
{
int i;
u32 ret;
- char *str = buf;
+ int len = 0;
union acpi_object *obj, tmp;
union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
@@ -314,11 +312,11 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
}
if (ret > 0 && ret < ARRAY_SIZE(info))
- str += scnprintf(str, PAGE_SIZE, "%d %d: %s\n",
- i, ret, info[ret]);
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, info[ret]);
}
- return str - buf;
+ return len;
}
static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
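The tpm_ppi.c hunks replace scnprintf(buf, PAGE_SIZE, ...) pointer arithmetic with sysfs_emit()/sysfs_emit_at(), which bound the output to a page internally and take a running byte offset instead of a moving pointer. A user-space sketch of the offset-accumulation style; emit_at() is a simplified stand-in for the kernel helper:

#include <stdarg.h>
#include <stdio.h>

#define BUF_SIZE 4096	/* plays the role of PAGE_SIZE */

/* Append a formatted string at offset 'at', return the bytes added. */
static int emit_at(char *buf, int at, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (at < 0 || at >= BUF_SIZE)
		return 0;

	va_start(ap, fmt);
	n = vsnprintf(buf + at, BUF_SIZE - at, fmt, ap);
	va_end(ap);

	if (n < 0)
		return 0;
	return n < BUF_SIZE - at ? n : BUF_SIZE - at - 1;
}

int main(void)
{
	char buf[BUF_SIZE];
	int len = 0;

	/* Mirrors: len += sysfs_emit_at(buf, len, "%d %d: %s\n", ...) */
	len += emit_at(buf, len, "%d %d: %s\n", 1, 3, "Enable");
	len += emit_at(buf, len, "%d %d: %s\n", 2, 3, "Disable");

	fwrite(buf, 1, len, stdout);
	printf("total = %d\n", len);
	return 0;
}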
diff --git a/drivers/char/tpm/tpm_svsm.c b/drivers/char/tpm/tpm_svsm.c
index 4280edf427d6..f5ba0f64850b 100644
--- a/drivers/char/tpm/tpm_svsm.c
+++ b/drivers/char/tpm/tpm_svsm.c
@@ -25,37 +25,32 @@ struct tpm_svsm_priv {
void *buffer;
};
-static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_svsm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t cmd_len)
{
struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
int ret;
- ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, len);
+ ret = svsm_vtpm_cmd_request_fill(priv->buffer, 0, buf, cmd_len);
if (ret)
return ret;
/*
* The SVSM call uses the same buffer for the command and for the
- * response, so after this call, the buffer will contain the response
- * that can be used by .recv() op.
+ * response, so after this call, the buffer will contain the response.
+ *
+ * Note: we have to use an internal buffer because the device in SVSM
+ * expects the svsm_vtpm header + data to be physically contiguous.
*/
- return snp_svsm_vtpm_send_command(priv->buffer);
-}
-
-static int tpm_svsm_recv(struct tpm_chip *chip, u8 *buf, size_t len)
-{
- struct tpm_svsm_priv *priv = dev_get_drvdata(&chip->dev);
+ ret = snp_svsm_vtpm_send_command(priv->buffer);
+ if (ret)
+ return ret;
- /*
- * The internal buffer contains the response after we send the command
- * to SVSM.
- */
- return svsm_vtpm_cmd_response_parse(priv->buffer, buf, len);
+ return svsm_vtpm_cmd_response_parse(priv->buffer, buf, bufsiz);
}
static struct tpm_class_ops tpm_chip_ops = {
.flags = TPM_OPS_AUTO_STARTUP,
- .recv = tpm_svsm_recv,
.send = tpm_svsm_send,
};
@@ -84,6 +79,7 @@ static int __init tpm_svsm_probe(struct platform_device *pdev)
dev_set_drvdata(&chip->dev, priv);
+ chip->flags |= TPM_CHIP_FLAG_SYNC;
err = tpm2_probe(chip);
if (err)
return err;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index ed0d3d8449b3..4b12c4b9da8b 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -580,7 +580,8 @@ out_err:
return rc;
}
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
int rc, irq;
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index 3b55a7b05c46..fc6891a0b693 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -546,13 +546,15 @@ out_err:
* tpm_cr50_i2c_tis_send() - TPM transmission callback.
* @chip: A TPM chip.
* @buf: Buffer to send.
- * @len: Buffer length.
+ * @bufsiz: Buffer size.
+ * @len: Command length.
*
* Return:
* - 0: Success.
* - -errno: A POSIX error code.
*/
-static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_cr50_i2c_tis_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t len)
{
size_t burstcnt, limit, sent = 0;
u8 tpm_go[4] = { TPM_STS_GO };
diff --git a/drivers/char/tpm/tpm_vtpm_proxy.c b/drivers/char/tpm/tpm_vtpm_proxy.c
index 8fe4a01eea12..0818bb517805 100644
--- a/drivers/char/tpm/tpm_vtpm_proxy.c
+++ b/drivers/char/tpm/tpm_vtpm_proxy.c
@@ -321,12 +321,14 @@ static int vtpm_proxy_is_driver_command(struct tpm_chip *chip,
*
* @chip: tpm chip to use
* @buf: send buffer
+ * @bufsiz: size of the buffer
* @count: bytes to send
*
* Return:
* 0 in case of success, negative error value otherwise.
*/
-static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_proxy_tpm_op_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct proxy_dev *proxy_dev = dev_get_drvdata(&chip->dev);
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 80cca3b83b22..556bf2256716 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -131,7 +131,8 @@ static size_t shr_data_offset(struct vtpm_shared_page *shr)
return struct_size(shr, extra_pages, shr->nr_extra_pages);
}
-static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz,
+ size_t count)
{
struct tpm_private *priv = dev_get_drvdata(&chip->dev);
struct vtpm_shared_page *shr = priv->shr;
diff --git a/drivers/clk/qcom/apcs-sdx55.c b/drivers/clk/qcom/apcs-sdx55.c
index 3ba01622d8f0..90dd1f1855c2 100644
--- a/drivers/clk/qcom/apcs-sdx55.c
+++ b/drivers/clk/qcom/apcs-sdx55.c
@@ -111,7 +111,7 @@ static int qcom_apcs_sdx55_clk_probe(struct platform_device *pdev)
* driver, there seems to be no better place to do this. So do it here!
*/
cpu_dev = get_cpu_device(0);
- ret = dev_pm_domain_attach(cpu_dev, true);
+ ret = dev_pm_domain_attach(cpu_dev, PD_FLAG_ATTACH_POWER_ON);
if (ret) {
dev_err_probe(dev, ret, "can't get PM domain: %d\n", ret);
goto err;
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
index 9efb9fd24b42..1a9a1cb869e2 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
@@ -385,7 +385,8 @@ static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_FEAT(mbus_clk, "mbus", mbus_parents,
0, 0, /* no P */
24, 3, /* mux */
BIT(31), /* gate */
- 0, CCU_FEATURE_UPDATE_BIT);
+ CLK_IS_CRITICAL,
+ CCU_FEATURE_UPDATE_BIT);
static const struct clk_hw *mbus_hws[] = { &mbus_clk.common.hw };
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index 579a81bb46df..52e4369664c5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -350,7 +350,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(de_clk, "de", de_parents,
0x104, 0, 4, 24, 2, BIT(31),
CLK_SET_RATE_PARENT);
-static const char * const tcon_parents[] = { "pll-video" };
+static const char * const tcon_parents[] = { "pll-video", "pll-periph0" };
static SUNXI_CCU_M_WITH_MUX_GATE(tcon_clk, "tcon", tcon_parents,
0x118, 0, 4, 24, 3, BIT(31), 0);
@@ -362,11 +362,11 @@ static const char * const csi_mclk_parents[] = { "osc24M", "pll-video",
static SUNXI_CCU_M_WITH_MUX_GATE(csi0_mclk_clk, "csi0-mclk", csi_mclk_parents,
0x130, 0, 5, 8, 3, BIT(15), 0);
-static const char * const csi1_sclk_parents[] = { "pll-video", "pll-isp" };
-static SUNXI_CCU_M_WITH_MUX_GATE(csi1_sclk_clk, "csi-sclk", csi1_sclk_parents,
+static const char * const csi_sclk_parents[] = { "pll-video", "pll-isp" };
+static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
0x134, 16, 4, 24, 3, BIT(31), 0);
-static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi-mclk", csi_mclk_parents,
+static SUNXI_CCU_M_WITH_MUX_GATE(csi1_mclk_clk, "csi1-mclk", csi_mclk_parents,
0x134, 0, 5, 8, 3, BIT(15), 0);
static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
@@ -452,7 +452,7 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
&tcon_clk.common,
&csi_misc_clk.common,
&csi0_mclk_clk.common,
- &csi1_sclk_clk.common,
+ &csi_sclk_clk.common,
&csi1_mclk_clk.common,
&ve_clk.common,
&ac_dig_clk.common,
@@ -551,7 +551,7 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
[CLK_TCON0] = &tcon_clk.common.hw,
[CLK_CSI_MISC] = &csi_misc_clk.common.hw,
[CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
- [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
[CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
[CLK_VE] = &ve_clk.common.hw,
[CLK_AC_DIG] = &ac_dig_clk.common.hw,
@@ -633,7 +633,7 @@ static struct clk_hw_onecell_data sun8i_v3_hw_clks = {
[CLK_TCON0] = &tcon_clk.common.hw,
[CLK_CSI_MISC] = &csi_misc_clk.common.hw,
[CLK_CSI0_MCLK] = &csi0_mclk_clk.common.hw,
- [CLK_CSI1_SCLK] = &csi1_sclk_clk.common.hw,
+ [CLK_CSI_SCLK] = &csi_sclk_clk.common.hw,
[CLK_CSI1_MCLK] = &csi1_mclk_clk.common.hw,
[CLK_VE] = &ve_clk.common.hw,
[CLK_AC_DIG] = &ac_dig_clk.common.hw,
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 09549451dd51..2edc13ca184e 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,6 +22,7 @@
#include <linux/irq.h>
#include <linux/acpi.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <hyperv/hvhdk.h>
#include <asm/mshyperv.h>
diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c
index 49e86cb70a7a..61f1e27fc41e 100644
--- a/drivers/clocksource/timer-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -43,7 +43,7 @@ static struct delay_timer orion_delay_timer = {
.read_current_timer = orion_read_timer,
};
-static void orion_delay_timer_init(unsigned long rate)
+static void __init orion_delay_timer_init(unsigned long rate)
{
orion_delay_timer.freq = rate;
register_current_timer_delay(&orion_delay_timer);
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 3383a7ce27ff..c83fd14dd7ad 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -1556,21 +1556,27 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
}
for (i = 0; i < n_insns; ++i) {
+ unsigned int n = insns[i].n;
+
if (insns[i].insn & INSN_MASK_WRITE) {
if (copy_from_user(data, insns[i].data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_from_user failed\n");
ret = -EFAULT;
goto error;
}
+ if (n < MIN_SAMPLES) {
+ memset(&data[n], 0, (MIN_SAMPLES - n) *
+ sizeof(unsigned int));
+ }
}
ret = parse_insn(dev, insns + i, data, file);
if (ret < 0)
goto error;
if (insns[i].insn & INSN_MASK_READ) {
if (copy_to_user(insns[i].data, data,
- insns[i].n * sizeof(unsigned int))) {
+ n * sizeof(unsigned int))) {
dev_dbg(dev->class_dev,
"copy_to_user failed\n");
ret = -EFAULT;
@@ -1589,6 +1595,16 @@ error:
return i;
}
+#define MAX_INSNS MAX_SAMPLES
+static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns)
+{
+ if (n_insns > MAX_INSNS) {
+ dev_dbg(dev->class_dev, "insnlist length too large\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/*
* COMEDI_INSN ioctl
* synchronous instruction
@@ -1633,6 +1649,10 @@ static int do_insn_ioctl(struct comedi_device *dev,
ret = -EFAULT;
goto error;
}
+ if (insn->n < MIN_SAMPLES) {
+ memset(&data[insn->n], 0,
+ (MIN_SAMPLES - insn->n) * sizeof(unsigned int));
+ }
}
ret = parse_insn(dev, insn, data, file);
if (ret < 0)
@@ -2239,6 +2259,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
rc = -EFAULT;
break;
}
+ rc = check_insnlist_len(dev, insnlist.n_insns);
+ if (rc)
+ break;
insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns) {
rc = -ENOMEM;
@@ -3142,6 +3165,9 @@ static int compat_insnlist(struct file *file, unsigned long arg)
if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32)))
return -EFAULT;
+ rc = check_insnlist_len(dev, insnlist32.n_insns);
+ if (rc)
+ return rc;
insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL);
if (!insns)
return -ENOMEM;
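Both comedi ioctl paths now cap the instruction-list length and zero-pad short sample buffers up to MIN_SAMPLES, so drivers that unconditionally read a fixed minimum number of samples never see stale heap data. A small standalone sketch of the padding step, with an illustrative MIN_SAMPLES value:

#include <stdio.h>
#include <string.h>

#define MIN_SAMPLES 16U

/* Copy n user samples into data[] and zero the tail up to MIN_SAMPLES. */
static void fill_samples(unsigned int *data, size_t data_len,
			 const unsigned int *user, unsigned int n)
{
	if (n > data_len)
		n = data_len;

	memcpy(data, user, n * sizeof(*data));
	if (n < MIN_SAMPLES && MIN_SAMPLES <= data_len)
		memset(&data[n], 0, (MIN_SAMPLES - n) * sizeof(*data));
}

int main(void)
{
	unsigned int user[2] = { 7, 9 };
	unsigned int data[MIN_SAMPLES];

	fill_samples(data, MIN_SAMPLES, user, 2);
	printf("data[0]=%u data[1]=%u data[2]=%u data[15]=%u\n",
	       data[0], data[1], data[2], data[15]);
	return 0;
}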
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 376130bfba8a..9e4b7c840a8f 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -339,10 +339,10 @@ int comedi_dio_insn_config(struct comedi_device *dev,
unsigned int *data,
unsigned int mask)
{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
- if (!mask)
- mask = chan_mask;
+ if (!mask && chan < 32)
+ mask = 1U << chan;
switch (data[0]) {
case INSN_CONFIG_DIO_INPUT:
@@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
unsigned int *data)
{
- unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1)
+ unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1)
: 0xffffffff;
unsigned int mask = data[0] & chanmask;
unsigned int bits = data[1];
@@ -615,6 +615,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
unsigned int _data[2];
int ret;
+ if (insn->n == 0)
+ return 0;
+
memset(_data, 0, sizeof(_data));
memset(&_insn, 0, sizeof(_insn));
_insn.insn = INSN_BITS;
@@ -625,8 +628,8 @@ static int insn_rw_emulate_bits(struct comedi_device *dev,
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- _data[0] = 1 << (chan - base_chan); /* mask */
- _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
+ _data[0] = 1U << (chan - base_chan); /* mask */
+ _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */
}
ret = s->insn_bits(dev, s, &_insn, _data);
@@ -709,7 +712,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev)
if (s->type == COMEDI_SUBD_DO) {
if (s->n_chan < 32)
- s->io_bits = (1 << s->n_chan) - 1;
+ s->io_bits = (1U << s->n_chan) - 1;
else
s->io_bits = 0xffffffff;
}
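The drivers.c hunks above close a family of shift bugs: `1 << chan` is a signed shift, which overflows for chan == 31 and is undefined for chan >= 32, so the masks are now built from 1U and only when the channel fits in 32 bits. A standalone illustration of the guarded mask helpers:

#include <stdio.h>

/* Return a single-bit mask for chan, or 0 if it cannot be represented. */
static unsigned int chan_mask(unsigned int chan)
{
	return chan < 32 ? 1U << chan : 0;
}

/* Mask covering n channels, saturating at 32 (mirrors comedi_dio_update_state). */
static unsigned int group_mask(unsigned int n_chan)
{
	return n_chan < 32 ? (1U << n_chan) - 1 : 0xffffffffU;
}

int main(void)
{
	printf("chan_mask(31)  = 0x%08x\n", chan_mask(31));
	printf("chan_mask(40)  = 0x%08x\n", chan_mask(40));
	printf("group_mask(16) = 0x%08x\n", group_mask(16));
	printf("group_mask(32) = 0x%08x\n", group_mask(32));
	return 0;
}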
diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
index b00fab0b89d4..739cc4db52ac 100644
--- a/drivers/comedi/drivers/aio_iiro_16.c
+++ b/drivers/comedi/drivers/aio_iiro_16.c
@@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev,
* Digital input change of state interrupts are optionally supported
* using IRQ 2-7, 10-12, 14, or 15.
*/
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], aio_iiro_16_cos, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 9747e6d1f6eb..7984950f0f99 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -792,7 +792,7 @@ static void waveform_detach(struct comedi_device *dev)
{
struct waveform_private *devpriv = dev->private;
- if (devpriv) {
+ if (devpriv && dev->n_subdevices) {
timer_delete_sync(&devpriv->ai_timer);
timer_delete_sync(&devpriv->ao_timer);
}
diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
index b8ea737ad3d1..1b638f5b5a4f 100644
--- a/drivers/comedi/drivers/das16m1.c
+++ b/drivers/comedi/drivers/das16m1.c
@@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev,
devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
/* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
- if ((1 << it->options[1]) & 0xdcfc) {
+ if (it->options[1] >= 2 && it->options[1] <= 15 &&
+ (1 << it->options[1]) & 0xdcfc) {
ret = request_irq(it->options[1], das16m1_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
index 68f95330de45..7660487e563c 100644
--- a/drivers/comedi/drivers/das6402.c
+++ b/drivers/comedi/drivers/das6402.c
@@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev,
das6402_reset(dev);
/* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */
- if ((1 << it->options[1]) & 0x8cec) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & 0x8cec) {
ret = request_irq(it->options[1], das6402_interrupt, 0,
dev->board_name, dev);
if (ret == 0) {
diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
index 0df639c6a595..abca61a72cf7 100644
--- a/drivers/comedi/drivers/pcl812.c
+++ b/drivers/comedi/drivers/pcl812.c
@@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (IS_ERR(dev->pacer))
return PTR_ERR(dev->pacer);
- if ((1 << it->options[1]) & board->irq_bits) {
+ if (it->options[1] > 0 && it->options[1] < 16 &&
+ (1 << it->options[1]) & board->irq_bits) {
ret = request_irq(it->options[1], pcl812_interrupt, 0,
dev->board_name, dev);
if (ret == 0)
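The attach fixes in aio_iiro_16, das16m1, das6402 and pcl812 all have the same shape: validate the user-supplied IRQ option against the board's legal range before using it as a shift count in the validity bitmask. A compact sketch (the mask value is taken from one of the boards and is only illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Bit i set => IRQ i is usable on this board (example mask from das16m1). */
#define VALID_IRQS 0xdcfc

static bool irq_option_valid(int irq)
{
	return irq > 0 && irq < 16 && ((1 << irq) & VALID_IRQS);
}

int main(void)
{
	printf("irq 5:  %d\n", irq_option_valid(5));
	printf("irq 13: %d\n", irq_option_valid(13));
	printf("irq 99: %d\n", irq_option_valid(99));	/* out of range, no UB */
	return 0;
}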
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0d46402e3094..9be0503df55a 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -28,7 +28,6 @@ config ARM_APPLE_SOC_CPUFREQ
tristate "Apple Silicon SoC CPUFreq support"
depends on ARCH_APPLE || (COMPILE_TEST && 64BIT)
select PM_OPP
- default ARCH_APPLE
help
This adds the CPUFreq driver for Apple Silicon machines
(e.g. Apple M1).
@@ -238,7 +237,7 @@ config ARM_TEGRA20_CPUFREQ
This adds the CPUFreq driver support for Tegra20/30 SOCs.
config ARM_TEGRA124_CPUFREQ
- bool "Tegra124 CPUFreq support"
+ tristate "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
default ARCH_TEGRA
diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
index 5a3545bd0d8d..d96c1718f7f8 100644
--- a/drivers/cpufreq/armada-8k-cpufreq.c
+++ b/drivers/cpufreq/armada-8k-cpufreq.c
@@ -103,7 +103,7 @@ static void armada_8k_cpufreq_free_table(struct freq_table *freq_tables)
{
int opps_index, nb_cpus = num_possible_cpus();
- for (opps_index = 0 ; opps_index <= nb_cpus; opps_index++) {
+ for (opps_index = 0 ; opps_index < nb_cpus; opps_index++) {
int i;
/* If cpu_dev is NULL then we reached the end of the array */
@@ -132,7 +132,7 @@ static int __init armada_8k_cpufreq_init(void)
int ret = 0, opps_index = 0, cpu, nb_cpus;
struct freq_table *freq_tables;
struct device_node *node;
- static struct cpumask cpus;
+ static struct cpumask cpus, shared_cpus;
node = of_find_matching_node_and_match(NULL, armada_8k_cpufreq_of_match,
NULL);
@@ -154,7 +154,6 @@ static int __init armada_8k_cpufreq_init(void)
* divisions of it).
*/
for_each_cpu(cpu, &cpus) {
- struct cpumask shared_cpus;
struct device *cpu_dev;
struct clk *clk;
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 7b841a086acc..5940d262374f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -765,7 +765,7 @@ static void brcm_avs_cpufreq_remove(struct platform_device *pdev)
}
static const struct of_device_id brcm_avs_cpufreq_match[] = {
- { .compatible = BRCM_AVS_CPU_DATA },
+ { .compatible = "brcm,avs-cpu-data-mem" },
{ }
};
MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index b7c688a5659c..4a17162a392d 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -26,14 +26,6 @@
#include <acpi/cppc_acpi.h>
-/*
- * This list contains information parsed from per CPU ACPI _CPC and _PSD
- * structures: e.g. the highest and lowest supported performance, capabilities,
- * desired performance, level requested etc. Depending on the share_type, not
- * all CPUs will have an entry in the list.
- */
-static LIST_HEAD(cpu_data_list);
-
static struct cpufreq_driver cppc_cpufreq_driver;
#ifdef CONFIG_ACPI_CPPC_CPUFREQ_FIE
@@ -352,7 +344,6 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
#if defined(CONFIG_ARM64) && defined(CONFIG_ENERGY_MODEL)
static DEFINE_PER_CPU(unsigned int, efficiency_class);
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy);
/* Create an artificial performance state every CPPC_EM_CAP_STEP capacity unit. */
#define CPPC_EM_CAP_STEP (20)
@@ -488,7 +479,19 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
return 0;
}
-static int populate_efficiency_class(void)
+static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
+{
+ struct cppc_cpudata *cpu_data;
+ struct em_data_callback em_cb =
+ EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
+
+ cpu_data = policy->driver_data;
+ em_dev_register_perf_domain(get_cpu_device(policy->cpu),
+ get_perf_level_count(policy), &em_cb,
+ cpu_data->shared_cpu_map, 0);
+}
+
+static void populate_efficiency_class(void)
{
struct acpi_madt_generic_interrupt *gicc;
DECLARE_BITMAP(used_classes, 256) = {};
@@ -503,7 +506,7 @@ static int populate_efficiency_class(void)
if (bitmap_weight(used_classes, 256) <= 1) {
pr_debug("Efficiency classes are all equal (=%d). "
"No EM registered", class);
- return -EINVAL;
+ return;
}
/*
@@ -520,26 +523,11 @@ static int populate_efficiency_class(void)
index++;
}
cppc_cpufreq_driver.register_em = cppc_cpufreq_register_em;
-
- return 0;
-}
-
-static void cppc_cpufreq_register_em(struct cpufreq_policy *policy)
-{
- struct cppc_cpudata *cpu_data;
- struct em_data_callback em_cb =
- EM_ADV_DATA_CB(cppc_get_cpu_power, cppc_get_cpu_cost);
-
- cpu_data = policy->driver_data;
- em_dev_register_perf_domain(get_cpu_device(policy->cpu),
- get_perf_level_count(policy), &em_cb,
- cpu_data->shared_cpu_map, 0);
}
#else
-static int populate_efficiency_class(void)
+static void populate_efficiency_class(void)
{
- return 0;
}
#endif
@@ -567,8 +555,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
goto free_mask;
}
- list_add(&cpu_data->node, &cpu_data_list);
-
return cpu_data;
free_mask:
@@ -583,7 +569,6 @@ static void cppc_cpufreq_put_cpu_data(struct cpufreq_policy *policy)
{
struct cppc_cpudata *cpu_data = policy->driver_data;
- list_del(&cpu_data->node);
free_cpumask_var(cpu_data->shared_cpu_map);
kfree(cpu_data);
policy->driver_data = NULL;
@@ -925,7 +910,7 @@ static struct freq_attr *cppc_cpufreq_attr[] = {
};
static struct cpufreq_driver cppc_cpufreq_driver = {
- .flags = CPUFREQ_CONST_LOOPS,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = cppc_verify_policy,
.target = cppc_cpufreq_set_target,
.get = cppc_cpufreq_get_rate,
@@ -954,24 +939,10 @@ static int __init cppc_cpufreq_init(void)
return ret;
}
-static inline void free_cpu_data(void)
-{
- struct cppc_cpudata *iter, *tmp;
-
- list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
- free_cpumask_var(iter->shared_cpu_map);
- list_del(&iter->node);
- kfree(iter);
- }
-
-}
-
static void __exit cppc_cpufreq_exit(void)
{
cpufreq_unregister_driver(&cppc_cpufreq_driver);
cppc_freq_invariance_exit();
-
- free_cpu_data();
}
module_exit(cppc_cpufreq_exit);
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index a010da0f6337..015dd393eaba 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -143,6 +143,7 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "nvidia,tegra20", },
{ .compatible = "nvidia,tegra30", },
+ { .compatible = "nvidia,tegra114", },
{ .compatible = "nvidia,tegra124", },
{ .compatible = "nvidia,tegra210", },
{ .compatible = "nvidia,tegra234", },
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index e80dd982a3e2..506437489b4d 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -329,6 +329,17 @@ static struct platform_driver dt_cpufreq_platdrv = {
};
module_platform_driver(dt_cpufreq_platdrv);
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev)
+{
+ struct platform_device_info cpufreq_dt_devinfo = {};
+
+ cpufreq_dt_devinfo.name = "cpufreq-dt";
+ cpufreq_dt_devinfo.parent = dev;
+
+ return platform_device_register_full(&cpufreq_dt_devinfo);
+}
+EXPORT_SYMBOL_GPL(cpufreq_dt_pdev_register);
+
MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
diff --git a/drivers/cpufreq/cpufreq-dt.h b/drivers/cpufreq/cpufreq-dt.h
index 28c8af7ec5ef..fc1889aeb4f1 100644
--- a/drivers/cpufreq/cpufreq-dt.h
+++ b/drivers/cpufreq/cpufreq-dt.h
@@ -22,4 +22,6 @@ struct cpufreq_dt_platform_data {
int (*resume)(struct cpufreq_policy *policy);
};
+struct platform_device *cpufreq_dt_pdev_register(struct device *dev);
+
#endif /* __CPUFREQ_DT_H__ */
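cpufreq_dt_pdev_register() factors out the boilerplate SoC glue drivers used to open-code when spawning the generic cpufreq-dt platform device; tegra124-cpufreq below is converted to it. A hedged kernel-style sketch of how a glue driver's probe/remove pair might use the helper (the soc_cpufreq_* names are illustrative, error handling trimmed):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "cpufreq-dt.h"

/* Hypothetical per-driver state; real drivers keep clocks/regulators here too. */
struct soc_cpufreq_priv {
	struct platform_device *cpufreq_dt_pdev;
};

static int soc_cpufreq_probe(struct platform_device *pdev)
{
	struct soc_cpufreq_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Spawn the generic cpufreq-dt device, parented to this glue device. */
	priv->cpufreq_dt_pdev = cpufreq_dt_pdev_register(&pdev->dev);
	if (IS_ERR(priv->cpufreq_dt_pdev))
		return PTR_ERR(priv->cpufreq_dt_pdev);

	platform_set_drvdata(pdev, priv);
	return 0;
}

static void soc_cpufreq_remove(struct platform_device *pdev)
{
	struct soc_cpufreq_priv *priv = platform_get_drvdata(pdev);

	/* Tear down the child device before the glue driver goes away. */
	platform_device_unregister(priv->cpufreq_dt_pdev);
}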
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d7426e1d8bdd..fc7eace8b65b 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -109,6 +109,8 @@ void disable_cpufreq(void)
{
off = 1;
}
+EXPORT_SYMBOL_GPL(disable_cpufreq);
+
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
@@ -967,6 +969,7 @@ static struct attribute *cpufreq_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
+ &scaling_cur_freq.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
&affected_cpus.attr,
@@ -1095,10 +1098,6 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
- if (ret)
- return ret;
-
if (cpufreq_driver->bios_limit) {
ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
if (ret)
@@ -1284,6 +1283,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
goto err_free_real_cpus;
}
+ init_rwsem(&policy->rwsem);
+
freq_constraints_init(&policy->constraints);
policy->nb_min.notifier_call = cpufreq_notifier_min;
@@ -1306,7 +1307,6 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
}
INIT_LIST_HEAD(&policy->policy_list);
- init_rwsem(&policy->rwsem);
spin_lock_init(&policy->transition_lock);
init_waitqueue_head(&policy->transition_wait);
INIT_WORK(&policy->update, handle_update);
@@ -1694,14 +1694,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
return;
}
- if (has_target())
+ if (has_target()) {
strscpy(policy->last_governor, policy->governor->name,
CPUFREQ_NAME_LEN);
- else
- policy->last_policy = policy->policy;
-
- if (has_target())
cpufreq_exit_governor(policy);
+ } else {
+ policy->last_policy = policy->policy;
+ }
/*
* Perform the ->offline() during light-weight tear-down, as
@@ -1803,6 +1802,9 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
{
unsigned int new_freq;
+ if (!cpufreq_driver->get)
+ return 0;
+
new_freq = cpufreq_driver->get(policy->cpu);
if (!new_freq)
return 0;
@@ -1925,10 +1927,7 @@ unsigned int cpufreq_get(unsigned int cpu)
guard(cpufreq_policy_read)(policy);
- if (cpufreq_driver->get)
- return __cpufreq_get(policy);
-
- return 0;
+ return __cpufreq_get(policy);
}
EXPORT_SYMBOL(cpufreq_get);
@@ -2482,8 +2481,7 @@ int cpufreq_start_governor(struct cpufreq_policy *policy)
pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
- if (cpufreq_driver->get)
- cpufreq_verify_current_freq(policy, false);
+ cpufreq_verify_current_freq(policy, false);
if (policy->governor->start) {
ret = policy->governor->start(policy);
@@ -2715,10 +2713,12 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("starting governor %s failed\n", policy->governor->name);
if (old_gov) {
policy->governor = old_gov;
- if (cpufreq_init_governor(policy))
+ if (cpufreq_init_governor(policy)) {
policy->governor = NULL;
- else
- cpufreq_start_governor(policy);
+ } else if (cpufreq_start_governor(policy)) {
+ cpufreq_exit_governor(policy);
+ policy->governor = NULL;
+ }
}
return ret;
@@ -2944,15 +2944,6 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
if (driver_data->setpolicy)
driver_data->flags |= CPUFREQ_CONST_LOOPS;
@@ -2983,6 +2974,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("supports frequency invariance");
+ }
+
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
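The set_policy fallback above now unwinds the old governor if it re-initialises but fails to start, instead of leaving policy->governor pointing at a half-started governor. The shape is the usual undo-the-last-successful-step pattern; a generic, runnable sketch with an illustrative governor struct:

#include <stdio.h>

struct governor {
	const char *name;
	int (*init)(void);
	int (*start)(void);
	void (*exit)(void);
};

static struct governor *active;

/* Try to fall back to old_gov; leave 'active' NULL if that fails too. */
static void fallback_to(struct governor *old_gov)
{
	active = old_gov;
	if (active->init()) {
		active = NULL;
	} else if (active->start()) {
		active->exit();	/* undo init before giving up */
		active = NULL;
	}
}

static int ok(void) { return 0; }
static int fail(void) { return -1; }
static void noop(void) { }

int main(void)
{
	struct governor old = { "old", ok, fail, noop };

	fallback_to(&old);
	printf("active governor: %s\n", active ? active->name : "(none)");
	return 0;
}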
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 2c42fee76daa..77d62152cd38 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -134,6 +134,7 @@ static struct cpufreq_governor cpufreq_gov_userspace = {
.store_setspeed = cpufreq_set,
.show_setspeed = show_speed,
.owner = THIS_MODULE,
+ .flags = CPUFREQ_GOV_STRICT_TARGET,
};
MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>, "
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 64587d318267..06a1c7dd081f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2775,6 +2775,8 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
X86_MATCH(INTEL_TIGERLAKE, core_funcs),
X86_MATCH(INTEL_SAPPHIRERAPIDS_X, core_funcs),
X86_MATCH(INTEL_EMERALDRAPIDS_X, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_D, core_funcs),
+ X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -3249,8 +3251,8 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int max_pstate = policy->strict_target ?
target_pstate : cpu->max_perf_ratio;
- intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
- fast_switch);
+ intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate,
+ target_pstate, fast_switch);
} else if (target_pstate != old_pstate) {
intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
}
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 514146d98bca..f8a76bbecef9 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -16,6 +16,10 @@
#include <linux/pm_opp.h>
#include <linux/types.h>
+#include "cpufreq-dt.h"
+
+static struct platform_device *tegra124_cpufreq_pdev;
+
struct tegra124_cpufreq_priv {
struct clk *cpu_clk;
struct clk *pllp_clk;
@@ -55,7 +59,6 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
struct device_node *np __free(device_node) = of_cpu_device_node_get(0);
struct tegra124_cpufreq_priv *priv;
struct device *cpu_dev;
- struct platform_device_info cpufreq_dt_devinfo = {};
int ret;
if (!np)
@@ -95,11 +98,7 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
if (ret)
goto out_put_pllp_clk;
- cpufreq_dt_devinfo.name = "cpufreq-dt";
- cpufreq_dt_devinfo.parent = &pdev->dev;
-
- priv->cpufreq_dt_pdev =
- platform_device_register_full(&cpufreq_dt_devinfo);
+ priv->cpufreq_dt_pdev = cpufreq_dt_pdev_register(&pdev->dev);
if (IS_ERR(priv->cpufreq_dt_pdev)) {
ret = PTR_ERR(priv->cpufreq_dt_pdev);
goto out_put_pllp_clk;
@@ -173,6 +172,21 @@ disable_cpufreq:
return err;
}
+static void tegra124_cpufreq_remove(struct platform_device *pdev)
+{
+ struct tegra124_cpufreq_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (!IS_ERR(priv->cpufreq_dt_pdev)) {
+ platform_device_unregister(priv->cpufreq_dt_pdev);
+ priv->cpufreq_dt_pdev = ERR_PTR(-ENODEV);
+ }
+
+ clk_put(priv->pllp_clk);
+ clk_put(priv->pllx_clk);
+ clk_put(priv->dfll_clk);
+ clk_put(priv->cpu_clk);
+}
+
static const struct dev_pm_ops tegra124_cpufreq_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra124_cpufreq_suspend,
tegra124_cpufreq_resume)
@@ -182,15 +196,16 @@ static struct platform_driver tegra124_cpufreq_platdrv = {
.driver.name = "cpufreq-tegra124",
.driver.pm = &tegra124_cpufreq_pm_ops,
.probe = tegra124_cpufreq_probe,
+ .remove = tegra124_cpufreq_remove,
};
static int __init tegra_cpufreq_init(void)
{
int ret;
- struct platform_device *pdev;
- if (!(of_machine_is_compatible("nvidia,tegra124") ||
- of_machine_is_compatible("nvidia,tegra210")))
+ if (!(of_machine_is_compatible("nvidia,tegra114") ||
+ of_machine_is_compatible("nvidia,tegra124") ||
+ of_machine_is_compatible("nvidia,tegra210")))
return -ENODEV;
/*
@@ -201,15 +216,25 @@ static int __init tegra_cpufreq_init(void)
if (ret)
return ret;
- pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
- if (IS_ERR(pdev)) {
+ tegra124_cpufreq_pdev = platform_device_register_simple("cpufreq-tegra124", -1, NULL, 0);
+ if (IS_ERR(tegra124_cpufreq_pdev)) {
platform_driver_unregister(&tegra124_cpufreq_platdrv);
- return PTR_ERR(pdev);
+ return PTR_ERR(tegra124_cpufreq_pdev);
}
return 0;
}
module_init(tegra_cpufreq_init);
+static void __exit tegra_cpufreq_module_exit(void)
+{
+ if (!IS_ERR_OR_NULL(tegra124_cpufreq_pdev))
+ platform_device_unregister(tegra124_cpufreq_pdev);
+
+ platform_driver_unregister(&tegra124_cpufreq_platdrv);
+}
+module_exit(tegra_cpufreq_module_exit);
+
MODULE_AUTHOR("Tuomas Tynkkynen <ttynkkynen@nvidia.com>");
MODULE_DESCRIPTION("cpufreq driver for NVIDIA Tegra124");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 4e1ba35deda9..b19bc60cc627 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -45,7 +45,6 @@ struct psci_cpuidle_domain_state {
static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data);
static DEFINE_PER_CPU(struct psci_cpuidle_domain_state, psci_domain_state);
static bool psci_cpuidle_use_syscore;
-static bool psci_cpuidle_use_cpuhp;
void psci_set_domain_state(struct generic_pm_domain *pd, unsigned int state_idx,
u32 state)
@@ -124,8 +123,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu)
{
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
- if (pd_dev)
- pm_runtime_get_sync(pd_dev);
+ if (pd_dev) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_get_sync(pd_dev);
+ else
+ dev_pm_genpd_resume(pd_dev);
+ }
return 0;
}
@@ -135,7 +138,11 @@ static int psci_idle_cpuhp_down(unsigned int cpu)
struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev);
if (pd_dev) {
- pm_runtime_put_sync(pd_dev);
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ pm_runtime_put_sync(pd_dev);
+ else
+ dev_pm_genpd_suspend(pd_dev);
+
/* Clear domain state to start fresh at next online. */
psci_clear_domain_state();
}
@@ -196,9 +203,6 @@ static void psci_idle_init_cpuhp(void)
{
int err;
- if (!psci_cpuidle_use_cpuhp)
- return;
-
err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING,
"cpuidle/psci:online",
psci_idle_cpuhp_up,
@@ -259,10 +263,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* s2ram and s2idle.
*/
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
- if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
- psci_cpuidle_use_cpuhp = true;
- }
return 0;
}
@@ -339,7 +341,6 @@ static void psci_cpu_deinit_idle(int cpu)
dt_idle_detach_cpu(data->dev);
psci_cpuidle_use_syscore = false;
- psci_cpuidle_use_cpuhp = false;
}
static int psci_idle_init_cpu(struct device *dev, int cpu)
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 97feb7d8fb23..558d49838990 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -98,7 +98,6 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
{
int cpu;
struct device_node *cpu_node, *curr_state_node;
- bool valid = true;
/*
* Compare idle state phandles for index idx on all CPUs in the
@@ -107,20 +106,17 @@ static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
* retrieved from. If a mismatch is found bail out straight
* away since we certainly hit a firmware misconfiguration.
*/
- for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
- cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
+ cpu = cpumask_first(cpumask) + 1;
+ for_each_cpu_from(cpu, cpumask) {
cpu_node = of_cpu_device_node_get(cpu);
curr_state_node = of_get_cpu_state_node(cpu_node, idx);
- if (state_node != curr_state_node)
- valid = false;
-
of_node_put(curr_state_node);
of_node_put(cpu_node);
- if (!valid)
- break;
+ if (state_node != curr_state_node)
+ return false;
}
- return valid;
+ return true;
}
/**
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index af37477ffd8d..be21e4e2016c 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -314,30 +314,30 @@ static int chcr_compute_partial_hash(struct shash_desc *desc,
if (digest_size == SHA1_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha1_st);
+ crypto_shash_export_core(desc, &sha1_st);
memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
} else if (digest_size == SHA224_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
+ crypto_shash_export_core(desc, &sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA256_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha256_st);
+ crypto_shash_export_core(desc, &sha256_st);
memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
} else if (digest_size == SHA384_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
+ crypto_shash_export_core(desc, &sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else if (digest_size == SHA512_DIGEST_SIZE) {
error = crypto_shash_init(desc) ?:
crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export(desc, (void *)&sha512_st);
+ crypto_shash_export_core(desc, &sha512_st);
memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
} else {
error = -EINVAL;
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index e050f5ff5efb..a8f735390f0d 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -705,17 +705,17 @@ static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha1-generic");
+ return img_hash_cra_init(tfm, "sha1-lib");
}
static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha224-generic");
+ return img_hash_cra_init(tfm, "sha224-lib");
}
static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "sha256-generic");
+ return img_hash_cra_init(tfm, "sha256-lib");
}
static void img_hash_cra_exit(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9ca80d082c4f..c3b2b22934b7 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1218,7 +1218,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_xts_aes,
&safexcel_alg_gcm,
&safexcel_alg_ccm,
- &safexcel_alg_crc32,
&safexcel_alg_cbcmac,
&safexcel_alg_xcbcmac,
&safexcel_alg_cmac,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 0c79ad78d1c0..0f27367a85fa 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -959,7 +959,6 @@ extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_xts_aes;
extern struct safexcel_alg_template safexcel_alg_gcm;
extern struct safexcel_alg_template safexcel_alg_ccm;
-extern struct safexcel_alg_template safexcel_alg_crc32;
extern struct safexcel_alg_template safexcel_alg_cbcmac;
extern struct safexcel_alg_template safexcel_alg_xcbcmac;
extern struct safexcel_alg_template safexcel_alg_cmac;
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index d2b632193beb..fd34dc8f5707 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -289,14 +289,8 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv,
return 1;
}
- if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
- ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
- /* Undo final XOR with 0xffffffff ...*/
- *(__le32 *)areq->result = ~sreq->state[0];
- } else {
- memcpy(areq->result, sreq->state,
- crypto_ahash_digestsize(ahash));
- }
+ memcpy(areq->result, sreq->state,
+ crypto_ahash_digestsize(ahash));
}
cache_len = safexcel_queued_len(sreq);
@@ -1881,88 +1875,6 @@ struct safexcel_alg_template safexcel_alg_hmac_md5 = {
},
};
-static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret = safexcel_ahash_cra_init(tfm);
-
- /* Default 'key' is all zeroes */
- memset(&ctx->base.ipad, 0, sizeof(u32));
- return ret;
-}
-
-static int safexcel_crc32_init(struct ahash_request *areq)
-{
- struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
- struct safexcel_ahash_req *req = ahash_request_ctx_dma(areq);
-
- memset(req, 0, sizeof(*req));
-
- /* Start from loaded key */
- req->state[0] = cpu_to_le32(~ctx->base.ipad.word[0]);
- /* Set processed to non-zero to enable invalidation detection */
- req->len = sizeof(u32);
- req->processed = sizeof(u32);
-
- ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
- req->digest = CONTEXT_CONTROL_DIGEST_XCM;
- req->state_sz = sizeof(u32);
- req->digest_sz = sizeof(u32);
- req->block_sz = sizeof(u32);
-
- return 0;
-}
-
-static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- memcpy(&ctx->base.ipad, key, sizeof(u32));
- return 0;
-}
-
-static int safexcel_crc32_digest(struct ahash_request *areq)
-{
- return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
-}
-
-struct safexcel_alg_template safexcel_alg_crc32 = {
- .type = SAFEXCEL_ALG_TYPE_AHASH,
- .algo_mask = 0,
- .alg.ahash = {
- .init = safexcel_crc32_init,
- .update = safexcel_ahash_update,
- .final = safexcel_ahash_final,
- .finup = safexcel_ahash_finup,
- .digest = safexcel_crc32_digest,
- .setkey = safexcel_crc32_setkey,
- .export = safexcel_ahash_export,
- .import = safexcel_ahash_import,
- .halg = {
- .digestsize = sizeof(u32),
- .statesize = sizeof(struct safexcel_ahash_export_state),
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "safexcel-crc32",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
- .cra_init = safexcel_crc32_cra_init,
- .cra_exit = safexcel_ahash_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
- },
-};
-
static int safexcel_cbcmac_init(struct ahash_request *areq)
{
struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 3c4bba4a8779..c03a69851114 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -5,11 +5,11 @@
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
+#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
@@ -154,19 +154,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
+ if (crypto_shash_export_core(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
+ if (crypto_shash_export_core(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
+ if (crypto_shash_export_core(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
@@ -190,19 +190,19 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &ctx->sha1))
+ if (crypto_shash_export_core(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &ctx->sha256))
+ if (crypto_shash_export_core(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &ctx->sha512))
+ if (crypto_shash_export_core(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 2c60a1047bc3..6cfe0238f615 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -493,25 +493,25 @@ static int starfive_hash_setkey(struct crypto_ahash *hash,
static int starfive_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha224-generic",
+ return starfive_hash_init_tfm(hash, "sha224-lib",
STARFIVE_HASH_SHA224, 0);
}
static int starfive_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha256-generic",
+ return starfive_hash_init_tfm(hash, "sha256-lib",
STARFIVE_HASH_SHA256, 0);
}
static int starfive_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha384-generic",
+ return starfive_hash_init_tfm(hash, "sha384-lib",
STARFIVE_HASH_SHA384, 0);
}
static int starfive_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "sha512-generic",
+ return starfive_hash_init_tfm(hash, "sha512-lib",
STARFIVE_HASH_SHA512, 0);
}
@@ -523,25 +523,25 @@ static int starfive_sm3_init_tfm(struct crypto_ahash *hash)
static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha224-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha224-lib",
STARFIVE_HASH_SHA224, 1);
}
static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha256-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha256-lib",
STARFIVE_HASH_SHA256, 1);
}
static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha384-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha384-lib",
STARFIVE_HASH_SHA384, 1);
}
static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash)
{
- return starfive_hash_init_tfm(hash, "hmac(sha512-generic)",
+ return starfive_hash_init_tfm(hash, "hmac-sha512-lib",
STARFIVE_HASH_SHA512, 1);
}
diff --git a/drivers/crypto/stm32/Kconfig b/drivers/crypto/stm32/Kconfig
index 49dfd161e9b9..d6dc848c82ee 100644
--- a/drivers/crypto/stm32/Kconfig
+++ b/drivers/crypto/stm32/Kconfig
@@ -1,13 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config CRYPTO_DEV_STM32_CRC
- tristate "Support for STM32 crc accelerators"
- depends on ARCH_STM32
- select CRYPTO_HASH
- select CRC32
- help
- This enables support for the CRC32 hw accelerator which can be found
- on STMicroelectronics STM32 SOC.
-
config CRYPTO_DEV_STM32_HASH
tristate "Support for STM32 hash accelerators"
depends on ARCH_STM32 || ARCH_U8500
diff --git a/drivers/crypto/stm32/Makefile b/drivers/crypto/stm32/Makefile
index 518e0e0b11a9..c63004026afb 100644
--- a/drivers/crypto/stm32/Makefile
+++ b/drivers/crypto/stm32/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_STM32_CRC) += stm32-crc32.o
obj-$(CONFIG_CRYPTO_DEV_STM32_HASH) += stm32-hash.o
obj-$(CONFIG_CRYPTO_DEV_STM32_CRYP) += stm32-cryp.o
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
deleted file mode 100644
index fd29785a3ecf..000000000000
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ /dev/null
@@ -1,480 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) STMicroelectronics SA 2017
- * Author: Fabien Dessenne <fabien.dessenne@st.com>
- */
-
-#include <linux/bitrev.h>
-#include <linux/clk.h>
-#include <linux/crc32.h>
-#include <linux/crc32poly.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-
-#include <crypto/internal/hash.h>
-
-#include <linux/unaligned.h>
-
-#define DRIVER_NAME "stm32-crc32"
-#define CHKSUM_DIGEST_SIZE 4
-#define CHKSUM_BLOCK_SIZE 1
-
-/* Registers */
-#define CRC_DR 0x00000000
-#define CRC_CR 0x00000008
-#define CRC_INIT 0x00000010
-#define CRC_POL 0x00000014
-
-/* Registers values */
-#define CRC_CR_RESET BIT(0)
-#define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5))
-#define CRC_CR_REV_IN_BYTE BIT(5)
-#define CRC_CR_REV_OUT BIT(7)
-#define CRC32C_INIT_DEFAULT 0xFFFFFFFF
-
-#define CRC_AUTOSUSPEND_DELAY 50
-
-static unsigned int burst_size;
-module_param(burst_size, uint, 0644);
-MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
-
-struct stm32_crc {
- struct list_head list;
- struct device *dev;
- void __iomem *regs;
- struct clk *clk;
- spinlock_t lock;
-};
-
-struct stm32_crc_list {
- struct list_head dev_list;
- spinlock_t lock; /* protect dev_list */
-};
-
-static struct stm32_crc_list crc_list = {
- .dev_list = LIST_HEAD_INIT(crc_list.dev_list),
- .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
-};
-
-struct stm32_crc_ctx {
- u32 key;
- u32 poly;
-};
-
-struct stm32_crc_desc_ctx {
- u32 partial; /* crc32c: partial in first 4 bytes of that struct */
-};
-
-static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = 0;
- mctx->poly = CRC32_POLY_LE;
- return 0;
-}
-
-static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
-{
- struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
-
- mctx->key = CRC32C_INIT_DEFAULT;
- mctx->poly = CRC32C_POLY_LE;
- return 0;
-}
-
-static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
-
- if (keylen != sizeof(u32))
- return -EINVAL;
-
- mctx->key = get_unaligned_le32(key);
- return 0;
-}
-
-static struct stm32_crc *stm32_crc_get_next_crc(void)
-{
- struct stm32_crc *crc;
-
- spin_lock_bh(&crc_list.lock);
- crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
- if (crc)
- list_move_tail(&crc->list, &crc_list.dev_list);
- spin_unlock_bh(&crc_list.lock);
-
- return crc;
-}
-
-static int stm32_crc_init(struct shash_desc *desc)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
- unsigned long flags;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- spin_lock_irqsave(&crc->lock, flags);
-
- /* Reset, set key, poly and configure in bit reverse mode */
- writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock_irqrestore(&crc->lock, flags);
-
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int burst_update(struct shash_desc *desc, const u8 *d8,
- size_t length)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
- struct stm32_crc *crc;
-
- crc = stm32_crc_get_next_crc();
- if (!crc)
- return -ENODEV;
-
- pm_runtime_get_sync(crc->dev);
-
- if (!spin_trylock(&crc->lock)) {
- /* Hardware is busy, calculate crc32 by software */
- if (mctx->poly == CRC32_POLY_LE)
- ctx->partial = crc32_le(ctx->partial, d8, length);
- else
- ctx->partial = crc32c(ctx->partial, d8, length);
-
- goto pm_out;
- }
-
- /*
- * Restore previously calculated CRC for this context as init value
- * Restore polynomial configuration
- * Configure in register for word input data,
- * Configure out register in reversed bit mode data.
- */
- writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
- writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
- writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
-
- if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- length--;
- }
- /* Configure for word data */
- writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- }
-
- for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
- writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
-
- if (length) {
- /* Configure for byte data */
- writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
- crc->regs + CRC_CR);
- while (length--)
- writeb_relaxed(*d8++, crc->regs + CRC_DR);
- }
-
- /* Store partial result */
- ctx->partial = readl_relaxed(crc->regs + CRC_DR);
-
- spin_unlock(&crc->lock);
-
-pm_out:
- pm_runtime_mark_last_busy(crc->dev);
- pm_runtime_put_autosuspend(crc->dev);
-
- return 0;
-}
-
-static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
- unsigned int length)
-{
- const unsigned int burst_sz = burst_size;
- unsigned int rem_sz;
- const u8 *cur;
- size_t size;
- int ret;
-
- if (!burst_sz)
- return burst_update(desc, d8, length);
-
- /* Digest first bytes not 32bit aligned at first pass in the loop */
- size = min_t(size_t, length, burst_sz + (size_t)d8 -
- ALIGN_DOWN((size_t)d8, sizeof(u32)));
- for (rem_sz = length, cur = d8; rem_sz;
- rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
- ret = burst_update(desc, cur, size);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int stm32_crc_final(struct shash_desc *desc, u8 *out)
-{
- struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
- struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
-
- /* Send computed CRC */
- put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
- ~ctx->partial : ctx->partial, out);
-
- return 0;
-}
-
-static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_update(desc, data, length) ?:
- stm32_crc_final(desc, out);
-}
-
-static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
- unsigned int length, u8 *out)
-{
- return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
-}
-
-static unsigned int refcnt;
-static DEFINE_MUTEX(refcnt_lock);
-static struct shash_alg algs[] = {
- /* CRC-32 */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32",
- .cra_driver_name = "stm32-crc32-crc32",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32_cra_init,
- }
- },
- /* CRC-32Castagnoli */
- {
- .setkey = stm32_crc_setkey,
- .init = stm32_crc_init,
- .update = stm32_crc_update,
- .final = stm32_crc_final,
- .finup = stm32_crc_finup,
- .digest = stm32_crc_digest,
- .descsize = sizeof(struct stm32_crc_desc_ctx),
- .digestsize = CHKSUM_DIGEST_SIZE,
- .base = {
- .cra_name = "crc32c",
- .cra_driver_name = "stm32-crc32-crc32c",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct stm32_crc_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = stm32_crc32c_cra_init,
- }
- }
-};
-
-static int stm32_crc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct stm32_crc *crc;
- int ret;
-
- crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
- if (!crc)
- return -ENOMEM;
-
- crc->dev = dev;
-
- crc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(crc->regs)) {
- dev_err(dev, "Cannot map CRC IO\n");
- return PTR_ERR(crc->regs);
- }
-
- crc->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(crc->clk)) {
- dev_err(dev, "Could not get clock\n");
- return PTR_ERR(crc->clk);
- }
-
- ret = clk_prepare_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(dev);
-
- pm_runtime_get_noresume(dev);
- pm_runtime_set_active(dev);
- pm_runtime_irq_safe(dev);
- pm_runtime_enable(dev);
-
- spin_lock_init(&crc->lock);
-
- platform_set_drvdata(pdev, crc);
-
- spin_lock(&crc_list.lock);
- list_add(&crc->list, &crc_list.dev_list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!refcnt) {
- ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
- if (ret) {
- mutex_unlock(&refcnt_lock);
- dev_err(dev, "Failed to register\n");
- clk_disable_unprepare(crc->clk);
- return ret;
- }
- }
- refcnt++;
- mutex_unlock(&refcnt_lock);
-
- dev_info(dev, "Initialized\n");
-
- pm_runtime_put_sync(dev);
-
- return 0;
-}
-
-static void stm32_crc_remove(struct platform_device *pdev)
-{
- struct stm32_crc *crc = platform_get_drvdata(pdev);
- int ret = pm_runtime_get_sync(crc->dev);
-
- spin_lock(&crc_list.lock);
- list_del(&crc->list);
- spin_unlock(&crc_list.lock);
-
- mutex_lock(&refcnt_lock);
- if (!--refcnt)
- crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
- mutex_unlock(&refcnt_lock);
-
- pm_runtime_disable(crc->dev);
- pm_runtime_put_noidle(crc->dev);
-
- if (ret >= 0)
- clk_disable(crc->clk);
- clk_unprepare(crc->clk);
-}
-
-static int __maybe_unused stm32_crc_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = pm_runtime_force_suspend(dev);
- if (ret)
- return ret;
-
- clk_unprepare(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_prepare(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to prepare clock\n");
- return ret;
- }
-
- return pm_runtime_force_resume(dev);
-}
-
-static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
-
- clk_disable(crc->clk);
-
- return 0;
-}
-
-static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
-{
- struct stm32_crc *crc = dev_get_drvdata(dev);
- int ret;
-
- ret = clk_enable(crc->clk);
- if (ret) {
- dev_err(crc->dev, "Failed to enable clock\n");
- return ret;
- }
-
- return 0;
-}
-
-static const struct dev_pm_ops stm32_crc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
- stm32_crc_resume)
- SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
- stm32_crc_runtime_resume, NULL)
-};
-
-static const struct of_device_id stm32_dt_ids[] = {
- { .compatible = "st,stm32f7-crc", },
- {},
-};
-MODULE_DEVICE_TABLE(of, stm32_dt_ids);
-
-static struct platform_driver stm32_crc_driver = {
- .probe = stm32_crc_probe,
- .remove = stm32_crc_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &stm32_crc_pm_ops,
- .of_match_table = stm32_dt_ids,
- },
-};
-
-module_platform_driver(stm32_crc_driver);
-
-MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 3c4862a752b5..c999c4a1e567 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -90,6 +90,17 @@ config ARM_EXYNOS_BUS_DEVFREQ
and adjusts the operating frequencies and voltages with OPP support.
This does not yet operate with optimal voltages.
+config ARM_HISI_UNCORE_DEVFREQ
+ tristate "HiSilicon uncore DEVFREQ Driver"
+ depends on ACPI && ACPI_PPTT && PCC
+ select DEVFREQ_GOV_PERFORMANCE
+ select DEVFREQ_GOV_USERSPACE
+ help
+	  This adds a DEVFREQ driver that manages uncore frequency scaling for
+	  HiSilicon Kunpeng SoCs, enabling runtime control of the uncore
+	  frequency from both kernel and userspace. The uncore domain contains
+	  the system interconnects and the L3 cache.
+
config ARM_IMX_BUS_DEVFREQ
tristate "i.MX Generic Bus DEVFREQ Driver"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index bf40d04928d0..404179d79a9d 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o
+obj-$(CONFIG_ARM_HISI_UNCORE_DEVFREQ) += hisi_uncore_freq.o
obj-$(CONFIG_ARM_IMX_BUS_DEVFREQ) += imx-bus.o
obj-$(CONFIG_ARM_IMX8M_DDRC_DEVFREQ) += imx8m-ddrc.o
obj-$(CONFIG_ARM_MEDIATEK_CCI_DEVFREQ) += mtk-cci-devfreq.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 98657d3b9435..2e8d01d47f69 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -152,11 +152,8 @@ void devfreq_get_freq_range(struct devfreq *devfreq,
(unsigned long)HZ_PER_KHZ * qos_max_freq);
/* Apply constraints from OPP interface */
- *min_freq = max(*min_freq, devfreq->scaling_min_freq);
- *max_freq = min(*max_freq, devfreq->scaling_max_freq);
-
- if (*min_freq > *max_freq)
- *min_freq = *max_freq;
+ *max_freq = clamp(*max_freq, devfreq->scaling_min_freq, devfreq->scaling_max_freq);
+ *min_freq = clamp(*min_freq, devfreq->scaling_min_freq, *max_freq);
}
EXPORT_SYMBOL(devfreq_get_freq_range);
@@ -807,7 +804,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
- unsigned long min_freq, max_freq;
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -835,6 +831,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
+ devfreq->dev.groups = profile->dev_groups;
devfreq->dev.release = devfreq_dev_release;
INIT_LIST_HEAD(&devfreq->node);
devfreq->profile = profile;
@@ -875,8 +872,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_dev;
}
- devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
-
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
if (IS_ERR(devfreq->opp_table))
@@ -1382,15 +1377,11 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
int ret;
struct device *dev = devfreq->dev.parent;
+ if (!devfreq->governor)
+ continue;
+
if (!strncmp(devfreq->governor->name, governor->name,
DEVFREQ_NAME_LEN)) {
- /* we should have a devfreq governor! */
- if (!devfreq->governor) {
- dev_warn(dev, "%s: Governor %s NOT present\n",
- __func__, governor->name);
- continue;
- /* Fall through */
- }
ret = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (ret) {
@@ -1743,7 +1734,7 @@ static ssize_t trans_stat_show(struct device *dev,
for (i = 0; i < max_state; i++) {
if (len >= PAGE_SIZE - 1)
break;
- if (df->freq_table[2] == df->previous_freq)
+ if (df->freq_table[i] == df->previous_freq)
len += sysfs_emit_at(buf, len, "*");
else
len += sysfs_emit_at(buf, len, " ");
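Note: the devfreq_get_freq_range() hunk above replaces the open-coded min/max adjustment with two clamp() calls; clamping *max_freq into the OPP scaling range first, then clamping *min_freq against that result, guarantees min <= max without the explicit fix-up branch. A minimal standalone sketch of that ordering (plain C, with a local clamp() stand-in for the kernel macro and invented frequency values):

	#include <assert.h>
	#include <stdio.h>

	/* Local stand-in for the kernel's clamp() macro, for illustration only. */
	#define clamp(val, lo, hi) ((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

	int main(void)
	{
		/* Hypothetical QoS request and OPP-derived scaling limits (Hz). */
		unsigned long min_freq = 800000000, max_freq = 1200000000;
		unsigned long scaling_min_freq = 400000000, scaling_max_freq = 1000000000;

		/* Same ordering as the patched devfreq_get_freq_range(). */
		max_freq = clamp(max_freq, scaling_min_freq, scaling_max_freq);
		min_freq = clamp(min_freq, scaling_min_freq, max_freq);

		assert(min_freq <= max_freq);
		printf("range: %lu..%lu Hz\n", min_freq, max_freq);
		return 0;
	}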
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index d1aa6806b683..175de0c0b50e 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/kstrtox.h>
#include <linux/pm.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -39,10 +40,13 @@ static ssize_t set_freq_store(struct device *dev, struct device_attribute *attr,
unsigned long wanted;
int err = 0;
+ err = kstrtoul(buf, 0, &wanted);
+ if (err)
+ return err;
+
mutex_lock(&devfreq->lock);
data = devfreq->governor_data;
- sscanf(buf, "%lu", &wanted);
data->user_frequency = wanted;
data->valid = true;
err = update_devfreq(devfreq);
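Note: the governor_userspace hunk above switches set_freq_store() from an unchecked sscanf() to kstrtoul(), so malformed input is rejected with an error before the devfreq lock is taken. A rough userspace analogue of that stricter parsing, using strtoul() with end-pointer checks (the helper name is ours, not a kernel API):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative helper: accept only a complete, valid unsigned number,
	 * roughly what kstrtoul() enforces in the kernel. */
	static int parse_ulong_strict(const char *buf, unsigned long *out)
	{
		char *end;

		errno = 0;
		*out = strtoul(buf, &end, 0);
		if (errno || end == buf)
			return -EINVAL;
		/* Allow one trailing newline, as sysfs writes usually carry one. */
		if (*end == '\n')
			end++;
		if (*end != '\0')
			return -EINVAL;
		return 0;
	}

	int main(void)
	{
		unsigned long wanted;

		/* "12abc" would silently become 12 with sscanf("%lu"); here it fails. */
		printf("\"800000\\n\" -> %d\n", parse_ulong_strict("800000\n", &wanted));
		printf("\"12abc\"    -> %d\n", parse_ulong_strict("12abc", &wanted));
		return 0;
	}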
diff --git a/drivers/devfreq/hisi_uncore_freq.c b/drivers/devfreq/hisi_uncore_freq.c
new file mode 100644
index 000000000000..96d1815059e3
--- /dev/null
+++ b/drivers/devfreq/hisi_uncore_freq.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HiSilicon uncore frequency scaling driver
+ *
+ * Copyright (c) 2025 HiSilicon Co., Ltd
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/devfreq.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+#include <linux/topology.h>
+#include <linux/units.h>
+#include <acpi/pcc.h>
+
+#include "governor.h"
+
+struct hisi_uncore_pcc_data {
+ u16 status;
+ u16 resv;
+ u32 data;
+};
+
+struct hisi_uncore_pcc_shmem {
+ struct acpi_pcct_shared_memory head;
+ struct hisi_uncore_pcc_data pcc_data;
+};
+
+enum hisi_uncore_pcc_cmd_type {
+ HUCF_PCC_CMD_GET_CAP = 0,
+ HUCF_PCC_CMD_GET_FREQ,
+ HUCF_PCC_CMD_SET_FREQ,
+ HUCF_PCC_CMD_GET_MODE,
+ HUCF_PCC_CMD_SET_MODE,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ HUCF_PCC_CMD_MAX = 256
+};
+
+static int hisi_platform_gov_usage;
+static DEFINE_MUTEX(hisi_platform_gov_usage_lock);
+
+enum hisi_uncore_freq_mode {
+ HUCF_MODE_PLATFORM = 0,
+ HUCF_MODE_OS,
+ HUCF_MODE_MAX
+};
+
+#define HUCF_CAP_PLATFORM_CTRL BIT(0)
+
+/**
+ * struct hisi_uncore_freq - hisi uncore frequency scaling device data
+ * @dev: device of this frequency scaling driver
+ * @cl: mailbox client object
+ * @pchan: PCC mailbox channel
+ * @chan_id: PCC channel ID
+ * @last_cmd_cmpl_time: timestamp of the last completed PCC command
+ * @pcc_lock: PCC channel lock
+ * @devfreq: devfreq data of this hisi_uncore_freq device
+ * @related_cpus:	CPUs whose performance is most directly affected by this
+ * uncore frequency domain
+ * @cap: capability flag
+ */
+struct hisi_uncore_freq {
+ struct device *dev;
+ struct mbox_client cl;
+ struct pcc_mbox_chan *pchan;
+ int chan_id;
+ ktime_t last_cmd_cmpl_time;
+ struct mutex pcc_lock;
+ struct devfreq *devfreq;
+ struct cpumask related_cpus;
+ u32 cap;
+};
+
+/* PCC channel timeout = PCC nominal latency * NUM */
+#define HUCF_PCC_POLL_TIMEOUT_NUM 1000
+#define HUCF_PCC_POLL_INTERVAL_US 5
+
+/* Default polling interval in ms for devfreq governors */
+#define HUCF_DEFAULT_POLLING_MS 100
+
+static void hisi_uncore_free_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ guard(mutex)(&uncore->pcc_lock);
+ pcc_mbox_free_channel(uncore->pchan);
+ uncore->pchan = NULL;
+}
+
+static void devm_hisi_uncore_free_pcc_chan(void *data)
+{
+ hisi_uncore_free_pcc_chan(data);
+}
+
+static int hisi_uncore_request_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ struct pcc_mbox_chan *pcc_chan;
+
+ uncore->cl = (struct mbox_client) {
+ .dev = dev,
+ .tx_block = false,
+ .knows_txdone = true,
+ };
+
+ pcc_chan = pcc_mbox_request_channel(&uncore->cl, uncore->chan_id);
+ if (IS_ERR(pcc_chan))
+ return dev_err_probe(dev, PTR_ERR(pcc_chan),
+ "Failed to request PCC channel %u\n", uncore->chan_id);
+
+ if (!pcc_chan->shmem_base_addr) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory address\n");
+ }
+
+ if (pcc_chan->shmem_size < sizeof(struct hisi_uncore_pcc_shmem)) {
+ pcc_mbox_free_channel(pcc_chan);
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid PCC shared memory size (%lluB)\n",
+ pcc_chan->shmem_size);
+ }
+
+ uncore->pchan = pcc_chan;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_free_pcc_chan, uncore);
+}
+
+static acpi_status hisi_uncore_pcc_reg_scan(struct acpi_resource *res,
+ void *ctx)
+{
+ struct acpi_resource_generic_register *reg;
+ struct hisi_uncore_freq *uncore;
+
+ if (!res || res->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
+ return AE_OK;
+
+ reg = &res->data.generic_reg;
+ if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM)
+ return AE_OK;
+
+ if (!ctx)
+ return AE_ERROR;
+
+ uncore = ctx;
+ /* PCC subspace ID stored in Access Size */
+ uncore->chan_id = reg->access_size;
+
+ return AE_CTRL_TERMINATE;
+}
+
+static int hisi_uncore_init_pcc_chan(struct hisi_uncore_freq *uncore)
+{
+ acpi_handle handle = ACPI_HANDLE(uncore->dev);
+ acpi_status status;
+ int rc;
+
+ uncore->chan_id = -1;
+ status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+ hisi_uncore_pcc_reg_scan, uncore);
+ if (ACPI_FAILURE(status) || uncore->chan_id < 0)
+ return dev_err_probe(uncore->dev, -ENODEV,
+ "Failed to get a PCC channel\n");
+
+ rc = devm_mutex_init(uncore->dev, &uncore->pcc_lock);
+ if (rc)
+ return rc;
+
+ return hisi_uncore_request_pcc_chan(uncore);
+}
+
+static int hisi_uncore_cmd_send(struct hisi_uncore_freq *uncore,
+ u8 cmd, u32 *data)
+{
+ struct hisi_uncore_pcc_shmem __iomem *addr;
+ struct hisi_uncore_pcc_shmem shmem;
+ struct pcc_mbox_chan *pchan;
+ unsigned int mrtt;
+ s64 time_delta;
+ u16 status;
+ int rc;
+
+ guard(mutex)(&uncore->pcc_lock);
+
+ pchan = uncore->pchan;
+ if (!pchan)
+ return -ENODEV;
+
+ addr = (struct hisi_uncore_pcc_shmem __iomem *)pchan->shmem;
+ if (!addr)
+ return -EINVAL;
+
+ /* Handle the Minimum Request Turnaround Time (MRTT) */
+ mrtt = pchan->min_turnaround_time;
+ time_delta = ktime_us_delta(ktime_get(), uncore->last_cmd_cmpl_time);
+ if (mrtt > time_delta)
+ udelay(mrtt - time_delta);
+
+ /* Copy data */
+ shmem.head = (struct acpi_pcct_shared_memory) {
+ .signature = PCC_SIGNATURE | uncore->chan_id,
+ .command = cmd,
+ };
+ shmem.pcc_data.data = *data;
+ memcpy_toio(addr, &shmem, sizeof(shmem));
+
+ /* Ring doorbell */
+ rc = mbox_send_message(pchan->mchan, &cmd);
+ if (rc < 0) {
+ dev_err(uncore->dev, "Failed to send mbox message, %d\n", rc);
+ return rc;
+ }
+
+ /* Wait status */
+ rc = readw_poll_timeout(&addr->head.status, status,
+ status & (PCC_STATUS_CMD_COMPLETE |
+ PCC_STATUS_ERROR),
+ HUCF_PCC_POLL_INTERVAL_US,
+ pchan->latency * HUCF_PCC_POLL_TIMEOUT_NUM);
+ if (rc) {
+ dev_err(uncore->dev, "PCC channel response timeout, cmd=%u\n", cmd);
+ } else if (status & PCC_STATUS_ERROR) {
+ dev_err(uncore->dev, "PCC cmd error, cmd=%u\n", cmd);
+ rc = -EIO;
+ }
+
+ uncore->last_cmd_cmpl_time = ktime_get();
+
+ /* Copy data back */
+ memcpy_fromio(data, &addr->pcc_data.data, sizeof(*data));
+
+ /* Clear mailbox active req */
+ mbox_client_txdone(pchan->mchan, rc);
+
+ return rc;
+}
+
+static int hisi_uncore_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Failed to get opp for freq %lu hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+ dev_pm_opp_put(opp);
+
+ data = (u32)(dev_pm_opp_get_freq(opp) / HZ_PER_MHZ);
+
+ return hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_FREQ, &data);
+}
+
+static int hisi_uncore_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ /* Not used */
+ return 0;
+}
+
+static int hisi_uncore_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev);
+ u32 data = 0;
+ int rc;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_FREQ, &data);
+
+ /*
+ * Upon a failure, 'data' remains 0 and 'freq' is set to 0 rather than a
+ * random value. devfreq shouldn't use 'freq' in that case though.
+ */
+ *freq = data * HZ_PER_MHZ;
+
+ return rc;
+}
+
+static void devm_hisi_uncore_remove_opp(void *data)
+{
+ struct hisi_uncore_freq *uncore = data;
+
+ dev_pm_opp_remove_all_dynamic(uncore->dev);
+}
+
+static int hisi_uncore_init_opp(struct hisi_uncore_freq *uncore)
+{
+ struct device *dev = uncore->dev;
+ unsigned long freq_mhz;
+ u32 num, index;
+ u32 data = 0;
+ int rc;
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_PLAT_FREQ_NUM,
+ &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat freq num\n");
+
+ num = data;
+
+ for (index = 0; index < num; index++) {
+ data = index;
+ rc = hisi_uncore_cmd_send(uncore,
+ HUCF_PCC_CMD_GET_PLAT_FREQ_BY_IDX,
+ &data);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Failed to get plat freq at index %u\n", index);
+ }
+ freq_mhz = data;
+
+		/* The OPP voltage doesn't matter here; use 1V as a default */
+ rc = dev_pm_opp_add(dev, freq_mhz * HZ_PER_MHZ, 1000000);
+ if (rc) {
+ dev_pm_opp_remove_all_dynamic(dev);
+ return dev_err_probe(dev, rc,
+ "Add OPP %lu failed\n", freq_mhz);
+ }
+ }
+
+ return devm_add_action_or_reset(dev, devm_hisi_uncore_remove_opp,
+ uncore);
+}
+
+static int hisi_platform_gov_func(struct devfreq *df, unsigned long *freq)
+{
+ /*
+	 * Platform-controlled mode doesn't care about the frequency issued by
+	 * devfreq, so just pick the maximum frequency.
+ */
+ *freq = DEVFREQ_MAX_FREQ;
+
+ return 0;
+}
+
+static int hisi_platform_gov_handler(struct devfreq *df, unsigned int event,
+ void *val)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(df->dev.parent);
+ int rc = 0;
+ u32 data;
+
+ if (WARN_ON(!uncore || !uncore->pchan))
+ return -ENODEV;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ data = HUCF_MODE_PLATFORM;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode (%d)\n", rc);
+ break;
+ case DEVFREQ_GOV_STOP:
+ data = HUCF_MODE_OS;
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set os mode (%d)\n", rc);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * In the platform-controlled mode, the platform decides the uncore frequency
+ * and ignores the frequency issued from the driver.
+ * Thus, create a pseudo 'hisi_platform' governor that stops the devfreq
+ * monitor from running, so as to avoid pointless overhead.
+ */
+static struct devfreq_governor hisi_platform_governor = {
+ .name = "hisi_platform",
+ /*
+ * Set interrupt_driven to skip the devfreq monitor mechanism, though
+ * this governor is not interrupt-driven.
+ */
+ .flags = DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
+ .get_target_freq = hisi_platform_gov_func,
+ .event_handler = hisi_platform_gov_handler,
+};
+
+static void hisi_uncore_remove_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ u32 data = HUCF_MODE_PLATFORM;
+ int rc;
+
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (--hisi_platform_gov_usage == 0) {
+ rc = devfreq_remove_governor(&hisi_platform_governor);
+ if (rc)
+ dev_err(uncore->dev, "Failed to remove hisi_platform gov (%d)\n", rc);
+ }
+
+ /*
+	 * Switch back to the platform-controlled mode on exit if supported, so
+	 * that the behaviour is well defined once the driver is detached.
+ */
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_MODE, &data);
+ if (rc)
+ dev_err(uncore->dev, "Failed to set platform mode on exit (%d)\n", rc);
+}
+
+static void devm_hisi_uncore_remove_platform_gov(void *data)
+{
+ hisi_uncore_remove_platform_gov(data);
+}
+
+static int hisi_uncore_add_platform_gov(struct hisi_uncore_freq *uncore)
+{
+ if (!(uncore->cap & HUCF_CAP_PLATFORM_CTRL))
+ return 0;
+
+ guard(mutex)(&hisi_platform_gov_usage_lock);
+
+ if (hisi_platform_gov_usage == 0) {
+ int rc = devfreq_add_governor(&hisi_platform_governor);
+ if (rc)
+ return rc;
+ }
+ hisi_platform_gov_usage++;
+
+ return devm_add_action_or_reset(uncore->dev,
+ devm_hisi_uncore_remove_platform_gov,
+ uncore);
+}
+
+/*
+ * Returns:
+ * 0 on success; uncore->related_cpus is set.
+ * -EINVAL if the property is not found, the property has no elements, or
+ * invalid arguments are passed to any of the subroutines.
+ * Other error codes on other failures.
+ */
+static int hisi_uncore_mark_related_cpus(struct hisi_uncore_freq *uncore,
+ char *property, int (*get_topo_id)(int cpu),
+ const struct cpumask *(*get_cpumask)(int cpu))
+{
+ unsigned int i, cpu;
+ size_t len;
+ int rc;
+
+ rc = device_property_count_u32(uncore->dev, property);
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return -EINVAL;
+
+ len = rc;
+ u32 *num __free(kfree) = kcalloc(len, sizeof(*num), GFP_KERNEL);
+ if (!num)
+ return -ENOMEM;
+
+ rc = device_property_read_u32_array(uncore->dev, property, num, len);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < len; i++) {
+ for_each_possible_cpu(cpu) {
+ if (get_topo_id(cpu) != num[i])
+ continue;
+
+ cpumask_or(&uncore->related_cpus,
+ &uncore->related_cpus, get_cpumask(cpu));
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int get_package_id(int cpu)
+{
+ return topology_physical_package_id(cpu);
+}
+
+static const struct cpumask *get_package_cpumask(int cpu)
+{
+ return topology_core_cpumask(cpu);
+}
+
+static int get_cluster_id(int cpu)
+{
+ return topology_cluster_id(cpu);
+}
+
+static const struct cpumask *get_cluster_cpumask(int cpu)
+{
+ return topology_cluster_cpumask(cpu);
+}
+
+static int hisi_uncore_mark_related_cpus_wrap(struct hisi_uncore_freq *uncore)
+{
+ int rc;
+
+ cpumask_clear(&uncore->related_cpus);
+
+ rc = hisi_uncore_mark_related_cpus(uncore, "related-package",
+ get_package_id,
+ get_package_cpumask);
+ /* Success, or firmware probably broken */
+ if (!rc || rc != -EINVAL)
+ return rc;
+
+ /* Try another property name if rc == -EINVAL */
+ return hisi_uncore_mark_related_cpus(uncore, "related-cluster",
+ get_cluster_id,
+ get_cluster_cpumask);
+}
+
+static ssize_t related_cpus_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hisi_uncore_freq *uncore = dev_get_drvdata(dev->parent);
+
+ return cpumap_print_to_pagebuf(true, buf, &uncore->related_cpus);
+}
+
+static DEVICE_ATTR_RO(related_cpus);
+
+static struct attribute *hisi_uncore_freq_attrs[] = {
+ &dev_attr_related_cpus.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(hisi_uncore_freq);
+
+static int hisi_uncore_devfreq_register(struct hisi_uncore_freq *uncore)
+{
+ struct devfreq_dev_profile *profile;
+ struct device *dev = uncore->dev;
+ unsigned long freq;
+ u32 data;
+ int rc;
+
+ rc = hisi_uncore_get_cur_freq(dev, &freq);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get plat init freq\n");
+
+ profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return -ENOMEM;
+
+ *profile = (struct devfreq_dev_profile) {
+ .initial_freq = freq,
+ .polling_ms = HUCF_DEFAULT_POLLING_MS,
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .target = hisi_uncore_target,
+ .get_dev_status = hisi_uncore_get_dev_status,
+ .get_cur_freq = hisi_uncore_get_cur_freq,
+ .dev_groups = hisi_uncore_freq_groups,
+ };
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_MODE, &data);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get operate mode\n");
+
+ if (data == HUCF_MODE_PLATFORM)
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ hisi_platform_governor.name, NULL);
+ else
+ uncore->devfreq = devm_devfreq_add_device(dev, profile,
+ DEVFREQ_GOV_PERFORMANCE, NULL);
+ if (IS_ERR(uncore->devfreq))
+ return dev_err_probe(dev, PTR_ERR(uncore->devfreq),
+ "Failed to add devfreq device\n");
+
+ return 0;
+}
+
+static int hisi_uncore_freq_probe(struct platform_device *pdev)
+{
+ struct hisi_uncore_freq *uncore;
+ struct device *dev = &pdev->dev;
+ u32 cap;
+ int rc;
+
+ uncore = devm_kzalloc(dev, sizeof(*uncore), GFP_KERNEL);
+ if (!uncore)
+ return -ENOMEM;
+
+ uncore->dev = dev;
+ platform_set_drvdata(pdev, uncore);
+
+ rc = hisi_uncore_init_pcc_chan(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init PCC channel\n");
+
+ rc = hisi_uncore_init_opp(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to init OPP\n");
+
+ rc = hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_GET_CAP, &cap);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get capability\n");
+
+ uncore->cap = cap;
+
+ rc = hisi_uncore_add_platform_gov(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to add hisi_platform governor\n");
+
+ rc = hisi_uncore_mark_related_cpus_wrap(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to mark related cpus\n");
+
+ rc = hisi_uncore_devfreq_register(uncore);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to register devfreq\n");
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_uncore_freq_acpi_match[] = {
+ { "HISI04F1", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_uncore_freq_acpi_match);
+
+static struct platform_driver hisi_uncore_freq_drv = {
+ .probe = hisi_uncore_freq_probe,
+ .driver = {
+ .name = "hisi_uncore_freq",
+ .acpi_match_table = hisi_uncore_freq_acpi_match,
+ },
+};
+module_platform_driver(hisi_uncore_freq_drv);
+
+MODULE_DESCRIPTION("HiSilicon uncore frequency scaling driver");
+MODULE_AUTHOR("Jie Zhan <zhanjie9@hisilicon.com>");
+MODULE_LICENSE("GPL");
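Note: once registered, the new HiSilicon uncore device is driven through the standard devfreq sysfs interface (its Kconfig entry selects the userspace governor). A small sketch of switching it to the userspace governor and requesting a frequency follows; the node name under /sys/class/devfreq/ is platform-specific, so "HISI04F1:00" is only an assumption:

	#include <stdio.h>

	/* Hypothetical devfreq node name; check /sys/class/devfreq/ on the target. */
	#define UNCORE_DEVFREQ "/sys/class/devfreq/HISI04F1:00"

	static int write_attr(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			return -1;
		}
		fputs(val, f);
		return fclose(f);
	}

	int main(void)
	{
		/* Hand control to userspace, then ask for 2 GHz (devfreq uses Hz). */
		if (write_attr(UNCORE_DEVFREQ "/governor", "userspace\n"))
			return 1;
		return write_attr(UNCORE_DEVFREQ "/userspace/set_freq", "2000000000\n") ? 1 : 0;
	}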
diff --git a/drivers/devfreq/sun8i-a33-mbus.c b/drivers/devfreq/sun8i-a33-mbus.c
index 7c6ae91ede1f..4bd5657558d6 100644
--- a/drivers/devfreq/sun8i-a33-mbus.c
+++ b/drivers/devfreq/sun8i-a33-mbus.c
@@ -360,7 +360,7 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
if (IS_ERR(priv->reg_mbus))
return PTR_ERR(priv->reg_mbus);
- priv->clk_bus = devm_clk_get(dev, "bus");
+ priv->clk_bus = devm_clk_get_enabled(dev, "bus");
if (IS_ERR(priv->clk_bus))
return dev_err_probe(dev, PTR_ERR(priv->clk_bus),
"failed to get bus clock\n");
@@ -375,24 +375,15 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(priv->clk_mbus),
"failed to get mbus clock\n");
- ret = clk_prepare_enable(priv->clk_bus);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to enable bus clock\n");
-
/* Lock the DRAM clock rate to keep priv->nominal_bw in sync. */
- ret = clk_rate_exclusive_get(priv->clk_dram);
- if (ret) {
- err = "failed to lock dram clock rate\n";
- goto err_disable_bus;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_dram);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock dram clock rate\n");
/* Lock the MBUS clock rate to keep MBUS_TMR_PERIOD in sync. */
- ret = clk_rate_exclusive_get(priv->clk_mbus);
- if (ret) {
- err = "failed to lock mbus clock rate\n";
- goto err_unlock_dram;
- }
+ ret = devm_clk_rate_exclusive_get(dev, priv->clk_mbus);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to lock mbus clock rate\n");
priv->gov_data.upthreshold = 10;
priv->gov_data.downdifferential = 5;
@@ -405,10 +396,8 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
priv->profile.max_state = max_state;
ret = devm_pm_opp_set_clkname(dev, "dram");
- if (ret) {
- err = "failed to add OPP table\n";
- goto err_unlock_mbus;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add OPP table\n");
base_freq = clk_get_rate(clk_get_parent(priv->clk_dram));
for (i = 0; i < max_state; ++i) {
@@ -448,12 +437,6 @@ static int sun8i_a33_mbus_probe(struct platform_device *pdev)
err_remove_opps:
dev_pm_opp_remove_all_dynamic(dev);
-err_unlock_mbus:
- clk_rate_exclusive_put(priv->clk_mbus);
-err_unlock_dram:
- clk_rate_exclusive_put(priv->clk_dram);
-err_disable_bus:
- clk_disable_unprepare(priv->clk_bus);
return dev_err_probe(dev, ret, err);
}
@@ -472,9 +455,6 @@ static void sun8i_a33_mbus_remove(struct platform_device *pdev)
dev_warn(dev, "failed to restore DRAM frequency: %d\n", ret);
dev_pm_opp_remove_all_dynamic(dev);
- clk_rate_exclusive_put(priv->clk_mbus);
- clk_rate_exclusive_put(priv->clk_dram);
- clk_disable_unprepare(priv->clk_bus);
}
static const struct sun8i_a33_mbus_variant sun50i_a64_mbus = {
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index 49f09998e5c0..3371e0a76d3c 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -161,12 +161,16 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
- struct dw_edma_pcie_data vsec_data;
+ struct dw_edma_pcie_data *vsec_data __free(kfree) = NULL;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
int err, nr_irqs;
int i, mask;
+ vsec_data = kmalloc(sizeof(*vsec_data), GFP_KERNEL);
+ if (!vsec_data)
+ return -ENOMEM;
+
/* Enable PCI device */
err = pcim_enable_device(pdev);
if (err) {
@@ -174,23 +178,23 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return err;
}
- memcpy(&vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
+ memcpy(vsec_data, pdata, sizeof(struct dw_edma_pcie_data));
/*
* Tries to find if exists a PCIe Vendor-Specific Extended Capability
* for the DMA, if one exists, then reconfigures it.
*/
- dw_edma_pcie_get_vsec_dma_data(pdev, &vsec_data);
+ dw_edma_pcie_get_vsec_dma_data(pdev, vsec_data);
/* Mapping PCI BAR regions */
- mask = BIT(vsec_data.rg.bar);
- for (i = 0; i < vsec_data.wr_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_wr[i].bar);
- mask |= BIT(vsec_data.dt_wr[i].bar);
+ mask = BIT(vsec_data->rg.bar);
+ for (i = 0; i < vsec_data->wr_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_wr[i].bar);
+ mask |= BIT(vsec_data->dt_wr[i].bar);
}
- for (i = 0; i < vsec_data.rd_ch_cnt; i++) {
- mask |= BIT(vsec_data.ll_rd[i].bar);
- mask |= BIT(vsec_data.dt_rd[i].bar);
+ for (i = 0; i < vsec_data->rd_ch_cnt; i++) {
+ mask |= BIT(vsec_data->ll_rd[i].bar);
+ mask |= BIT(vsec_data->dt_rd[i].bar);
}
err = pcim_iomap_regions(pdev, mask, pci_name(pdev));
if (err) {
@@ -213,7 +217,7 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
return -ENOMEM;
/* IRQs allocation */
- nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data.irqs,
+ nr_irqs = pci_alloc_irq_vectors(pdev, 1, vsec_data->irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
@@ -224,22 +228,22 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->mf = vsec_data.mf;
+ chip->mf = vsec_data->mf;
chip->nr_irqs = nr_irqs;
chip->ops = &dw_edma_pcie_plat_ops;
- chip->ll_wr_cnt = vsec_data.wr_ch_cnt;
- chip->ll_rd_cnt = vsec_data.rd_ch_cnt;
+ chip->ll_wr_cnt = vsec_data->wr_ch_cnt;
+ chip->ll_rd_cnt = vsec_data->rd_ch_cnt;
- chip->reg_base = pcim_iomap_table(pdev)[vsec_data.rg.bar];
+ chip->reg_base = pcim_iomap_table(pdev)[vsec_data->rg.bar];
if (!chip->reg_base)
return -ENOMEM;
for (i = 0; i < chip->ll_wr_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_wr[i];
struct dw_edma_region *dt_region = &chip->dt_region_wr[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_wr[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_wr[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -263,8 +267,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
for (i = 0; i < chip->ll_rd_cnt; i++) {
struct dw_edma_region *ll_region = &chip->ll_region_rd[i];
struct dw_edma_region *dt_region = &chip->dt_region_rd[i];
- struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
- struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
+ struct dw_edma_block *ll_block = &vsec_data->ll_rd[i];
+ struct dw_edma_block *dt_block = &vsec_data->dt_rd[i];
ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
if (!ll_region->vaddr.io)
@@ -298,31 +302,31 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p)\n",
- vsec_data.rg.bar, vsec_data.rg.off, vsec_data.rg.sz,
+ vsec_data->rg.bar, vsec_data->rg.off, vsec_data->rg.sz,
chip->reg_base);
for (i = 0; i < chip->ll_wr_cnt; i++) {
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_wr[i].bar,
- vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
+ i, vsec_data->ll_wr[i].bar,
+ vsec_data->ll_wr[i].off, chip->ll_region_wr[i].sz,
chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_wr[i].bar,
- vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
+ i, vsec_data->dt_wr[i].bar,
+ vsec_data->dt_wr[i].off, chip->dt_region_wr[i].sz,
chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.ll_rd[i].bar,
- vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
+ i, vsec_data->ll_rd[i].bar,
+ vsec_data->ll_rd[i].off, chip->ll_region_rd[i].sz,
chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
- i, vsec_data.dt_rd[i].bar,
- vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
+ i, vsec_data->dt_rd[i].bar,
+ vsec_data->dt_rd[i].off, chip->dt_region_rd[i].sz,
chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
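
The hunks above convert every vsec_data.field access into vsec_data->field, which matches the probe routine working on a per-device copy of the descriptor instead of mutating shared driver data. A minimal sketch of that pattern; the use of devm_kmemdup() and the pid->driver_data source are assumptions, since the allocation itself lies outside the visible hunks:

	struct dw_edma_pcie_data *vsec_data;

	/* Duplicate the read-only template so per-device fixups stay local. */
	vsec_data = devm_kmemdup(&pdev->dev, (const void *)pid->driver_data,
				 sizeof(*vsec_data), GFP_KERNEL);
	if (!vsec_data)
		return -ENOMEM;

	/* Every later access goes through the pointer, as in the hunks above: */
	mask |= BIT(vsec_data->rg.bar);
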
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 47c8adfdc155..9f0c41ca7770 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -449,9 +449,9 @@ static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c,
return ret;
spin_lock_irqsave(&cvc->pc->lock, flags);
- spin_lock_irqsave(&cvc->vc.lock, flags);
+ spin_lock(&cvc->vc.lock);
vd = mtk_cqdma_find_active_desc(c, cookie);
- spin_unlock_irqrestore(&cvc->vc.lock, flags);
+ spin_unlock(&cvc->vc.lock);
spin_unlock_irqrestore(&cvc->pc->lock, flags);
if (vd) {
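
The mtk-cqdma change fixes a nested-lock pattern: both locks were taken with spin_lock_irqsave() into the same flags variable, so the inner acquisition overwrote the interrupt state saved by the outer one and the final irqrestore could restore stale flags. Once the outer lock has disabled interrupts, the inner lock only needs plain spin_lock(). A minimal sketch of the corrected nesting; outer_lock and inner_lock are illustrative names:

	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);	/* disables IRQs, saves state once */
	spin_lock(&inner_lock);			/* IRQs already off, nothing to save */

	/* ... critical section touching both structures ... */

	spin_unlock(&inner_lock);
	spin_unlock_irqrestore(&outer_lock, flags);
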
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 0d6324c4e2be..7a2488a0d6a3 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1351,7 +1351,7 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == 1) {
eirq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irqbuf[0];
} else {
eirq = platform_get_irq_byname(pdev, "error");
@@ -1361,16 +1361,15 @@ static int nbpf_probe(struct platform_device *pdev)
if (irqs == num_channels + 1) {
struct nbpf_channel *chan;
- for (i = 0, chan = nbpf->chan; i <= num_channels;
+ for (i = 0, chan = nbpf->chan; i < num_channels;
i++, chan++) {
/* Skip the error IRQ */
if (irqbuf[i] == eirq)
i++;
+ if (i >= ARRAY_SIZE(irqbuf))
+ return -EINVAL;
chan->irq = irqbuf[i];
}
-
- if (chan != nbpf->chan + num_channels)
- return -EINVAL;
} else {
/* 2 IRQs and more than one channel */
if (irqbuf[0] == eirq)
@@ -1378,7 +1377,7 @@ static int nbpf_probe(struct platform_device *pdev)
else
irq = irqbuf[0];
- for (i = 0; i <= num_channels; i++)
+ for (i = 0; i < num_channels; i++)
nbpf->chan[i].irq = irq;
}
}
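
All three loops above used i <= num_channels and so indexed one element past nbpf->chan[]; the fix tightens the bound to i < num_channels and, in the branch that skips the error IRQ, checks the extra i++ against the size of irqbuf[] instead of relying on the post-loop pointer comparison that was removed. A small runnable sketch of the skip-and-bounds-check idiom, using separate buffer and channel indices; the sample values are made up:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int irqbuf[] = { 32, 33, 40, 34 };	/* per-channel IRQs plus the error IRQ (40) */
	int eirq = 40;
	int chan_irq[3];
	size_t num_channels = ARRAY_SIZE(chan_irq);
	size_t i, ch;

	for (i = 0, ch = 0; ch < num_channels; i++, ch++) {
		if (irqbuf[i] == eirq)		/* skip the error IRQ entry */
			i++;
		if (i >= ARRAY_SIZE(irqbuf))	/* never read past the buffer */
			return 1;
		chan_irq[ch] = irqbuf[i];
	}

	for (ch = 0; ch < num_channels; ch++)
		printf("channel %zu -> irq %d\n", ch, chan_irq[ch]);

	return 0;
}
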
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 37eb2e6c2f9f..65bf1685350a 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -2059,7 +2059,7 @@ free_drv_info:
kfree(drv_info);
return ret;
}
-module_init(ffa_init);
+rootfs_initcall(ffa_init);
static void __exit ffa_exit(void)
{
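
Switching ffa_init() from module_init() to rootfs_initcall() only changes behaviour when the driver is built in: a built-in module_init() runs at the device initcall level, while rootfs_initcall() runs one level earlier, so the FF-A infrastructure is available before ordinary device drivers probe against it. A minimal fragment showing the registration shape; the body is elided:

static int __init ffa_init(void)
{
	/* ... probe the FF-A version, set up notifiers, register the bus ... */
	return 0;
}
rootfs_initcall(ffa_init);	/* earlier than device_initcall()/module_init() when built in */
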
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 560724ce21aa..f51047d8ea64 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -311,6 +311,11 @@ static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
static const struct cs_dsp_ops cs_dsp_halo_ops;
static const struct cs_dsp_ops cs_dsp_halo_ao_ops;
+struct cs_dsp_alg_region_list_item {
+ struct list_head list;
+ struct cs_dsp_alg_region alg_region;
+};
+
struct cs_dsp_buf {
struct list_head list;
void *buf;
@@ -1752,13 +1757,13 @@ static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs,
struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
int type, unsigned int id)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
lockdep_assert_held(&dsp->pwr_lock);
- list_for_each_entry(alg_region, &dsp->alg_regions, list) {
- if (id == alg_region->alg && type == alg_region->type)
- return alg_region;
+ list_for_each_entry(item, &dsp->alg_regions, list) {
+ if (id == item->alg_region.alg && type == item->alg_region.type)
+ return &item->alg_region;
}
return NULL;
@@ -1769,35 +1774,35 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
__be32 ver, __be32 base)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
- alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL);
- if (!alg_region)
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item)
return ERR_PTR(-ENOMEM);
- alg_region->type = type;
- alg_region->alg = be32_to_cpu(id);
- alg_region->ver = be32_to_cpu(ver);
- alg_region->base = be32_to_cpu(base);
+ item->alg_region.type = type;
+ item->alg_region.alg = be32_to_cpu(id);
+ item->alg_region.ver = be32_to_cpu(ver);
+ item->alg_region.base = be32_to_cpu(base);
- list_add_tail(&alg_region->list, &dsp->alg_regions);
+ list_add_tail(&item->list, &dsp->alg_regions);
if (dsp->wmfw_ver > 0)
- cs_dsp_ctl_fixup_base(dsp, alg_region);
+ cs_dsp_ctl_fixup_base(dsp, &item->alg_region);
- return alg_region;
+ return &item->alg_region;
}
static void cs_dsp_free_alg_regions(struct cs_dsp *dsp)
{
- struct cs_dsp_alg_region *alg_region;
+ struct cs_dsp_alg_region_list_item *item;
while (!list_empty(&dsp->alg_regions)) {
- alg_region = list_first_entry(&dsp->alg_regions,
- struct cs_dsp_alg_region,
- list);
- list_del(&alg_region->list);
- kfree(alg_region);
+ item = list_first_entry(&dsp->alg_regions,
+ struct cs_dsp_alg_region_list_item,
+ list);
+ list_del(&item->list);
+ kfree(item);
}
}
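
The cs_dsp change stops embedding the list_head in the exported struct cs_dsp_alg_region and instead wraps it in a private list item, so the list plumbing no longer leaks into the public type while callers keep receiving a pointer to the inner structure. A minimal sketch of the wrapper pattern; find_region() is an illustrative stand-in for the lookup above:

struct cs_dsp_alg_region_list_item {
	struct list_head list;			/* private linkage */
	struct cs_dsp_alg_region alg_region;	/* payload handed back to callers */
};

static struct cs_dsp_alg_region *find_region(struct list_head *regions,
					     int type, unsigned int id)
{
	struct cs_dsp_alg_region_list_item *item;

	list_for_each_entry(item, regions, list) {
		if (item->alg_region.alg == id && item->alg_region.type == type)
			return &item->alg_region;	/* the wrapper stays internal */
	}

	return NULL;
}
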
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 939a4955e00b..94b05e4451dd 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -22,16 +22,16 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
-cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_STACKLEAK_PLUGIN) \
+cflags-$(CONFIG_ARM64) += -fpie $(DISABLE_KSTACK_ERASE) \
-fno-unwind-tables -fno-asynchronous-unwind-tables
cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
-DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
-DEFI_HAVE_STRCMP -fno-builtin -fpic \
$(call cc-option,-mno-single-pic-base) \
- $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE -mno-relax \
- $(DISABLE_STACKLEAK_PLUGIN)
-cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_STACKLEAK_PLUGIN)
+ $(DISABLE_KSTACK_ERASE)
+cflags-$(CONFIG_LOONGARCH) += -fpie $(DISABLE_KSTACK_ERASE)
cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 44f922e10db2..e43abb322fa6 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -12,6 +12,9 @@ menuconfig GPIOLIB
If unsure, say N.
+config GPIOLIB_LEGACY
+ def_bool y
+
if GPIOLIB
config GPIOLIB_FASTPATH_LIMIT
@@ -69,6 +72,14 @@ config GPIO_SYSFS
use the character device /dev/gpiochipN with the appropriate
ioctl() operations instead.
+config GPIO_SYSFS_LEGACY
+ bool "Enable legacy functionalities of the sysfs interface"
+ depends on GPIO_SYSFS
+ default y if GPIO_SYSFS
+ help
+ Say Y here if you want to enable the legacy, global GPIO
+ numberspace-based functionalities of the sysfs interface.
+
config GPIO_CDEV
bool "Character device (/dev/gpiochipN) support" if EXPERT
default y
@@ -1263,6 +1274,7 @@ config GPIO_ADP5520
config GPIO_ADP5585
tristate "GPIO Support for ADP5585"
depends on MFD_ADP5585
+ select GPIOLIB_IRQCHIP
help
This option enables support for the GPIO function found in the Analog
Devices ADP5585.
@@ -1464,6 +1476,16 @@ config GPIO_LP87565
This driver can also be built as a module. If so, the module will be
called gpio-lp87565.
+config GPIO_MACSMC
+ tristate "Apple Mac SMC GPIO"
+ depends on MFD_MACSMC
+ help
+ Support for GPIOs controlled by the SMC microcontroller on Apple Mac
+ systems.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-macsmc.
+
config GPIO_MADERA
tristate "Cirrus Logic Madera class codecs"
depends on PINCTRL_MADERA
@@ -1501,7 +1523,7 @@ config GPIO_MAX77759
called gpio-max77759.
config GPIO_PALMAS
- bool "TI PALMAS series PMICs GPIO"
+ tristate "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
help
Select this option to enable GPIO driver for the TI PALMAS
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 88dedd298256..379f55e9ed1e 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -5,7 +5,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIOLIB) += gpiolib-devres.o
-obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o
+obj-$(CONFIG_GPIOLIB_LEGACY) += gpiolib-legacy.o
obj-$(CONFIG_OF_GPIO) += gpiolib-of.o
obj-$(CONFIG_GPIO_CDEV) += gpiolib-cdev.o
obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
@@ -99,6 +99,7 @@ obj-$(CONFIG_GPIO_LP873X) += gpio-lp873x.o
obj-$(CONFIG_GPIO_LP87565) += gpio-lp87565.o
obj-$(CONFIG_GPIO_LPC18XX) += gpio-lpc18xx.o
obj-$(CONFIG_GPIO_LPC32XX) += gpio-lpc32xx.o
+obj-$(CONFIG_GPIO_MACSMC) += gpio-macsmc.o
obj-$(CONFIG_GPIO_MADERA) += gpio-madera.o
obj-$(CONFIG_GPIO_MAX3191X) += gpio-max3191x.o
obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 4a8b349f2483..7a09a4f58551 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -131,6 +131,11 @@ Work items:
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
+- Move the MMIO GPIO specific fields out of struct gpio_chip into a
+ dedicated structure. Currently every GPIO chip has them if gpio-mmio is
+ enabled in Kconfig even if it itself doesn't register with the helper
+ library.
+
-------------------------------------------------------------------------------
Generic regmap GPIO
@@ -183,16 +188,12 @@ remove the old ones and finally rename the new ones back to the old names.
-------------------------------------------------------------------------------
-Extend the sysfs ABI to allow exporting lines by their HW offsets
-
-The need to support the sysfs GPIO class is one of the main obstacles to
-removing the global GPIO numberspace from the kernel. In order to wean users
-off using global numbers from user-space, extend the existing interface with
-new per-gpiochip export/unexport attributes that allow to refer to GPIOs using
-their hardware offsets within the chip.
+Remove legacy sysfs features
-Encourage users to switch to using them and eventually remove the existing
-global export/unexport attribues.
+We have two parallel per-chip class devices and per-exported-line attribute
+groups in sysfs. One is using the obsolete global GPIO numberspace and the
+second relies on hardware offsets of pins within the chip. Remove the former
+once user-space has switched to using the latter.
-------------------------------------------------------------------------------
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
index c7ac5a9ffb1f..bd2cc5f4f851 100644
--- a/drivers/gpio/gpio-74xx-mmio.c
+++ b/drivers/gpio/gpio-74xx-mmio.c
@@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,8 +19,8 @@
#define MMIO_74XX_BIT_CNT(x) ((x) & GENMASK(7, 0))
struct mmio_74xx_gpio_priv {
- struct gpio_chip gc;
- unsigned flags;
+ struct gpio_generic_chip gen_gc;
+ unsigned int flags;
};
static const struct of_device_id mmio_74xx_gpio_ids[] = {
@@ -99,16 +100,15 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct mmio_74xx_gpio_priv *priv = gpiochip_get_data(gc);
- if (priv->flags & MMIO_74XX_DIR_OUT) {
- gc->set(gc, gpio, val);
- return 0;
- }
+ if (priv->flags & MMIO_74XX_DIR_OUT)
+ return gpio_generic_chip_set(&priv->gen_gc, gpio, val);
return -ENOTSUPP;
}
static int mmio_74xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct mmio_74xx_gpio_priv *priv;
void __iomem *dat;
int err;
@@ -123,19 +123,21 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dat))
return PTR_ERR(dat);
- err = bgpio_init(&priv->gc, &pdev->dev,
- DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
- dat, NULL, NULL, NULL, NULL, 0);
+ config.dev = &pdev->dev;
+ config.sz = DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8);
+ config.dat = dat;
+
+ err = gpio_generic_chip_init(&priv->gen_gc, &config);
if (err)
return err;
- priv->gc.direction_input = mmio_74xx_dir_in;
- priv->gc.direction_output = mmio_74xx_dir_out;
- priv->gc.get_direction = mmio_74xx_get_direction;
- priv->gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
- priv->gc.owner = THIS_MODULE;
+ priv->gen_gc.gc.direction_input = mmio_74xx_dir_in;
+ priv->gen_gc.gc.direction_output = mmio_74xx_dir_out;
+ priv->gen_gc.gc.get_direction = mmio_74xx_get_direction;
+ priv->gen_gc.gc.ngpio = MMIO_74XX_BIT_CNT(priv->flags);
+ priv->gen_gc.gc.owner = THIS_MODULE;
- return devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ return devm_gpiochip_add_data(&pdev->dev, &priv->gen_gc.gc, priv);
}
static struct platform_driver mmio_74xx_gpio_driver = {
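
The gpio-74xx-mmio conversion shows the new generic-MMIO GPIO API in its smallest form: the long bgpio_init() argument list becomes a zero-initialized gpio_generic_chip_config, gpio_generic_chip_init() sets up the embedded chip, and the driver then customizes gen_gc.gc before registering it. A condensed sketch of the same shape; my_gpio_priv and my_gpio_probe are illustrative names and only fields visible in this diff are used:

struct my_gpio_priv {
	struct gpio_generic_chip gen_gc;	/* replaces the bare struct gpio_chip */
};

static int my_gpio_probe(struct platform_device *pdev)
{
	struct gpio_generic_chip_config config = { };
	struct my_gpio_priv *priv;
	void __iomem *dat;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dat = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dat))
		return PTR_ERR(dat);

	config.dev = &pdev->dev;
	config.sz = 4;		/* register width in bytes */
	config.dat = dat;	/* data register */

	err = gpio_generic_chip_init(&priv->gen_gc, &config);
	if (err)
		return err;

	priv->gen_gc.gc.base = -1;
	priv->gen_gc.gc.owner = THIS_MODULE;

	return devm_gpiochip_add_data(&pdev->dev, &priv->gen_gc.gc, priv);
}
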
diff --git a/drivers/gpio/gpio-adp5585.c b/drivers/gpio/gpio-adp5585.c
index d5c0f1b267c8..b2c8836c5f84 100644
--- a/drivers/gpio/gpio-adp5585.c
+++ b/drivers/gpio/gpio-adp5585.c
@@ -4,67 +4,131 @@
*
* Copyright 2022 NXP
* Copyright 2024 Ideas on Board Oy
+ * Copyright 2025 Analog Devices, Inc.
*/
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/mfd/adp5585.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
-#define ADP5585_GPIO_MAX 11
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the
+ * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to
+ * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver
+ * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is
+ * marked as reserved in the device tree for variants that don't support it.
+ */
+#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0)
+#define ADP5585_BIT(n) ((n) >= 6 ? BIT((n) - 6) : BIT(n))
+
+/*
+ * Bank 0 covers pins "GPIO 1/R0" to "GPIO 8/R7", numbered 0 to 7 by the
+ * driver, bank 1 covers pins "GPIO 9/C0" to "GPIO 16/C7", numbered 8 to
+ * 15 and bank 3 covers pins "GPIO 17/C8" to "GPIO 19/C10", numbered 16 to 18.
+ */
+#define ADP5589_BANK(n) ((n) >> 3)
+#define ADP5589_BIT(n) BIT((n) & 0x7)
+
+struct adp5585_gpio_chip {
+ int (*bank)(unsigned int off);
+ int (*bit)(unsigned int off);
+ unsigned int debounce_dis_a;
+ unsigned int rpull_cfg_a;
+ unsigned int gpo_data_a;
+ unsigned int gpo_out_a;
+ unsigned int gpio_dir_a;
+ unsigned int gpi_stat_a;
+ unsigned int gpi_int_lvl_a;
+ unsigned int gpi_ev_a;
+ unsigned int gpi_ev_min;
+ unsigned int gpi_ev_max;
+ bool has_bias_hole;
+};
struct adp5585_gpio_dev {
struct gpio_chip gpio_chip;
+ struct notifier_block nb;
+ const struct adp5585_gpio_chip *info;
struct regmap *regmap;
+ unsigned long irq_mask;
+ unsigned long irq_en;
+ unsigned long irq_active_high;
+ /* used for irqchip bus locking */
+ struct mutex bus_lock;
};
+static int adp5585_gpio_bank(unsigned int off)
+{
+ return ADP5585_BANK(off);
+}
+
+static int adp5585_gpio_bit(unsigned int off)
+{
+ return ADP5585_BIT(off);
+}
+
+static int adp5589_gpio_bank(unsigned int off)
+{
+ return ADP5589_BANK(off);
+}
+
+static int adp5589_gpio_bit(unsigned int off)
+{
+ return ADP5589_BIT(off);
+}
+
static int adp5585_gpio_get_direction(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
unsigned int val;
- regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
+ regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off), &val);
- return val & bit ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+ return val & info->bit(off) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}
static int adp5585_gpio_direction_input(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
- return regmap_clear_bits(adp5585_gpio->regmap,
- ADP5585_GPIO_DIRECTION_A + bank, bit);
+ return regmap_clear_bits(adp5585_gpio->regmap, info->gpio_dir_a + info->bank(off),
+ info->bit(off));
}
static int adp5585_gpio_direction_output(struct gpio_chip *chip, unsigned int off, int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bank = info->bank(off);
+ unsigned int bit = info->bit(off);
int ret;
- ret = regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_DATA_OUT_A + bank, bit,
- val ? bit : 0);
+ ret = regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + bank,
+ bit, val ? bit : 0);
if (ret)
return ret;
- return regmap_set_bits(adp5585_gpio->regmap,
- ADP5585_GPIO_DIRECTION_A + bank, bit);
+ return regmap_set_bits(adp5585_gpio->regmap, info->gpio_dir_a + bank,
+ bit);
}
static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bank = info->bank(off);
+ unsigned int bit = info->bit(off);
unsigned int reg;
unsigned int val;
@@ -79,8 +143,8 @@ static int adp5585_gpio_get_value(struct gpio_chip *chip, unsigned int off)
* .direction_input(), .direction_output() or .set() operations racing
* with this.
*/
- regmap_read(adp5585_gpio->regmap, ADP5585_GPIO_DIRECTION_A + bank, &val);
- reg = val & bit ? ADP5585_GPO_DATA_OUT_A : ADP5585_GPI_STATUS_A;
+ regmap_read(adp5585_gpio->regmap, info->gpio_dir_a + bank, &val);
+ reg = val & bit ? info->gpo_data_a : info->gpi_stat_a;
regmap_read(adp5585_gpio->regmap, reg + bank, &val);
return !!(val & bit);
@@ -90,17 +154,17 @@ static int adp5585_gpio_set_value(struct gpio_chip *chip, unsigned int off,
int val)
{
struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
- return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_DATA_OUT_A + bank,
+ return regmap_update_bits(adp5585_gpio->regmap, info->gpo_data_a + info->bank(off),
bit, val ? bit : 0);
}
static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, unsigned int bias)
{
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
unsigned int bit, reg, mask, val;
/*
@@ -108,8 +172,10 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
* consecutive registers ADP5585_RPULL_CONFIG_*, with a hole of 4 bits
* after R5.
*/
- bit = off * 2 + (off > 5 ? 4 : 0);
- reg = ADP5585_RPULL_CONFIG_A + bit / 8;
+ bit = off * 2;
+ if (info->has_bias_hole)
+ bit += (off > 5 ? 4 : 0);
+ reg = info->rpull_cfg_a + bit / 8;
mask = ADP5585_Rx_PULL_CFG_MASK << (bit % 8);
val = bias << (bit % 8);
@@ -119,22 +185,22 @@ static int adp5585_gpio_set_bias(struct adp5585_gpio_dev *adp5585_gpio,
static int adp5585_gpio_set_drive(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, enum pin_config_param drive)
{
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_GPO_OUT_MODE_A + bank, bit,
+ info->gpo_out_a + info->bank(off), bit,
drive == PIN_CONFIG_DRIVE_OPEN_DRAIN ? bit : 0);
}
static int adp5585_gpio_set_debounce(struct adp5585_gpio_dev *adp5585_gpio,
unsigned int off, unsigned int debounce)
{
- unsigned int bank = ADP5585_BANK(off);
- unsigned int bit = ADP5585_BIT(off);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ unsigned int bit = adp5585_gpio->info->bit(off);
return regmap_update_bits(adp5585_gpio->regmap,
- ADP5585_DEBOUNCE_DIS_A + bank, bit,
+ info->debounce_dis_a + info->bank(off), bit,
debounce ? 0 : bit);
}
@@ -172,11 +238,175 @@ static int adp5585_gpio_set_config(struct gpio_chip *chip, unsigned int off,
};
}
+static int adp5585_gpio_request(struct gpio_chip *chip, unsigned int off)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ struct device *dev = chip->parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+ const struct adp5585_regs *regs = adp5585->regs;
+ int ret;
+
+ ret = test_and_set_bit(off, adp5585->pin_usage);
+ if (ret)
+ return -EBUSY;
+
+ /* make sure it's configured for GPIO */
+ return regmap_clear_bits(adp5585_gpio->regmap,
+ regs->pin_cfg_a + info->bank(off),
+ info->bit(off));
+}
+
+static void adp5585_gpio_free(struct gpio_chip *chip, unsigned int off)
+{
+ struct device *dev = chip->parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+
+ clear_bit(off, adp5585->pin_usage);
+}
+
+static int adp5585_gpio_key_event(struct notifier_block *nb, unsigned long key,
+ void *data)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = container_of(nb, struct adp5585_gpio_dev, nb);
+ struct device *dev = adp5585_gpio->gpio_chip.parent;
+ unsigned long key_press = (unsigned long)data;
+ unsigned int irq, irq_type;
+ struct irq_data *irqd;
+ bool active_high;
+ unsigned int off;
+
+ /* make sure the event is for me */
+ if (key < adp5585_gpio->info->gpi_ev_min || key > adp5585_gpio->info->gpi_ev_max)
+ return NOTIFY_DONE;
+
+ off = key - adp5585_gpio->info->gpi_ev_min;
+ active_high = test_bit(off, &adp5585_gpio->irq_active_high);
+
+ irq = irq_find_mapping(adp5585_gpio->gpio_chip.irq.domain, off);
+ if (!irq)
+ return NOTIFY_BAD;
+
+ irqd = irq_get_irq_data(irq);
+ if (!irqd) {
+ dev_err(dev, "Could not get irq(%u) data\n", irq);
+ return NOTIFY_BAD;
+ }
+
+ dev_dbg_ratelimited(dev, "gpio-keys event(%u) press=%lu, a_high=%u\n",
+ off, key_press, active_high);
+
+ if (!active_high)
+ key_press = !key_press;
+
+ irq_type = irqd_get_trigger_type(irqd);
+
+ if ((irq_type & IRQ_TYPE_EDGE_RISING && key_press) ||
+ (irq_type & IRQ_TYPE_EDGE_FALLING && !key_press))
+ handle_nested_irq(irq);
+
+ return NOTIFY_STOP;
+}
+
+static void adp5585_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+
+ mutex_lock(&adp5585_gpio->bus_lock);
+}
+
+static void adp5585_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(chip);
+ const struct adp5585_gpio_chip *info = adp5585_gpio->info;
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ bool active_high = test_bit(hwirq, &adp5585_gpio->irq_active_high);
+ bool enabled = test_bit(hwirq, &adp5585_gpio->irq_en);
+ bool masked = test_bit(hwirq, &adp5585_gpio->irq_mask);
+ unsigned int bank = adp5585_gpio->info->bank(hwirq);
+ unsigned int bit = adp5585_gpio->info->bit(hwirq);
+
+ if (masked && !enabled)
+ goto out_unlock;
+ if (!masked && enabled)
+ goto out_unlock;
+
+ regmap_update_bits(adp5585_gpio->regmap, info->gpi_int_lvl_a + bank, bit,
+ active_high ? bit : 0);
+ regmap_update_bits(adp5585_gpio->regmap, info->gpi_ev_a + bank, bit,
+ masked ? 0 : bit);
+ assign_bit(hwirq, &adp5585_gpio->irq_en, !masked);
+
+out_unlock:
+ mutex_unlock(&adp5585_gpio->bus_lock);
+}
+
+static void adp5585_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ __set_bit(hwirq, &adp5585_gpio->irq_mask);
+ gpiochip_disable_irq(gc, hwirq);
+}
+
+static void adp5585_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gc, hwirq);
+ __clear_bit(hwirq, &adp5585_gpio->irq_mask);
+}
+
+static int adp5585_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct adp5585_gpio_dev *adp5585_gpio = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ assign_bit(hwirq, &adp5585_gpio->irq_active_high,
+ type == IRQ_TYPE_EDGE_RISING);
+
+ irq_set_handler_locked(d, handle_edge_irq);
+ return 0;
+}
+
+static const struct irq_chip adp5585_irq_chip = {
+ .name = "adp5585",
+ .irq_mask = adp5585_irq_mask,
+ .irq_unmask = adp5585_irq_unmask,
+ .irq_bus_lock = adp5585_irq_bus_lock,
+ .irq_bus_sync_unlock = adp5585_irq_bus_sync_unlock,
+ .irq_set_type = adp5585_irq_set_type,
+ .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void adp5585_gpio_unreg_notifier(void *data)
+{
+ struct adp5585_gpio_dev *adp5585_gpio = data;
+ struct device *dev = adp5585_gpio->gpio_chip.parent;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+
+ blocking_notifier_chain_unregister(&adp5585->event_notifier,
+ &adp5585_gpio->nb);
+}
+
static int adp5585_gpio_probe(struct platform_device *pdev)
{
struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct adp5585_gpio_dev *adp5585_gpio;
struct device *dev = &pdev->dev;
+ struct gpio_irq_chip *girq;
struct gpio_chip *gc;
int ret;
@@ -186,6 +416,10 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
adp5585_gpio->regmap = adp5585->regmap;
+ adp5585_gpio->info = (const struct adp5585_gpio_chip *)id->driver_data;
+ if (!adp5585_gpio->info)
+ return -ENODEV;
+
device_set_of_node_from_dev(dev, dev->parent);
gc = &adp5585_gpio->gpio_chip;
@@ -196,13 +430,43 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
gc->get = adp5585_gpio_get_value;
gc->set_rv = adp5585_gpio_set_value;
gc->set_config = adp5585_gpio_set_config;
+ gc->request = adp5585_gpio_request;
+ gc->free = adp5585_gpio_free;
gc->can_sleep = true;
gc->base = -1;
- gc->ngpio = ADP5585_GPIO_MAX;
+ gc->ngpio = adp5585->n_pins;
gc->label = pdev->name;
gc->owner = THIS_MODULE;
+ if (device_property_present(dev->parent, "interrupt-controller")) {
+ if (!adp5585->irq)
+ return dev_err_probe(dev, -EINVAL,
+ "Unable to serve as interrupt controller without IRQ\n");
+
+ girq = &adp5585_gpio->gpio_chip.irq;
+ gpio_irq_chip_set_chip(girq, &adp5585_irq_chip);
+ girq->handler = handle_bad_irq;
+ girq->threaded = true;
+
+ adp5585_gpio->nb.notifier_call = adp5585_gpio_key_event;
+ ret = blocking_notifier_chain_register(&adp5585->event_notifier,
+ &adp5585_gpio->nb);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, adp5585_gpio_unreg_notifier,
+ adp5585_gpio);
+ if (ret)
+ return ret;
+ }
+
+ /* everything masked by default */
+ adp5585_gpio->irq_mask = ~0UL;
+
+ ret = devm_mutex_init(dev, &adp5585_gpio->bus_lock);
+ if (ret)
+ return ret;
ret = devm_gpiochip_add_data(dev, &adp5585_gpio->gpio_chip,
adp5585_gpio);
if (ret)
@@ -211,8 +475,40 @@ static int adp5585_gpio_probe(struct platform_device *pdev)
return 0;
}
+static const struct adp5585_gpio_chip adp5585_gpio_chip_info = {
+ .bank = adp5585_gpio_bank,
+ .bit = adp5585_gpio_bit,
+ .debounce_dis_a = ADP5585_DEBOUNCE_DIS_A,
+ .rpull_cfg_a = ADP5585_RPULL_CONFIG_A,
+ .gpo_data_a = ADP5585_GPO_DATA_OUT_A,
+ .gpo_out_a = ADP5585_GPO_OUT_MODE_A,
+ .gpio_dir_a = ADP5585_GPIO_DIRECTION_A,
+ .gpi_stat_a = ADP5585_GPI_STATUS_A,
+ .has_bias_hole = true,
+ .gpi_ev_min = ADP5585_GPI_EVENT_START,
+ .gpi_ev_max = ADP5585_GPI_EVENT_END,
+ .gpi_int_lvl_a = ADP5585_GPI_INT_LEVEL_A,
+ .gpi_ev_a = ADP5585_GPI_EVENT_EN_A,
+};
+
+static const struct adp5585_gpio_chip adp5589_gpio_chip_info = {
+ .bank = adp5589_gpio_bank,
+ .bit = adp5589_gpio_bit,
+ .debounce_dis_a = ADP5589_DEBOUNCE_DIS_A,
+ .rpull_cfg_a = ADP5589_RPULL_CONFIG_A,
+ .gpo_data_a = ADP5589_GPO_DATA_OUT_A,
+ .gpo_out_a = ADP5589_GPO_OUT_MODE_A,
+ .gpio_dir_a = ADP5589_GPIO_DIRECTION_A,
+ .gpi_stat_a = ADP5589_GPI_STATUS_A,
+ .gpi_ev_min = ADP5589_GPI_EVENT_START,
+ .gpi_ev_max = ADP5589_GPI_EVENT_END,
+ .gpi_int_lvl_a = ADP5589_GPI_INT_LEVEL_A,
+ .gpi_ev_a = ADP5589_GPI_EVENT_EN_A,
+};
+
static const struct platform_device_id adp5585_gpio_id_table[] = {
- { "adp5585-gpio" },
+ { "adp5585-gpio", (kernel_ulong_t)&adp5585_gpio_chip_info },
+ { "adp5589-gpio", (kernel_ulong_t)&adp5589_gpio_chip_info },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(platform, adp5585_gpio_id_table);
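
The reworked driver routes every register access through the per-variant info->bank(off) and info->bit(off) callbacks: the register address is always <register base> + bank(off) and the mask within that register is bit(off). A small runnable userspace sketch of the two mapping rules taken from the macros at the top of the file; the variant table and offsets are illustrative:

#include <stdio.h>

#define BIT(n)	(1u << (n))

/* ADP5585: bank 0 holds offsets 0-5, bank 1 holds offsets 6-10. */
static unsigned int adp5585_bank(unsigned int off) { return off >= 6 ? 1 : 0; }
static unsigned int adp5585_bit(unsigned int off)  { return off >= 6 ? BIT(off - 6) : BIT(off); }

/* ADP5589: plain 8-bit banks. */
static unsigned int adp5589_bank(unsigned int off) { return off >> 3; }
static unsigned int adp5589_bit(unsigned int off)  { return BIT(off & 0x7); }

struct variant {
	const char *name;
	unsigned int ngpio;
	unsigned int (*bank)(unsigned int off);
	unsigned int (*bit)(unsigned int off);
};

int main(void)
{
	const struct variant variants[] = {
		{ "adp5585", 11, adp5585_bank, adp5585_bit },
		{ "adp5589", 19, adp5589_bank, adp5589_bit },
	};
	const unsigned int offsets[] = { 0, 5, 6, 8, 17 };

	for (unsigned int v = 0; v < 2; v++) {
		for (unsigned int i = 0; i < 5; i++) {
			if (offsets[i] >= variants[v].ngpio)
				continue;
			printf("%s: offset %2u -> bank %u, mask 0x%02x\n",
			       variants[v].name, offsets[i],
			       variants[v].bank(offsets[i]),
			       variants[v].bit(offsets[i]));
		}
	}

	return 0;
}
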
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index e530c94dcce8..89ffde693019 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -39,7 +39,6 @@ static int arizona_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
return ret;
if (change && persistent) {
- pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
}
@@ -82,7 +81,6 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
return ret;
}
- pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
}
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index e7671bcd5c07..e29a9589b3cc 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -436,10 +436,8 @@ static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
struct device_node *np = dev->of_node;
int err;
- priv->irq_domain =
- irq_domain_create_linear(of_fwnode_handle(np), priv->num_gpios,
- &brcmstb_gpio_irq_domain_ops,
- priv);
+ priv->irq_domain = irq_domain_create_linear(dev_fwnode(dev), priv->num_gpios,
+ &brcmstb_gpio_irq_domain_ops, priv);
if (!priv->irq_domain) {
dev_err(dev, "Couldn't allocate IRQ domain\n");
return -ENXIO;
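
Several hunks in this series (brcmstb above, and davinci, em, grgpio, lpc18xx below) replace of_fwnode_handle(dev->of_node) with dev_fwnode(dev) when creating IRQ domains: the helper returns the device's fwnode whether it comes from devicetree or another firmware interface, and avoids dereferencing of_node directly. A one-call kernel-style fragment of the converted site; my_irq_domain_ops is an illustrative name:

	priv->irq_domain = irq_domain_create_linear(dev_fwnode(dev), priv->num_gpios,
						    &my_irq_domain_ops, priv);
	if (!priv->irq_domain)
		return -ENXIO;
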
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index e9dd2564c54f..c647953521c7 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -8,9 +8,11 @@
* Boris Brezillon <boris.brezillon@free-electrons.com>
*/
-#include <linux/gpio/driver.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -30,7 +32,7 @@
#define CDNS_GPIO_IRQ_ANY_EDGE 0x2c
struct cdns_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
void __iomem *regs;
u32 bypass_orig;
};
@@ -38,29 +40,24 @@ struct cdns_gpio_chip {
static int cdns_gpio_request(struct gpio_chip *chip, unsigned int offset)
{
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) & ~BIT(offset),
cgpio->regs + CDNS_GPIO_BYPASS_MODE);
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
return 0;
}
static void cdns_gpio_free(struct gpio_chip *chip, unsigned int offset)
{
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
iowrite32(ioread32(cgpio->regs + CDNS_GPIO_BYPASS_MODE) |
(BIT(offset) & cgpio->bypass_orig),
cgpio->regs + CDNS_GPIO_BYPASS_MODE);
-
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
}
static void cdns_gpio_irq_mask(struct irq_data *d)
@@ -85,13 +82,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
struct cdns_gpio_chip *cgpio = gpiochip_get_data(chip);
- unsigned long flags;
u32 int_value;
u32 int_type;
u32 mask = BIT(d->hwirq);
int ret = 0;
- raw_spin_lock_irqsave(&chip->bgpio_lock, flags);
+ guard(gpio_generic_lock)(&cgpio->gen_gc);
int_value = ioread32(cgpio->regs + CDNS_GPIO_IRQ_VALUE) & ~mask;
int_type = ioread32(cgpio->regs + CDNS_GPIO_IRQ_TYPE) & ~mask;
@@ -108,15 +104,12 @@ static int cdns_gpio_irq_set_type(struct irq_data *d, unsigned int type)
} else if (type == IRQ_TYPE_LEVEL_LOW) {
int_type |= mask;
} else {
- ret = -EINVAL;
- goto err_irq_type;
+ return -EINVAL;
}
iowrite32(int_value, cgpio->regs + CDNS_GPIO_IRQ_VALUE);
iowrite32(int_type, cgpio->regs + CDNS_GPIO_IRQ_TYPE);
-err_irq_type:
- raw_spin_unlock_irqrestore(&chip->bgpio_lock, flags);
return ret;
}
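
The gpio-cadence hunks replace hand-rolled raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the scoped guard(gpio_generic_lock)(...) form built on <linux/cleanup.h>: the lock is released automatically when the scope ends, which is what lets the error path above become a plain return -EINVAL instead of a goto to an unlock label. A minimal sketch of a guarded helper in the same style; cdns_style_set_type() is an illustrative function, not part of the patch:

static int cdns_style_set_type(struct cdns_gpio_chip *cgpio, u32 mask, bool valid)
{
	/* Takes the generic chip's lock; dropped on every return path. */
	guard(gpio_generic_lock)(&cgpio->gen_gc);

	if (!valid)
		return -EINVAL;		/* no unlock label needed */

	iowrite32(mask, cgpio->regs + CDNS_GPIO_IRQ_TYPE);

	return 0;
}
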
@@ -150,6 +143,7 @@ static const struct irq_chip cdns_gpio_irqchip = {
static int cdns_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct cdns_gpio_chip *cgpio;
int ret, irq;
u32 dir_prev;
@@ -176,32 +170,33 @@ static int cdns_gpio_probe(struct platform_device *pdev)
* gpiochip_lock_as_irq:
* tried to flag a GPIO set as output for IRQ
* Generic GPIO driver stores the direction value internally,
- * so it needs to be changed before bgpio_init() is called.
+ * so it needs to be changed before gpio_generic_chip_init() is called.
*/
dir_prev = ioread32(cgpio->regs + CDNS_GPIO_DIRECTION_MODE);
iowrite32(GENMASK(num_gpios - 1, 0),
cgpio->regs + CDNS_GPIO_DIRECTION_MODE);
- ret = bgpio_init(&cgpio->gc, &pdev->dev, 4,
- cgpio->regs + CDNS_GPIO_INPUT_VALUE,
- cgpio->regs + CDNS_GPIO_OUTPUT_VALUE,
- NULL,
- NULL,
- cgpio->regs + CDNS_GPIO_DIRECTION_MODE,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config.dev = &pdev->dev;
+ config.sz = 4;
+ config.dat = cgpio->regs + CDNS_GPIO_INPUT_VALUE;
+ config.set = cgpio->regs + CDNS_GPIO_OUTPUT_VALUE;
+ config.dirin = cgpio->regs + CDNS_GPIO_DIRECTION_MODE;
+ config.flags = BGPIOF_READ_OUTPUT_REG_SET;
+
+ ret = gpio_generic_chip_init(&cgpio->gen_gc, &config);
if (ret) {
dev_err(&pdev->dev, "Failed to register generic gpio, %d\n",
ret);
goto err_revert_dir;
}
- cgpio->gc.label = dev_name(&pdev->dev);
- cgpio->gc.ngpio = num_gpios;
- cgpio->gc.parent = &pdev->dev;
- cgpio->gc.base = -1;
- cgpio->gc.owner = THIS_MODULE;
- cgpio->gc.request = cdns_gpio_request;
- cgpio->gc.free = cdns_gpio_free;
+ cgpio->gen_gc.gc.label = dev_name(&pdev->dev);
+ cgpio->gen_gc.gc.ngpio = num_gpios;
+ cgpio->gen_gc.gc.parent = &pdev->dev;
+ cgpio->gen_gc.gc.base = -1;
+ cgpio->gen_gc.gc.owner = THIS_MODULE;
+ cgpio->gen_gc.gc.request = cdns_gpio_request;
+ cgpio->gen_gc.gc.free = cdns_gpio_free;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -218,7 +213,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
if (irq >= 0) {
struct gpio_irq_chip *girq;
- girq = &cgpio->gc.irq;
+ girq = &cgpio->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &cdns_gpio_irqchip);
girq->parent_handler = cdns_gpio_irq_handler;
girq->num_parents = 1;
@@ -234,7 +229,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
girq->handler = handle_level_irq;
}
- ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gc, cgpio);
+ ret = devm_gpiochip_add_data(&pdev->dev, &cgpio->gen_gc.gc, cgpio);
if (ret < 0) {
dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
goto err_revert_dir;
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d69a24dd4828..24ff2347d599 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -8,13 +8,15 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
static int clps711x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device_node *np = pdev->dev.of_node;
+ struct gpio_generic_chip *gen_gc;
void __iomem *dat, *dir;
- struct gpio_chip *gc;
int err, id;
if (!np)
@@ -24,8 +26,8 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if ((id < 0) || (id > 4))
return -ENODEV;
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
+ if (!gen_gc)
return -ENOMEM;
dat = devm_platform_ioremap_resource(pdev, 0);
@@ -36,35 +38,37 @@ static int clps711x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dir))
return PTR_ERR(dir);
+ config.dev = &pdev->dev;
+ config.sz = 1;
+ config.dat = dat;
+
switch (id) {
case 3:
/* PORTD is inverted logic for direction register */
- err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL,
- NULL, dir, 0);
+ config.dirin = dir;
break;
default:
- err = bgpio_init(gc, &pdev->dev, 1, dat, NULL, NULL,
- dir, NULL, 0);
+ config.dirout = dir;
break;
}
+ err = gpio_generic_chip_init(gen_gc, &config);
if (err)
return err;
switch (id) {
case 4:
/* PORTE is 3 lines only */
- gc->ngpio = 3;
+ gen_gc->gc.ngpio = 3;
break;
default:
break;
}
- gc->base = -1;
- gc->owner = THIS_MODULE;
- platform_set_drvdata(pdev, gc);
+ gen_gc->gc.base = -1;
+ gen_gc->gc.owner = THIS_MODULE;
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL);
}
static const struct of_device_id clps711x_gpio_ids[] = {
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 80a82492171e..8f3a36d0191d 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -478,7 +478,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
return irq;
}
- irq_domain = irq_domain_create_legacy(of_fwnode_handle(dev->of_node), ngpio, irq, 0,
+ irq_domain = irq_domain_create_legacy(dev_fwnode(dev), ngpio, irq, 0,
&davinci_gpio_irq_ops, chips);
if (!irq_domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index a5e6e446f39c..015f1ac32dd9 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -325,8 +325,7 @@ static int em_gio_probe(struct platform_device *pdev)
irq_chip->irq_release_resources = em_gio_irq_relres;
irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND;
- p->irq_domain = irq_domain_create_simple(of_fwnode_handle(dev->of_node),
- ngpios, 0,
+ p->irq_domain = irq_domain_create_simple(dev_fwnode(dev), ngpios, 0,
&em_gio_irq_domain_ops, p);
if (!p->irq_domain) {
dev_err(dev, "cannot initialize irq domain\n");
diff --git a/drivers/gpio/gpio-en7523.c b/drivers/gpio/gpio-en7523.c
index 69834db2c1cf..cf47afc578a9 100644
--- a/drivers/gpio/gpio-en7523.c
+++ b/drivers/gpio/gpio-en7523.c
@@ -4,6 +4,7 @@
#include <linux/io.h>
#include <linux/bits.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -13,28 +14,23 @@
/**
* struct airoha_gpio_ctrl - Airoha GPIO driver data
- * @gc: Associated gpio_chip instance.
+ * @gen_gc: Associated gpio_generic_chip instance.
* @data: The data register.
* @dir: [0] The direction register for the lower 16 pins.
* [1]: The direction register for the higher 16 pins.
* @output: The output enable register.
*/
struct airoha_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
void __iomem *data;
void __iomem *dir[2];
void __iomem *output;
};
-static struct airoha_gpio_ctrl *gc_to_ctrl(struct gpio_chip *gc)
-{
- return container_of(gc, struct airoha_gpio_ctrl, gc);
-}
-
static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio,
int val, int out)
{
- struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc);
+ struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc);
u32 dir = ioread32(ctrl->dir[gpio / 16]);
u32 output = ioread32(ctrl->output);
u32 mask = BIT((gpio % 16) * 2);
@@ -50,7 +46,7 @@ static int airoha_dir_set(struct gpio_chip *gc, unsigned int gpio,
iowrite32(dir, ctrl->dir[gpio / 16]);
if (out)
- gc->set(gc, gpio, val);
+ gpio_generic_chip_set(&ctrl->gen_gc, gpio, val);
iowrite32(output, ctrl->output);
@@ -70,7 +66,7 @@ static int airoha_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
- struct airoha_gpio_ctrl *ctrl = gc_to_ctrl(gc);
+ struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc);
u32 dir = ioread32(ctrl->dir[gpio / 16]);
u32 mask = BIT((gpio % 16) * 2);
@@ -79,6 +75,7 @@ static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio)
static int airoha_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device *dev = &pdev->dev;
struct airoha_gpio_ctrl *ctrl;
int err;
@@ -103,18 +100,21 @@ static int airoha_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->output))
return PTR_ERR(ctrl->output);
- err = bgpio_init(&ctrl->gc, dev, 4, ctrl->data, NULL,
- NULL, NULL, NULL, 0);
+ config.dev = dev;
+ config.sz = 4;
+ config.dat = ctrl->data;
+
+ err = gpio_generic_chip_init(&ctrl->gen_gc, &config);
if (err)
return dev_err_probe(dev, err, "unable to init generic GPIO");
- ctrl->gc.ngpio = AIROHA_GPIO_MAX;
- ctrl->gc.owner = THIS_MODULE;
- ctrl->gc.direction_output = airoha_dir_out;
- ctrl->gc.direction_input = airoha_dir_in;
- ctrl->gc.get_direction = airoha_get_dir;
+ ctrl->gen_gc.gc.ngpio = AIROHA_GPIO_MAX;
+ ctrl->gen_gc.gc.owner = THIS_MODULE;
+ ctrl->gen_gc.gc.direction_output = airoha_dir_out;
+ ctrl->gen_gc.gc.direction_input = airoha_dir_in;
+ ctrl->gen_gc.gc.get_direction = airoha_get_dir;
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->gen_gc.gc, ctrl);
}
static const struct of_device_id airoha_gpio_of_match[] = {
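
The en7523 hunks drop the local gc_to_ctrl() container_of() helper in favour of gpiochip_get_data(): the pointer passed as the last argument of devm_gpiochip_add_data() is stored by gpiolib and handed back to every callback, so the lookup no longer depends on where the gpio_chip sits inside the driver structure. A minimal sketch of the pairing; the callback tail is paraphrased from the hunk and error handling is omitted:

static int airoha_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
	/* Returns whatever pointer was registered with the chip below. */
	struct airoha_gpio_ctrl *ctrl = gpiochip_get_data(gc);
	u32 dir = ioread32(ctrl->dir[gpio / 16]);
	u32 mask = BIT((gpio % 16) * 2);

	return (dir & mask) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
}

	/* In probe: associate ctrl with the chip so gpiochip_get_data() works. */
	return devm_gpiochip_add_data(dev, &ctrl->gen_gc.gc, ctrl);
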
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index d38a2d9854ca..f3f8bab62f94 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -402,9 +402,8 @@ static int grgpio_probe(struct platform_device *ofdev)
return -EINVAL;
}
- priv->domain = irq_domain_create_linear(of_fwnode_handle(np), gc->ngpio,
- &grgpio_irq_domain_ops,
- priv);
+ priv->domain = irq_domain_create_linear(dev_fwnode(&ofdev->dev), gc->ngpio,
+ &grgpio_irq_domain_ops, priv);
if (!priv->domain) {
dev_err(dev, "Could not add irq domain\n");
return -EINVAL;
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 70a01c5b8ad1..add09971d26a 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -222,6 +222,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data0 = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
@@ -230,6 +231,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
.conf_offset = 0x0,
.in_offset = 0x20,
.out_offset = 0x10,
+ .inten_offset = 0x30,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
@@ -246,6 +248,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls3a5000_data = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
@@ -254,6 +257,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a_data = {
.conf_offset = 0x800,
.in_offset = 0xa00,
.out_offset = 0x900,
+ .inten_offset = 0xb00,
};
/* LS7A2000 chipset GPIO */
@@ -263,6 +267,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls7a2000_data0 = {
.conf_offset = 0x800,
.in_offset = 0xa00,
.out_offset = 0x900,
+ .inten_offset = 0xb00,
};
/* LS7A2000 ACPI GPIO */
@@ -281,6 +286,7 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls3a6000_data = {
.conf_offset = 0x0,
.in_offset = 0xc,
.out_offset = 0x8,
+ .inten_offset = 0x14,
};
static const struct of_device_id loongson_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-lpc18xx.c b/drivers/gpio/gpio-lpc18xx.c
index b0a8da5c058d..2dbfbf90176c 100644
--- a/drivers/gpio/gpio-lpc18xx.c
+++ b/drivers/gpio/gpio-lpc18xx.c
@@ -249,8 +249,8 @@ static int lpc18xx_gpio_pin_ic_probe(struct lpc18xx_gpio_chip *gc)
raw_spin_lock_init(&ic->lock);
ic->domain = irq_domain_create_hierarchy(parent_domain, 0, NR_LPC18XX_GPIO_PIN_IC_IRQS,
- of_fwnode_handle(dev->of_node),
- &lpc18xx_gpio_pin_ic_domain_ops, ic);
+ dev_fwnode(dev), &lpc18xx_gpio_pin_ic_domain_ops,
+ ic);
if (!ic->domain) {
pr_err("unable to add irq domain\n");
ret = -ENODEV;
diff --git a/drivers/gpio/gpio-macsmc.c b/drivers/gpio/gpio-macsmc.c
new file mode 100644
index 000000000000..7570d9e89adf
--- /dev/null
+++ b/drivers/gpio/gpio-macsmc.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC GPIO driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * This driver implements basic SMC PMU GPIO support that can read inputs
+ * and write outputs. Mode changes and IRQ config are not yet implemented.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+
+#define MAX_GPIO 64
+
+/*
+ * Commands 0-6 are, presumably, the intended API.
+ * Command 0xff lets you get/set the pin configuration in detail directly,
+ * but the bit meanings seem not to be stable between devices/PMU hardware
+ * versions.
+ *
+ * We're going to try to make do with the low commands for now.
+ * We don't implement pin mode changes at this time.
+ */
+
+#define CMD_ACTION (0 << 24)
+#define CMD_OUTPUT (1 << 24)
+#define CMD_INPUT (2 << 24)
+#define CMD_PINMODE (3 << 24)
+#define CMD_IRQ_ENABLE (4 << 24)
+#define CMD_IRQ_ACK (5 << 24)
+#define CMD_IRQ_MODE (6 << 24)
+#define CMD_CONFIG (0xff << 24)
+
+#define MODE_INPUT 0
+#define MODE_OUTPUT 1
+#define MODE_VALUE_0 0
+#define MODE_VALUE_1 2
+
+#define IRQ_MODE_HIGH 0
+#define IRQ_MODE_LOW 1
+#define IRQ_MODE_RISING 2
+#define IRQ_MODE_FALLING 3
+#define IRQ_MODE_BOTH 4
+
+#define CONFIG_MASK GENMASK(23, 16)
+#define CONFIG_VAL GENMASK(7, 0)
+
+#define CONFIG_OUTMODE GENMASK(7, 6)
+#define CONFIG_IRQMODE GENMASK(5, 3)
+#define CONFIG_PULLDOWN BIT(2)
+#define CONFIG_PULLUP BIT(1)
+#define CONFIG_OUTVAL BIT(0)
+
+/*
+ * Output modes seem to differ depending on the PMU in use... ?
+ * j274 / M1 (Sera PMU):
+ * 0 = input
+ * 1 = output
+ * 2 = open drain
+ * 3 = disable
+ * j314 / M1Pro (Maverick PMU):
+ * 0 = input
+ * 1 = open drain
+ * 2 = output
+ * 3 = ?
+ */
+
+struct macsmc_gpio {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct gpio_chip gc;
+
+ int first_index;
+};
+
+static int macsmc_gpio_nr(smc_key key)
+{
+ int low = hex_to_bin(key & 0xff);
+ int high = hex_to_bin((key >> 8) & 0xff);
+
+ if (low < 0 || high < 0)
+ return -1;
+
+ return low | (high << 4);
+}
+
+static int macsmc_gpio_key(unsigned int offset)
+{
+ return _SMC_KEY("gP\0\0") | hex_asc_hi(offset) << 8 | hex_asc_lo(offset);
+}
+
+static int macsmc_gpio_find_first_gpio_index(struct macsmc_gpio *smcgp)
+{
+ struct apple_smc *smc = smcgp->smc;
+ smc_key key = macsmc_gpio_key(0);
+ smc_key first_key, last_key;
+ int start, count, ret;
+
+ /* Return early if the key is out of bounds */
+ ret = apple_smc_get_key_by_index(smc, 0, &first_key);
+ if (ret)
+ return ret;
+ if (key <= first_key)
+ return -ENODEV;
+
+ ret = apple_smc_get_key_by_index(smc, smc->key_count - 1, &last_key);
+ if (ret)
+ return ret;
+ if (key > last_key)
+ return -ENODEV;
+
+ /* Binary search to find index of first SMC key bigger or equal to key */
+ start = 0;
+ count = smc->key_count;
+ while (count > 1) {
+ smc_key pkey;
+ int pivot = start + ((count - 1) >> 1);
+
+ ret = apple_smc_get_key_by_index(smc, pivot, &pkey);
+ if (ret < 0)
+ return ret;
+
+ if (pkey == key)
+ return pivot;
+
+ pivot++;
+
+ if (pkey < key) {
+ count -= pivot - start;
+ start = pivot;
+ } else {
+ count = pivot - start;
+ }
+ }
+
+ return start;
+}
+
+static int macsmc_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 val;
+ int ret;
+
+ /* First try reading the explicit pin mode register */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_PINMODE, &val);
+ if (!ret)
+ return (val & MODE_OUTPUT) ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+
+ /*
+ * Less common IRQ configs cause CMD_PINMODE to fail, and so does open drain mode.
+ * Fall back to reading IRQ mode, which will only succeed for inputs.
+ */
+ ret = apple_smc_rw_u32(smcgp->smc, key, CMD_IRQ_MODE, &val);
+ return ret ? GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int macsmc_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ u32 cmd, val;
+ int ret;
+
+ ret = macsmc_gpio_get_direction(gc, offset);
+ if (ret < 0)
+ return ret;
+
+ if (ret == GPIO_LINE_DIRECTION_OUT)
+ cmd = CMD_OUTPUT;
+ else
+ cmd = CMD_INPUT;
+
+ ret = apple_smc_rw_u32(smcgp->smc, key, cmd, &val);
+ if (ret < 0)
+ return ret;
+
+ return val ? 1 : 0;
+}
+
+static int macsmc_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ smc_key key = macsmc_gpio_key(offset);
+ int ret;
+
+ value |= CMD_OUTPUT;
+ ret = apple_smc_write_u32(smcgp->smc, key, CMD_OUTPUT | value);
+ if (ret < 0)
+ dev_err(smcgp->dev, "GPIO set failed %p4ch = 0x%x\n",
+ &key, value);
+
+ return ret;
+}
+
+static int macsmc_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask, unsigned int ngpios)
+{
+ struct macsmc_gpio *smcgp = gpiochip_get_data(gc);
+ int count;
+ int i;
+
+ count = min(smcgp->smc->key_count, MAX_GPIO);
+
+ bitmap_zero(valid_mask, ngpios);
+
+ for (i = 0; i < count; i++) {
+ int ret, gpio_nr;
+ smc_key key;
+
+ ret = apple_smc_get_key_by_index(smcgp->smc, smcgp->first_index + i, &key);
+ if (ret < 0)
+ return ret;
+
+ if (key > SMC_KEY(gPff))
+ break;
+
+ gpio_nr = macsmc_gpio_nr(key);
+	if (gpio_nr < 0 || gpio_nr >= MAX_GPIO) {
+ dev_err(smcgp->dev, "Bad GPIO key %p4ch\n", &key);
+ continue;
+ }
+
+ set_bit(gpio_nr, valid_mask);
+ }
+
+ return 0;
+}
+
+static int macsmc_gpio_probe(struct platform_device *pdev)
+{
+ struct macsmc_gpio *smcgp;
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ smc_key key;
+ int ret;
+
+ smcgp = devm_kzalloc(&pdev->dev, sizeof(*smcgp), GFP_KERNEL);
+ if (!smcgp)
+ return -ENOMEM;
+
+ smcgp->dev = &pdev->dev;
+ smcgp->smc = smc;
+
+ smcgp->first_index = macsmc_gpio_find_first_gpio_index(smcgp);
+ if (smcgp->first_index < 0)
+ return smcgp->first_index;
+
+ ret = apple_smc_get_key_by_index(smc, smcgp->first_index, &key);
+ if (ret < 0)
+ return ret;
+
+ if (key > macsmc_gpio_key(MAX_GPIO - 1))
+ return -ENODEV;
+
+ dev_info(smcgp->dev, "First GPIO key: %p4ch\n", &key);
+
+ smcgp->gc.label = "macsmc-pmu-gpio";
+ smcgp->gc.owner = THIS_MODULE;
+ smcgp->gc.get = macsmc_gpio_get;
+ smcgp->gc.set_rv = macsmc_gpio_set;
+ smcgp->gc.get_direction = macsmc_gpio_get_direction;
+ smcgp->gc.init_valid_mask = macsmc_gpio_init_valid_mask;
+ smcgp->gc.can_sleep = true;
+ smcgp->gc.ngpio = MAX_GPIO;
+ smcgp->gc.base = -1;
+ smcgp->gc.parent = &pdev->dev;
+
+ return devm_gpiochip_add_data(&pdev->dev, &smcgp->gc, smcgp);
+}
+
+static const struct of_device_id macsmc_gpio_of_table[] = {
+ { .compatible = "apple,smc-gpio", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, macsmc_gpio_of_table);
+
+static struct platform_driver macsmc_gpio_driver = {
+ .driver = {
+ .name = "macsmc-gpio",
+ .of_match_table = macsmc_gpio_of_table,
+ },
+ .probe = macsmc_gpio_probe,
+};
+module_platform_driver(macsmc_gpio_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC GPIO driver");
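
macsmc_gpio_find_first_gpio_index() above is a lower-bound binary search over the SMC's sorted key table: it shrinks the window [start, start + count) until start is the first index whose key is greater than or equal to the gP00 key, probing entries through apple_smc_get_key_by_index(). The same invariant can be checked in userspace against a plain sorted array; a runnable sketch with made-up key values:

#include <stdio.h>

/* Lower bound: index of the first element >= target in a sorted array. */
static int lower_bound(const unsigned int *keys, int count, unsigned int target)
{
	int start = 0;

	while (count > 1) {
		int pivot = start + ((count - 1) >> 1);

		if (keys[pivot] == target)
			return pivot;

		pivot++;
		if (keys[pivot - 1] < target) {
			count -= pivot - start;
			start = pivot;
		} else {
			count = pivot - start;
		}
	}

	return start;
}

int main(void)
{
	unsigned int keys[] = { 10, 20, 30, 40, 50 };

	printf("%d\n", lower_bound(keys, 5, 30));	/* 2: exact hit */
	printf("%d\n", lower_bound(keys, 5, 35));	/* 3: first key >= 35 */

	return 0;
}
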
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 14ae25783438..897a1e004681 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -55,9 +55,9 @@ static void ltq_mm_apply(struct ltq_mm *chip)
* @gpio: GPIO signal number.
* @val: Value to be written to specified signal.
*
- * Set the shadow value and call ltq_mm_apply.
+ * Set the shadow value and call ltq_mm_apply. Always returns 0.
*/
-static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+static int ltq_mm_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct ltq_mm *chip = gpiochip_get_data(gc);
@@ -66,6 +66,8 @@ static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
else
chip->shadow &= ~(1 << offset);
ltq_mm_apply(chip);
+
+ return 0;
}
/**
@@ -78,9 +80,7 @@ static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
*/
static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
{
- ltq_mm_set(gc, offset, value);
-
- return 0;
+ return ltq_mm_set(gc, offset, value);
}
/**
@@ -111,7 +111,7 @@ static int ltq_mm_probe(struct platform_device *pdev)
chip->mmchip.gc.ngpio = 16;
chip->mmchip.gc.direction_output = ltq_mm_dir_out;
- chip->mmchip.gc.set = ltq_mm_set;
+ chip->mmchip.gc.set_rv = ltq_mm_set;
chip->mmchip.save_regs = ltq_mm_save_regs;
/* store the shadow value if one was passed by the devicetree */
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 4841e4ebe7a6..cf878c2ea6bf 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -211,11 +211,12 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
return 0;
}
-static void bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+static int bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
{
+ return 0;
}
-static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
@@ -230,10 +231,12 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
gc->write_reg(gc->reg_dat, gc->bgpio_data);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ return 0;
}
-static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
unsigned long mask = bgpio_line2mask(gc, gpio);
@@ -241,9 +244,11 @@ static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
gc->write_reg(gc->reg_set, mask);
else
gc->write_reg(gc->reg_clr, mask);
+
+ return 0;
}
-static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long mask = bgpio_line2mask(gc, gpio);
unsigned long flags;
@@ -258,6 +263,8 @@ static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
gc->write_reg(gc->reg_set, gc->bgpio_data);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+ return 0;
}
static void bgpio_multiple_get_masks(struct gpio_chip *gc,
@@ -298,21 +305,25 @@ static void bgpio_set_multiple_single_reg(struct gpio_chip *gc,
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
-static void bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+static int bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
unsigned long *bits)
{
bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat);
+
+ return 0;
}
-static void bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set);
+
+ return 0;
}
-static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
- unsigned long *mask,
- unsigned long *bits)
+static int bgpio_set_multiple_with_clear(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits)
{
unsigned long set_mask, clear_mask;
@@ -322,6 +333,8 @@ static void bgpio_set_multiple_with_clear(struct gpio_chip *gc,
gc->write_reg(gc->reg_set, set_mask);
if (clear_mask)
gc->write_reg(gc->reg_clr, clear_mask);
+
+ return 0;
}
static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out)
@@ -335,6 +348,11 @@ static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_ou
return pinctrl_gpio_direction_input(gc, gpio);
}
+static int bgpio_dir_in_err(struct gpio_chip *gc, unsigned int gpio)
+{
+ return -EINVAL;
+}
+
static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
return bgpio_dir_return(gc, gpio, false);
@@ -349,7 +367,7 @@ static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set(gc, gpio, val);
+ gc->set_rv(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
@@ -414,14 +432,14 @@ static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
bgpio_dir_out(gc, gpio, val);
- gc->set(gc, gpio, val);
+ gc->set_rv(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
int val)
{
- gc->set(gc, gpio, val);
+ gc->set_rv(gc, gpio, val);
bgpio_dir_out(gc, gpio, val);
return bgpio_dir_return(gc, gpio, true);
}
@@ -510,18 +528,18 @@ static int bgpio_setup_io(struct gpio_chip *gc,
if (set && clr) {
gc->reg_set = set;
gc->reg_clr = clr;
- gc->set = bgpio_set_with_clear;
- gc->set_multiple = bgpio_set_multiple_with_clear;
+ gc->set_rv = bgpio_set_with_clear;
+ gc->set_multiple_rv = bgpio_set_multiple_with_clear;
} else if (set && !clr) {
gc->reg_set = set;
- gc->set = bgpio_set_set;
- gc->set_multiple = bgpio_set_multiple_set;
+ gc->set_rv = bgpio_set_set;
+ gc->set_multiple_rv = bgpio_set_multiple_set;
} else if (flags & BGPIOF_NO_OUTPUT) {
- gc->set = bgpio_set_none;
- gc->set_multiple = NULL;
+ gc->set_rv = bgpio_set_none;
+ gc->set_multiple_rv = NULL;
} else {
- gc->set = bgpio_set;
- gc->set_multiple = bgpio_set_multiple;
+ gc->set_rv = bgpio_set;
+ gc->set_multiple_rv = bgpio_set_multiple;
}
if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
@@ -566,7 +584,11 @@ static int bgpio_setup_direction(struct gpio_chip *gc,
gc->direction_output = bgpio_dir_out_err;
else
gc->direction_output = bgpio_simple_dir_out;
- gc->direction_input = bgpio_simple_dir_in;
+
+ if (flags & BGPIOF_NO_INPUT)
+ gc->direction_input = bgpio_dir_in_err;
+ else
+ gc->direction_input = bgpio_simple_dir_in;
}
return 0;
@@ -654,7 +676,7 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
}
gc->bgpio_data = gc->read_reg(gc->reg_dat);
- if (gc->set == bgpio_set_set &&
+ if (gc->set_rv == bgpio_set_set &&
!(flags & BGPIOF_UNREADABLE_REG_SET))
gc->bgpio_data = gc->read_reg(gc->reg_set);
@@ -712,28 +734,6 @@ static const struct of_device_id bgpio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, bgpio_of_match);
-static struct bgpio_pdata *bgpio_parse_fw(struct device *dev, unsigned long *flags)
-{
- struct bgpio_pdata *pdata;
-
- if (!dev_fwnode(dev))
- return NULL;
-
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- pdata->base = -1;
-
- if (device_is_big_endian(dev))
- *flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
-
- if (device_property_read_bool(dev, "no-output"))
- *flags |= BGPIOF_NO_OUTPUT;
-
- return pdata;
-}
-
static int bgpio_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -745,18 +745,10 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
void __iomem *dirin;
unsigned long sz;
unsigned long flags = 0;
+ unsigned int base;
int err;
struct gpio_chip *gc;
- struct bgpio_pdata *pdata;
-
- pdata = bgpio_parse_fw(dev, &flags);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
-
- if (!pdata) {
- pdata = dev_get_platdata(dev);
- flags = pdev->id_entry->driver_data;
- }
+ const char *label;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
if (!r)
@@ -788,17 +780,27 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
if (!gc)
return -ENOMEM;
+ if (device_is_big_endian(dev))
+ flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+
+ if (device_property_read_bool(dev, "no-output"))
+ flags |= BGPIOF_NO_OUTPUT;
+
err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags);
if (err)
return err;
- if (pdata) {
- if (pdata->label)
- gc->label = pdata->label;
- gc->base = pdata->base;
- if (pdata->ngpio > 0)
- gc->ngpio = pdata->ngpio;
- }
+ err = device_property_read_string(dev, "label", &label);
+ if (!err)
+ gc->label = label;
+
+ /*
+	 * This property *must not* be used in device-tree sources; it's only
+ * meant to be passed to the driver from board files and MFD core.
+ */
+ err = device_property_read_u32(dev, "gpio-mmio,base", &base);
+ if (!err && base <= INT_MAX)
+ gc->base = base;
platform_set_drvdata(pdev, gc);
@@ -809,9 +811,6 @@ static const struct platform_device_id bgpio_id_table[] = {
{
.name = "basic-mmio-gpio",
.driver_data = 0,
- }, {
- .name = "basic-mmio-gpio-be",
- .driver_data = BGPIOF_BIG_ENDIAN,
},
{ }
};
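
The gpio-mmio conversion above sets the pattern for the rest of this series: every ->set()/->set_multiple() callback gains an int return value and is registered through the set_rv/set_multiple_rv members of struct gpio_chip, so register-write failures can be reported back to gpiolib. A minimal sketch of a converted callback, assuming only the prototypes and field names visible in the hunks above; foo_gpio_* and the register layout are illustrative, not part of the patch:

	#include <linux/gpio/driver.h>
	#include <linux/io.h>

	/* Hypothetical driver state, for illustration only. */
	struct foo_gpio {
		void __iomem *dat;
	};

	static int foo_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
	{
		struct foo_gpio *foo = gpiochip_get_data(gc);
		u32 reg = readl(foo->dat);

		if (value)
			reg |= BIT(offset);
		else
			reg &= ~BIT(offset);
		writel(reg, foo->dat);

		/* New convention: 0 on success, negative errno on failure. */
		return 0;
	}

	static void foo_gpio_setup(struct gpio_chip *gc)
	{
		gc->set_rv = foo_gpio_set;	/* was: gc->set = foo_gpio_set; */
	}
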
diff --git a/drivers/gpio/gpio-moxtet.c b/drivers/gpio/gpio-moxtet.c
index 61f9efd6c64f..27dd9c3e7b77 100644
--- a/drivers/gpio/gpio-moxtet.c
+++ b/drivers/gpio/gpio-moxtet.c
@@ -52,15 +52,15 @@ static int moxtet_gpio_get_value(struct gpio_chip *gc, unsigned int offset)
return !!(ret & BIT(offset));
}
-static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
int state;
state = moxtet_device_written(chip->dev);
if (state < 0)
- return;
+ return state;
offset -= MOXTET_GPIO_INPUTS;
@@ -69,7 +69,7 @@ static void moxtet_gpio_set_value(struct gpio_chip *gc, unsigned int offset,
else
state &= ~BIT(offset);
- moxtet_device_write(chip->dev, state);
+ return moxtet_device_write(chip->dev, state);
}
static int moxtet_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -104,13 +104,11 @@ static int moxtet_gpio_direction_output(struct gpio_chip *gc,
struct moxtet_gpio_chip *chip = gpiochip_get_data(gc);
if (chip->desc->out_mask & BIT(offset))
- moxtet_gpio_set_value(gc, offset, val);
+ return moxtet_gpio_set_value(gc, offset, val);
else if (chip->desc->in_mask & BIT(offset))
return -ENOTSUPP;
- else
- return -EINVAL;
- return 0;
+ return -EINVAL;
}
static int moxtet_gpio_probe(struct device *dev)
@@ -142,7 +140,7 @@ static int moxtet_gpio_probe(struct device *dev)
chip->gpio_chip.direction_input = moxtet_gpio_direction_input;
chip->gpio_chip.direction_output = moxtet_gpio_direction_output;
chip->gpio_chip.get = moxtet_gpio_get_value;
- chip->gpio_chip.set = moxtet_gpio_set_value;
+ chip->gpio_chip.set_rv = moxtet_gpio_set_value;
chip->gpio_chip.base = -1;
chip->gpio_chip.ngpio = MOXTET_GPIO_NGPIOS;
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index 091d96f2d682..40d587176a75 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -69,7 +69,7 @@ __mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
out_8(&regs->wkup_dvo, chip->shadow_dvo);
}
-static void
+static int
mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
@@ -81,6 +81,8 @@ mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
}
static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -151,7 +153,7 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
- gc->set = mpc52xx_wkup_gpio_set;
+ gc->set_rv = mpc52xx_wkup_gpio_set;
ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
if (ret)
@@ -228,7 +230,7 @@ __mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
out_be32(&regs->simple_dvo, chip->shadow_dvo);
}
-static void
+static int
mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
@@ -240,6 +242,8 @@ mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
spin_unlock_irqrestore(&gpio_lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+ return 0;
}
static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
@@ -311,7 +315,7 @@ static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
- gc->set = mpc52xx_simple_gpio_set;
+ gc->set_rv = mpc52xx_simple_gpio_set;
ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
if (ret)
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
index 561a961c97a6..3415cb7ebb0f 100644
--- a/drivers/gpio/gpio-mpfs.c
+++ b/drivers/gpio/gpio-mpfs.c
@@ -99,16 +99,19 @@ static int mpfs_gpio_get(struct gpio_chip *gc, unsigned int gpio_index)
return regmap_test_bits(mpfs_gpio->regs, mpfs_gpio->offsets->inp, BIT(gpio_index));
}
-static void mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value)
+static int mpfs_gpio_set(struct gpio_chip *gc, unsigned int gpio_index, int value)
{
struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
+ int ret;
mpfs_gpio_get(gc, gpio_index);
- regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
- value << gpio_index);
+ ret = regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp,
+ BIT(gpio_index), value << gpio_index);
mpfs_gpio_get(gc, gpio_index);
+
+ return ret;
}
static int mpfs_gpio_probe(struct platform_device *pdev)
@@ -147,7 +150,7 @@ static int mpfs_gpio_probe(struct platform_device *pdev)
mpfs_gpio->gc.direction_output = mpfs_gpio_direction_output;
mpfs_gpio->gc.get_direction = mpfs_gpio_get_direction;
mpfs_gpio->gc.get = mpfs_gpio_get;
- mpfs_gpio->gc.set = mpfs_gpio_set;
+ mpfs_gpio->gc.set_rv = mpfs_gpio_set;
mpfs_gpio->gc.base = -1;
mpfs_gpio->gc.ngpio = ngpios;
mpfs_gpio->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index 3ea32c5e33d1..b17de08e9e03 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -160,8 +160,8 @@ static int gpio_mpsse_get_bank(struct mpsse_priv *priv, u8 bank)
return buf;
}
-static void gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
unsigned long i, bank, bank_mask, bank_bits;
int ret;
@@ -180,11 +180,11 @@ static void gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
ret = gpio_mpsse_set_bank(priv, bank);
if (ret)
- dev_err(&priv->intf->dev,
- "Couldn't set values for bank %ld!",
- bank);
+ return ret;
}
}
+
+ return 0;
}
static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -227,7 +227,7 @@ static int gpio_mpsse_gpio_get(struct gpio_chip *chip, unsigned int offset)
return 0;
}
-static void gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
+static int gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
int value)
{
unsigned long mask = 0, bits = 0;
@@ -236,7 +236,7 @@ static void gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
if (value)
__set_bit(offset, &bits);
- gpio_mpsse_set_multiple(chip, &mask, &bits);
+ return gpio_mpsse_set_multiple(chip, &mask, &bits);
}
static int gpio_mpsse_direction_output(struct gpio_chip *chip,
@@ -249,9 +249,7 @@ static int gpio_mpsse_direction_output(struct gpio_chip *chip,
scoped_guard(mutex, &priv->io_mutex)
priv->gpio_dir[bank] |= BIT(bank_offset);
- gpio_mpsse_gpio_set(chip, offset, value);
-
- return 0;
+ return gpio_mpsse_gpio_set(chip, offset, value);
}
static int gpio_mpsse_direction_input(struct gpio_chip *chip,
@@ -450,9 +448,9 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.direction_input = gpio_mpsse_direction_input;
priv->gpio.direction_output = gpio_mpsse_direction_output;
priv->gpio.get = gpio_mpsse_gpio_get;
- priv->gpio.set = gpio_mpsse_gpio_set;
+ priv->gpio.set_rv = gpio_mpsse_gpio_set;
priv->gpio.get_multiple = gpio_mpsse_get_multiple;
- priv->gpio.set_multiple = gpio_mpsse_set_multiple;
+ priv->gpio.set_multiple_rv = gpio_mpsse_set_multiple;
priv->gpio.base = -1;
priv->gpio.ngpio = 16;
priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 6db9e469e0dc..992339a89d19 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -486,7 +486,7 @@ struct msc313_gpio {
u8 *saved;
};
-static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct msc313_gpio *gpio = gpiochip_get_data(chip);
u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
@@ -497,6 +497,8 @@ static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int val
gpioreg &= ~MSC313_GPIO_OUT;
writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+
+ return 0;
}
static int msc313_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -656,7 +658,7 @@ static int msc313_gpio_probe(struct platform_device *pdev)
gpiochip->direction_input = msc313_gpio_direction_input;
gpiochip->direction_output = msc313_gpio_direction_output;
gpiochip->get = msc313_gpio_get;
- gpiochip->set = msc313_gpio_set;
+ gpiochip->set_rv = msc313_gpio_set;
gpiochip->base = -1;
gpiochip->ngpio = gpio->gpio_data->num;
gpiochip->names = gpio->gpio_data->names;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 57633a7b4270..24792b8eb083 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1236,8 +1236,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
if (!have_irqs)
return 0;
- mvchip->domain =
- irq_domain_create_linear(of_fwnode_handle(np), ngpios, &irq_generic_chip_ops, NULL);
+ mvchip->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), ngpios,
+ &irq_generic_chip_ops, NULL);
if (!mvchip->domain) {
dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
mvchip->chip.label);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index fae1a30f8ae6..433cbadc3a4c 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -7,6 +7,7 @@
// Authors: Daniel Mack, Juergen Beisert.
// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -22,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/of.h>
#include <linux/bug.h>
@@ -64,7 +66,7 @@ struct mxc_gpio_port {
int irq_high;
void (*mx_irq_handler)(struct irq_desc *desc);
struct irq_domain *domain;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
struct device *dev;
u32 both_edges;
struct mxc_gpio_reg_saved gpio_saved_reg;
@@ -161,7 +163,6 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
- unsigned long flags;
u32 bit, val;
u32 gpio_idx = d->hwirq;
int edge;
@@ -179,7 +180,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
if (GPIO_EDGE_SEL >= 0) {
edge = GPIO_INT_BOTH_EDGES;
} else {
- val = port->gc.get(&port->gc, gpio_idx);
+ val = port->gen_gc.gc.get(&port->gen_gc.gc, gpio_idx);
if (val) {
edge = GPIO_INT_LOW_LEV;
pr_debug("mxc: set GPIO %d to low trigger\n", gpio_idx);
@@ -200,41 +201,38 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &port->gen_gc) {
+ if (GPIO_EDGE_SEL >= 0) {
+ val = readl(port->base + GPIO_EDGE_SEL);
+ if (edge == GPIO_INT_BOTH_EDGES)
+ writel(val | (1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ else
+ writel(val & ~(1 << gpio_idx),
+ port->base + GPIO_EDGE_SEL);
+ }
- if (GPIO_EDGE_SEL >= 0) {
- val = readl(port->base + GPIO_EDGE_SEL);
- if (edge == GPIO_INT_BOTH_EDGES)
- writel(val | (1 << gpio_idx),
- port->base + GPIO_EDGE_SEL);
- else
- writel(val & ~(1 << gpio_idx),
- port->base + GPIO_EDGE_SEL);
- }
+ if (edge != GPIO_INT_BOTH_EDGES) {
+ reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
+ bit = gpio_idx & 0xf;
+ val = readl(reg) & ~(0x3 << (bit << 1));
+ writel(val | (edge << (bit << 1)), reg);
+ }
- if (edge != GPIO_INT_BOTH_EDGES) {
- reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* lower or upper register */
- bit = gpio_idx & 0xf;
- val = readl(reg) & ~(0x3 << (bit << 1));
- writel(val | (edge << (bit << 1)), reg);
+ writel(1 << gpio_idx, port->base + GPIO_ISR);
+ port->pad_type[gpio_idx] = type;
}
- writel(1 << gpio_idx, port->base + GPIO_ISR);
- port->pad_type[gpio_idx] = type;
-
- raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
-
- return port->gc.direction_input(&port->gc, gpio_idx);
+ return port->gen_gc.gc.direction_input(&port->gen_gc.gc, gpio_idx);
}
static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
{
void __iomem *reg = port->base;
- unsigned long flags;
u32 bit, val;
int edge;
- raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&port->gen_gc);
reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
bit = gpio & 0xf;
@@ -250,12 +248,9 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
} else {
pr_err("mxc: invalid configuration for GPIO %d: %x\n",
gpio, edge);
- goto unlock;
+ return;
}
writel(val | (edge << (bit << 1)), reg);
-
-unlock:
- raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
}
/* handle 32 interrupts in one status register */
@@ -420,6 +415,7 @@ static void mxc_update_irq_chained_handler(struct mxc_gpio_port *port, bool enab
static int mxc_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config = { };
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
int irq_count;
@@ -479,27 +475,31 @@ static int mxc_gpio_probe(struct platform_device *pdev)
port->mx_irq_handler = mx3_gpio_irq_handler;
mxc_update_irq_chained_handler(port, true);
- err = bgpio_init(&port->gc, &pdev->dev, 4,
- port->base + GPIO_PSR,
- port->base + GPIO_DR, NULL,
- port->base + GPIO_GDIR, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
+
+ config.dev = &pdev->dev;
+ config.sz = 4;
+ config.dat = port->base + GPIO_PSR;
+ config.set = port->base + GPIO_DR;
+ config.dirout = port->base + GPIO_GDIR;
+ config.flags = BGPIOF_READ_OUTPUT_REG_SET;
+
+ err = gpio_generic_chip_init(&port->gen_gc, &config);
if (err)
goto out_bgio;
- port->gc.request = mxc_gpio_request;
- port->gc.free = mxc_gpio_free;
- port->gc.to_irq = mxc_gpio_to_irq;
+ port->gen_gc.gc.request = mxc_gpio_request;
+ port->gen_gc.gc.free = mxc_gpio_free;
+ port->gen_gc.gc.to_irq = mxc_gpio_to_irq;
/*
* Driver is DT-only, so a fixed base needs only be maintained for legacy
* userspace with sysfs interface.
*/
if (IS_ENABLED(CONFIG_GPIO_SYSFS))
- port->gc.base = of_alias_get_id(np, "gpio") * 32;
+ port->gen_gc.gc.base = of_alias_get_id(np, "gpio") * 32;
else /* silence boot time warning */
- port->gc.base = -1;
+ port->gen_gc.gc.base = -1;
- err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port);
+ err = devm_gpiochip_add_data(&pdev->dev, &port->gen_gc.gc, port);
if (err)
goto out_bgio;
@@ -509,8 +509,8 @@ static int mxc_gpio_probe(struct platform_device *pdev)
goto out_bgio;
}
- port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
- &irq_domain_simple_ops, NULL);
+ port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
goto out_bgio;
@@ -573,7 +573,8 @@ static bool mxc_gpio_generic_config(struct mxc_gpio_port *port,
if (of_device_is_compatible(np, "fsl,imx8dxl-gpio") ||
of_device_is_compatible(np, "fsl,imx8qxp-gpio") ||
of_device_is_compatible(np, "fsl,imx8qm-gpio"))
- return (gpiochip_generic_config(&port->gc, offset, conf) == 0);
+ return (gpiochip_generic_config(&port->gen_gc.gc,
+ offset, conf) == 0);
return false;
}
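
Where a driver previously called bgpio_init() directly, as gpio-mxc did, the series routes it through the generic-chip wrapper instead: the MMIO parameters move into a struct gpio_generic_chip_config and locking goes through the gpio_generic_lock_irqsave guard. A condensed sketch of that sequence, using only the config fields that appear in the hunks above (dev, sz, dat, set, dirout, flags); the register offsets and example_* names are placeholders, and error handling is trimmed:

	#include <linux/cleanup.h>
	#include <linux/gpio/generic.h>

	static int example_generic_init(struct gpio_generic_chip *gen_gc,
					struct device *dev, void __iomem *base)
	{
		struct gpio_generic_chip_config config = {
			.dev	= dev,
			.sz	= 4,			/* 32-bit registers */
			.dat	= base + 0x08,		/* input level (GPIO_PSR above) */
			.set	= base + 0x00,		/* data register (GPIO_DR above) */
			.dirout	= base + 0x04,		/* direction (GPIO_GDIR above) */
			.flags	= BGPIOF_READ_OUTPUT_REG_SET,
		};
		int ret;

		ret = gpio_generic_chip_init(gen_gc, &config);
		if (ret)
			return ret;

		/* Register-level read-modify-write sections take the chip lock. */
		scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
			/* e.g. edge-select and ICR updates, as in gpio_set_irq_type() */
		}

		return 0;
	}
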
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b418fbccb26c..0ea46f3d04e1 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -303,7 +303,7 @@ static int mxs_gpio_probe(struct platform_device *pdev)
goto out_iounmap;
}
- port->domain = irq_domain_create_legacy(of_fwnode_handle(np), 32, irq_base, 0,
+ port->domain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), 32, irq_base, 0,
&irq_domain_simple_ops, NULL);
if (!port->domain) {
err = -ENODEV;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index fa19a44943fd..296d13845b30 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -347,8 +347,8 @@ static int nmk_gpio_get_input(struct gpio_chip *chip, unsigned int offset)
return value;
}
-static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
@@ -357,6 +357,8 @@ static void nmk_gpio_set_output(struct gpio_chip *chip, unsigned int offset,
__nmk_gpio_set_output(nmk_chip, offset, val);
clk_disable(nmk_chip->clk);
+
+ return 0;
}
static int nmk_gpio_make_output(struct gpio_chip *chip, unsigned int offset,
@@ -672,7 +674,7 @@ static int nmk_gpio_probe(struct platform_device *pdev)
chip->direction_input = nmk_gpio_make_input;
chip->get = nmk_gpio_get_input;
chip->direction_output = nmk_gpio_make_output;
- chip->set = nmk_gpio_set_output;
+ chip->set_rv = nmk_gpio_set_output;
chip->dbg_show = nmk_gpio_dbg_show;
chip->can_sleep = false;
chip->owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index 260570614543..25b203a89e38 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -211,9 +211,7 @@ static int npcm_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset)
static int npcm_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val)
{
- gc->set(gc, offset, val);
-
- return 0;
+ return gc->set_rv(gc, offset, val);
}
static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -226,7 +224,7 @@ static int npcm_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset)
return GPIO_LINE_DIRECTION_IN;
}
-static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct npcm_sgpio *gpio = gpiochip_get_data(gc);
const struct npcm_sgpio_bank *bank = offset_to_bank(offset);
@@ -242,6 +240,8 @@ static void npcm_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
reg &= ~BIT(GPIO_BIT(offset));
iowrite8(reg, addr);
+
+ return 0;
}
static int npcm_sgpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -546,7 +546,7 @@ static int npcm_sgpio_probe(struct platform_device *pdev)
gpio->chip.direction_output = npcm_sgpio_dir_out;
gpio->chip.get_direction = npcm_sgpio_get_direction;
gpio->chip.get = npcm_sgpio_get;
- gpio->chip.set = npcm_sgpio_set;
+ gpio->chip.set_rv = npcm_sgpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-octeon.c b/drivers/gpio/gpio-octeon.c
index afb0e8a791e5..24966161742a 100644
--- a/drivers/gpio/gpio-octeon.c
+++ b/drivers/gpio/gpio-octeon.c
@@ -47,12 +47,15 @@ static int octeon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void octeon_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int octeon_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct octeon_gpio *gpio = gpiochip_get_data(chip);
u64 mask = 1ull << offset;
u64 reg = gpio->register_base + (value ? TX_SET : TX_CLEAR);
cvmx_write_csr(reg, mask);
+
+ return 0;
}
static int octeon_gpio_dir_out(struct gpio_chip *chip, unsigned offset,
@@ -105,7 +108,7 @@ static int octeon_gpio_probe(struct platform_device *pdev)
chip->direction_input = octeon_gpio_dir_in;
chip->get = octeon_gpio_get;
chip->direction_output = octeon_gpio_dir_out;
- chip->set = octeon_gpio_set;
+ chip->set_rv = octeon_gpio_set;
err = devm_gpiochip_add_data(&pdev->dev, chip, gpio);
if (err)
return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 54c4bfdccf56..ed5c88a5c520 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -953,7 +953,7 @@ static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
return ret;
}
-static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int omap_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_bank *bank;
unsigned long flags;
@@ -962,10 +962,12 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_lock_irqsave(&bank->lock, flags);
bank->set_dataout(bank, offset, value);
raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
-static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_bank *bank = gpiochip_get_data(chip);
void __iomem *reg = bank->base + bank->regs->dataout;
@@ -977,6 +979,8 @@ static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
writel_relaxed(l, reg);
bank->context.dataout = l;
raw_spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
/*---------------------------------------------------------------------*/
@@ -1042,8 +1046,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
bank->chip.get_multiple = omap_gpio_get_multiple;
bank->chip.direction_output = omap_gpio_output;
bank->chip.set_config = omap_gpio_set_config;
- bank->chip.set = omap_gpio_set;
- bank->chip.set_multiple = omap_gpio_set_multiple;
+ bank->chip.set_rv = omap_gpio_set;
+ bank->chip.set_multiple_rv = omap_gpio_set_multiple;
if (bank->is_mpuio) {
bank->chip.label = "mpuio";
if (bank->regs->wkup_en)
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c
index 28dba7048509..9329d8ce8f59 100644
--- a/drivers/gpio/gpio-palmas.c
+++ b/drivers/gpio/gpio-palmas.c
@@ -54,12 +54,11 @@ static int palmas_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & BIT(offset));
}
-static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int palmas_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct palmas_gpio *pg = gpiochip_get_data(gc);
struct palmas *palmas = pg->palmas;
- int ret;
unsigned int reg;
int gpio16 = (offset/8);
@@ -71,9 +70,7 @@ static void palmas_gpio_set(struct gpio_chip *gc, unsigned offset,
reg = (value) ?
PALMAS_GPIO_SET_DATA_OUT : PALMAS_GPIO_CLEAR_DATA_OUT;
- ret = palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
- if (ret < 0)
- dev_err(gc->parent, "Reg 0x%02x write failed, %d\n", reg, ret);
+ return palmas_write(palmas, PALMAS_GPIO_BASE, reg, BIT(offset));
}
static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -89,7 +86,9 @@ static int palmas_gpio_output(struct gpio_chip *gc, unsigned offset,
reg = (gpio16) ? PALMAS_GPIO_DATA_DIR2 : PALMAS_GPIO_DATA_DIR;
/* Set the initial value */
- palmas_gpio_set(gc, offset, value);
+ ret = palmas_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
ret = palmas_update_bits(palmas, PALMAS_GPIO_BASE, reg,
BIT(offset), BIT(offset));
@@ -140,6 +139,7 @@ static const struct of_device_id of_palmas_gpio_match[] = {
{ .compatible = "ti,tps80036-gpio", .data = &tps80036_dev_data,},
{ },
};
+MODULE_DEVICE_TABLE(of, of_palmas_gpio_match);
static int palmas_gpio_probe(struct platform_device *pdev)
{
@@ -166,7 +166,7 @@ static int palmas_gpio_probe(struct platform_device *pdev)
palmas_gpio->gpio_chip.direction_input = palmas_gpio_input;
palmas_gpio->gpio_chip.direction_output = palmas_gpio_output;
palmas_gpio->gpio_chip.to_irq = palmas_gpio_to_irq;
- palmas_gpio->gpio_chip.set = palmas_gpio_set;
+ palmas_gpio->gpio_chip.set_rv = palmas_gpio_set;
palmas_gpio->gpio_chip.get = palmas_gpio_get;
palmas_gpio->gpio_chip.parent = &pdev->dev;
@@ -197,3 +197,13 @@ static int __init palmas_gpio_init(void)
return platform_driver_register(&palmas_gpio_driver);
}
subsys_initcall(palmas_gpio_init);
+
+static void __exit palmas_gpio_exit(void)
+{
+ platform_driver_unregister(&palmas_gpio_driver);
+}
+module_exit(palmas_gpio_exit);
+
+MODULE_DESCRIPTION("TI PALMAS series GPIO driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index e80a96f39788..69906a9af7e6 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -38,6 +38,10 @@
#define PCA953X_INVERT 0x02
#define PCA953X_DIRECTION 0x03
+#define TCA6418_INPUT 0x14
+#define TCA6418_OUTPUT 0x17
+#define TCA6418_DIRECTION 0x23
+
#define REG_ADDR_MASK GENMASK(5, 0)
#define REG_ADDR_EXT BIT(6)
#define REG_ADDR_AI BIT(7)
@@ -76,7 +80,8 @@
#define PCA953X_TYPE BIT(12)
#define PCA957X_TYPE BIT(13)
#define PCAL653X_TYPE BIT(14)
-#define PCA_TYPE_MASK GENMASK(15, 12)
+#define TCA6418_TYPE BIT(16)
+#define PCA_TYPE_MASK GENMASK(16, 12)
#define PCA_CHIP_TYPE(x) ((x) & PCA_TYPE_MASK)
@@ -115,6 +120,7 @@ static const struct i2c_device_id pca953x_id[] = {
{ "pca6107", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6408", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca6416", 16 | PCA953X_TYPE | PCA_INT, },
+ { "tca6418", 18 | TCA6418_TYPE | PCA_INT, },
{ "tca6424", 24 | PCA953X_TYPE | PCA_INT, },
{ "tca9538", 8 | PCA953X_TYPE | PCA_INT, },
{ "tca9539", 16 | PCA953X_TYPE | PCA_INT, },
@@ -204,6 +210,13 @@ static const struct pca953x_reg_config pca957x_regs = {
.invert = PCA957X_INVRT,
};
+static const struct pca953x_reg_config tca6418_regs = {
+ .direction = TCA6418_DIRECTION,
+ .output = TCA6418_OUTPUT,
+ .input = TCA6418_INPUT,
+ .invert = 0xFF, /* Does not apply */
+};
+
struct pca953x_chip {
unsigned gpio_start;
struct mutex i2c_lock;
@@ -237,6 +250,22 @@ static int pca953x_bank_shift(struct pca953x_chip *chip)
return fls((chip->gpio_chip.ngpio - 1) / BANK_SZ);
}
+/*
+ * Helper function to get the correct bit mask for a given offset and chip type.
+ * The TCA6418's input, output, and direction banks have a peculiar bit order:
+ * the first byte uses reversed bit order, while the second byte uses standard order.
+ */
+static inline u8 pca953x_get_bit_mask(struct pca953x_chip *chip, unsigned int offset)
+{
+ unsigned int bit_pos_in_bank = offset % BANK_SZ;
+ int msb = BANK_SZ - 1;
+
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE && offset <= msb)
+ return BIT(msb - bit_pos_in_bank);
+
+ return BIT(bit_pos_in_bank);
+}
+
#define PCA953x_BANK_INPUT BIT(0)
#define PCA953x_BANK_OUTPUT BIT(1)
#define PCA953x_BANK_POLARITY BIT(2)
@@ -353,18 +382,43 @@ static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg,
return true;
}
+/* TCA6418 breaks the PCA953x register order rule */
+static bool tca6418_check_register(struct pca953x_chip *chip, unsigned int reg,
+ u32 access_type_mask)
+{
+ /* Valid Input Registers - BIT(0) for readable access */
+ if (reg >= TCA6418_INPUT && reg < (TCA6418_INPUT + NBANK(chip)))
+ return (access_type_mask & BIT(0));
+
+	/* Valid Output Registers - readable (BIT(0)) or writeable (BIT(1)) access */
+ if (reg >= TCA6418_OUTPUT && reg < (TCA6418_OUTPUT + NBANK(chip)))
+ return (access_type_mask & (BIT(0) | BIT(1)));
+
+	/* Valid Direction Registers - readable (BIT(0)) or writeable (BIT(1)) access */
+ if (reg >= TCA6418_DIRECTION && reg < (TCA6418_DIRECTION + NBANK(chip)))
+ return (access_type_mask & (BIT(0) | BIT(1)));
+
+ return false;
+}
+
static bool pca953x_readable_register(struct device *dev, unsigned int reg)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_INPUT | PCA957x_BANK_OUTPUT |
PCA957x_BANK_POLARITY | PCA957x_BANK_CONFIG |
PCA957x_BANK_BUSHOLD;
- } else {
+ break;
+ case TCA6418_TYPE:
+ /* BIT(0) to indicate read access */
+ return tca6418_check_register(chip, reg, BIT(0));
+ default:
bank = PCA953x_BANK_INPUT | PCA953x_BANK_OUTPUT |
PCA953x_BANK_POLARITY | PCA953x_BANK_CONFIG;
+ break;
}
if (chip->driver_data & PCA_PCAL) {
@@ -381,12 +435,18 @@ static bool pca953x_writeable_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_OUTPUT | PCA957x_BANK_POLARITY |
PCA957x_BANK_CONFIG | PCA957x_BANK_BUSHOLD;
- } else {
+ break;
+ case TCA6418_TYPE:
+ /* BIT(1) for write access */
+ return tca6418_check_register(chip, reg, BIT(1));
+ default:
bank = PCA953x_BANK_OUTPUT | PCA953x_BANK_POLARITY |
PCA953x_BANK_CONFIG;
+ break;
}
if (chip->driver_data & PCA_PCAL)
@@ -401,10 +461,17 @@ static bool pca953x_volatile_register(struct device *dev, unsigned int reg)
struct pca953x_chip *chip = dev_get_drvdata(dev);
u32 bank;
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE)
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
bank = PCA957x_BANK_INPUT;
- else
+ break;
+ case TCA6418_TYPE:
+ /* BIT(2) for volatile access */
+ return tca6418_check_register(chip, reg, BIT(2));
+ default:
bank = PCA953x_BANK_INPUT;
+ break;
+ }
if (chip->driver_data & PCA_PCAL)
bank |= PCAL9xxx_BANK_IRQ_STAT;
@@ -489,6 +556,16 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
return pinctrl + addr + (off / BANK_SZ);
}
+static u8 tca6418_recalc_addr(struct pca953x_chip *chip, int reg_base, int offset)
+{
+ /*
+ * reg_base will be TCA6418_INPUT, TCA6418_OUTPUT, or TCA6418_DIRECTION
+ * offset is the global GPIO line offset (0-17)
+ * BANK_SZ is 8 for TCA6418 (8 bits per register bank)
+ */
+ return reg_base + (offset / BANK_SZ);
+}
+
static int pca953x_write_regs(struct pca953x_chip *chip, int reg, unsigned long *val)
{
u8 regaddr = chip->recalc_addr(chip, reg, 0);
@@ -529,11 +606,14 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
guard(mutex)(&chip->i2c_lock);
- return regmap_write_bits(chip->regmap, dirreg, bit, bit);
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return regmap_update_bits(chip->regmap, dirreg, bit, 0);
+
+ return regmap_update_bits(chip->regmap, dirreg, bit, bit);
}
static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -542,25 +622,31 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc,
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
int ret;
guard(mutex)(&chip->i2c_lock);
/* set output level */
- ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ ret = regmap_update_bits(chip->regmap, outreg, bit, val ? bit : 0);
if (ret)
return ret;
- /* then direction */
- return regmap_write_bits(chip->regmap, dirreg, bit, 0);
+ /*
+ * then direction
+ * (in/out logic is inverted on TCA6418)
+ */
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return regmap_update_bits(chip->regmap, dirreg, bit, bit);
+
+ return regmap_update_bits(chip->regmap, dirreg, bit, 0);
}
static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 inreg = chip->recalc_addr(chip, chip->regs->input, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
u32 reg_val;
int ret;
@@ -577,18 +663,18 @@ static int pca953x_gpio_set_value(struct gpio_chip *gc, unsigned int off,
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 outreg = chip->recalc_addr(chip, chip->regs->output, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
guard(mutex)(&chip->i2c_lock);
- return regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0);
+ return regmap_update_bits(chip->regmap, outreg, bit, val ? bit : 0);
}
static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
{
struct pca953x_chip *chip = gpiochip_get_data(gc);
u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off);
- u8 bit = BIT(off % BANK_SZ);
+ u8 bit = pca953x_get_bit_mask(chip, off);
u32 reg_val;
int ret;
@@ -597,7 +683,14 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off)
if (ret < 0)
return ret;
- if (reg_val & bit)
+ /* (in/out logic is inverted on TCA6418) */
+ if (reg_val & bit) {
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ return GPIO_LINE_DIRECTION_IN;
+ }
+ if (PCA_CHIP_TYPE(chip->driver_data) == TCA6418_TYPE)
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -658,9 +751,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
/* Configure pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_PULL_UP)
- ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, bit);
+ ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, bit);
else if (param == PIN_CONFIG_BIAS_PULL_DOWN)
- ret = regmap_write_bits(chip->regmap, pull_sel_reg, bit, 0);
+ ret = regmap_update_bits(chip->regmap, pull_sel_reg, bit, 0);
else
ret = 0;
if (ret)
@@ -668,9 +761,9 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip,
/* Disable/Enable pull-up/pull-down */
if (param == PIN_CONFIG_BIAS_DISABLE)
- return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0);
+ return regmap_update_bits(chip->regmap, pull_en_reg, bit, 0);
else
- return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit);
+ return regmap_update_bits(chip->regmap, pull_en_reg, bit, bit);
}
static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
@@ -1117,12 +1210,22 @@ static int pca953x_probe(struct i2c_client *client)
regmap_config = &pca953x_i2c_regmap;
}
- if (PCA_CHIP_TYPE(chip->driver_data) == PCAL653X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCAL653X_TYPE:
chip->recalc_addr = pcal6534_recalc_addr;
chip->check_reg = pcal6534_check_register;
- } else {
+ break;
+ case TCA6418_TYPE:
+ chip->recalc_addr = tca6418_recalc_addr;
+ /*
+ * We don't assign chip->check_reg = tca6418_check_register directly here.
+ * Instead, the wrappers handle the dispatch based on PCA_CHIP_TYPE.
+ */
+ break;
+ default:
chip->recalc_addr = pca953x_recalc_addr;
chip->check_reg = pca953x_check_register;
+ break;
}
chip->regmap = devm_regmap_init_i2c(client, regmap_config);
@@ -1151,15 +1254,22 @@ static int pca953x_probe(struct i2c_client *client)
lockdep_set_subclass(&chip->i2c_lock,
i2c_adapter_depth(client->adapter));
- /* initialize cached registers from their original values.
+ /*
+ * initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- if (PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
+ switch (PCA_CHIP_TYPE(chip->driver_data)) {
+ case PCA957X_TYPE:
chip->regs = &pca957x_regs;
ret = device_pca957x_init(chip);
- } else {
+ break;
+ case TCA6418_TYPE:
+ chip->regs = &tca6418_regs;
+ break;
+ default:
chip->regs = &pca953x_regs;
ret = device_pca95xx_init(chip);
+ break;
}
if (ret)
return ret;
@@ -1325,6 +1435,7 @@ static const struct of_device_id pca953x_dt_ids[] = {
{ .compatible = "ti,pca9536", .data = OF_953X( 4, 0), },
{ .compatible = "ti,tca6408", .data = OF_953X( 8, PCA_INT), },
{ .compatible = "ti,tca6416", .data = OF_953X(16, PCA_INT), },
+ { .compatible = "ti,tca6418", .data = (void *)(18 | TCA6418_TYPE | PCA_INT), },
{ .compatible = "ti,tca6424", .data = OF_953X(24, PCA_INT), },
{ .compatible = "ti,tca9535", .data = OF_953X(16, PCA_INT), },
{ .compatible = "ti,tca9538", .data = OF_953X( 8, PCA_INT), },
@@ -1355,7 +1466,9 @@ static int __init pca953x_init(void)
{
return i2c_add_driver(&pca953x_driver);
}
-/* register after i2c postcore initcall and before
+
+/*
+ * register after i2c postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(pca953x_init);
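
The only non-mechanical part of the TCA6418 support above is the bit ordering handled by pca953x_get_bit_mask(): per its comment, the first byte of each register bank uses reversed bit order while the following byte uses the normal order. A standalone restatement of that arithmetic, assuming BANK_SZ is 8 as in the existing driver, purely to make the mapping easy to check:

	#define BANK_SZ 8

	/* Same computation as pca953x_get_bit_mask() for a TCA6418. */
	static unsigned int tca6418_mask_for(unsigned int offset)
	{
		unsigned int bit = offset % BANK_SZ;

		if (offset <= BANK_SZ - 1)		/* first byte: reversed order */
			return 1U << (BANK_SZ - 1 - bit);

		return 1U << bit;			/* later lines: normal order */
	}

	/*
	 * Examples:
	 *   offset 0  -> 0x80  (bit 7 of the first register)
	 *   offset 7  -> 0x01  (bit 0 of the first register)
	 *   offset 10 -> 0x04  (bit 2 of the second register)
	 */
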
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index d37ba4049368..a33246f20fd8 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -88,7 +88,7 @@ static int pca9570_get(struct gpio_chip *chip, unsigned offset)
return !!(buffer & BIT(offset));
}
-static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pca9570_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct pca9570 *gpio = gpiochip_get_data(chip);
u8 buffer;
@@ -110,6 +110,7 @@ static void pca9570_set(struct gpio_chip *chip, unsigned offset, int value)
out:
mutex_unlock(&gpio->lock);
+ return ret;
}
static int pca9570_probe(struct i2c_client *client)
@@ -125,7 +126,7 @@ static int pca9570_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get_direction = pca9570_get_direction;
gpio->chip.get = pca9570_get;
- gpio->chip.set = pca9570_set;
+ gpio->chip.set_rv = pca9570_set;
gpio->chip.base = -1;
gpio->chip_data = device_get_match_data(&client->dev);
gpio->chip.ngpio = gpio->chip_data->ngpio;
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 2e5f5d7f8865..a04203680333 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -171,21 +171,24 @@ static int pcf857x_output(struct gpio_chip *chip, unsigned int offset, int value
return status;
}
-static void pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- pcf857x_output(chip, offset, value);
+ return pcf857x_output(chip, offset, value);
}
-static void pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
struct pcf857x *gpio = gpiochip_get_data(chip);
+ int status;
mutex_lock(&gpio->lock);
gpio->out &= ~*mask;
gpio->out |= *bits & *mask;
- gpio->write(gpio->client, gpio->out);
+ status = gpio->write(gpio->client, gpio->out);
mutex_unlock(&gpio->lock);
+
+ return status;
}
/*-------------------------------------------------------------------------*/
@@ -292,8 +295,8 @@ static int pcf857x_probe(struct i2c_client *client)
gpio->chip.owner = THIS_MODULE;
gpio->chip.get = pcf857x_get;
gpio->chip.get_multiple = pcf857x_get_multiple;
- gpio->chip.set = pcf857x_set;
- gpio->chip.set_multiple = pcf857x_set_multiple;
+ gpio->chip.set_rv = pcf857x_set;
+ gpio->chip.set_multiple_rv = pcf857x_set_multiple;
gpio->chip.direction_input = pcf857x_input;
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = (uintptr_t)i2c_get_match_data(client);
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 63f25c72eac2..c6f313342ba0 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -99,7 +99,7 @@ struct pch_gpio {
spinlock_t spinlock;
};
-static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
+static int pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
u32 reg_val;
struct pch_gpio *chip = gpiochip_get_data(gpio);
@@ -114,6 +114,8 @@ static void pch_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
iowrite32(reg_val, &chip->reg->po);
spin_unlock_irqrestore(&chip->spinlock, flags);
+
+ return 0;
}
static int pch_gpio_get(struct gpio_chip *gpio, unsigned int nr)
@@ -217,7 +219,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
gpio->direction_input = pch_gpio_direction_input;
gpio->get = pch_gpio_get;
gpio->direction_output = pch_gpio_direction_output;
- gpio->set = pch_gpio_set;
+ gpio->set_rv = pch_gpio_set;
gpio->base = -1;
gpio->ngpio = gpio_pins[chip->ioh];
gpio->can_sleep = false;
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index e3013e778e15..a69b74866a13 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -67,13 +67,6 @@ static int pisosr_gpio_direction_input(struct gpio_chip *chip,
return 0;
}
-static int pisosr_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- /* This device is input only */
- return -EINVAL;
-}
-
static int pisosr_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct pisosr_gpio *gpio = gpiochip_get_data(chip);
@@ -108,7 +101,6 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.get_direction = pisosr_gpio_get_direction,
.direction_input = pisosr_gpio_direction_input,
- .direction_output = pisosr_gpio_direction_output,
.get = pisosr_gpio_get,
.get_multiple = pisosr_gpio_get_multiple,
.base = -1,
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 1c273727ffa3..98cfac4eac85 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -115,11 +115,13 @@ static int pl061_get_value(struct gpio_chip *gc, unsigned offset)
return !!readb(pl061->base + (BIT(offset + 2)));
}
-static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value)
+static int pl061_set_value(struct gpio_chip *gc, unsigned int offset, int value)
{
struct pl061 *pl061 = gpiochip_get_data(gc);
writeb(!!value << offset, pl061->base + (BIT(offset + 2)));
+
+ return 0;
}
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
@@ -328,7 +330,7 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
pl061->gc.direction_input = pl061_direction_input;
pl061->gc.direction_output = pl061_direction_output;
pl061->gc.get = pl061_get_value;
- pl061->gc.set = pl061_set_value;
+ pl061->gc.set_rv = pl061_set_value;
pl061->gc.ngpio = PL061_GPIO_NR;
pl061->gc.label = dev_name(dev);
pl061->gc.parent = dev;
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index d9b228bea42e..cb015fb5c946 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -109,12 +109,6 @@ static int sprd_pmic_eic_direction_input(struct gpio_chip *chip,
return 0;
}
-static void sprd_pmic_eic_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- /* EICs are always input, nothing need to do here. */
-}
-
static int sprd_pmic_eic_set_debounce(struct gpio_chip *chip,
unsigned int offset,
unsigned int debounce)
@@ -351,7 +345,6 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.request = sprd_pmic_eic_request;
pmic_eic->chip.free = sprd_pmic_eic_free;
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
- pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
pmic_eic->chip.can_sleep = true;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index aead35ea090e..13f7da2a9486 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -315,12 +315,14 @@ static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(gplr & GPIO_bit(offset));
}
-static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int pxa_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
void __iomem *base = gpio_bank_base(chip, offset);
writel_relaxed(GPIO_bit(offset),
base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+
+ return 0;
}
#ifdef CONFIG_OF_GPIO
@@ -353,7 +355,7 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, void __iom
pchip->chip.direction_input = pxa_gpio_direction_input;
pchip->chip.direction_output = pxa_gpio_direction_output;
pchip->chip.get = pxa_gpio_get;
- pchip->chip.set = pxa_gpio_set;
+ pchip->chip.set_rv = pxa_gpio_set;
pchip->chip.to_irq = pxa_gpio_to_irq;
pchip->chip.ngpio = ngpio;
pchip->chip.request = gpiochip_generic_request;
@@ -642,9 +644,8 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (!pxa_last_gpio)
return -EINVAL;
- pchip->irqdomain = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node),
- pxa_last_gpio + 1, irq_base, 0,
- &pxa_irq_domain_ops, pchip);
+ pchip->irqdomain = irq_domain_create_legacy(dev_fwnode(&pdev->dev), pxa_last_gpio + 1,
+ irq_base, 0, &pxa_irq_domain_ops, pchip);
if (!pchip->irqdomain)
return -ENOMEM;
diff --git a/drivers/gpio/gpio-raspberrypi-exp.c b/drivers/gpio/gpio-raspberrypi-exp.c
index 9d1b95e429f1..b4b607515a04 100644
--- a/drivers/gpio/gpio-raspberrypi-exp.c
+++ b/drivers/gpio/gpio-raspberrypi-exp.c
@@ -175,7 +175,7 @@ static int rpi_exp_gpio_get(struct gpio_chip *gc, unsigned int off)
return !!get.state;
}
-static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
+static int rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
{
struct rpi_exp_gpio *gpio;
struct gpio_get_set_state set;
@@ -188,10 +188,14 @@ static void rpi_exp_gpio_set(struct gpio_chip *gc, unsigned int off, int val)
ret = rpi_firmware_property(gpio->fw, RPI_FIRMWARE_SET_GPIO_STATE,
&set, sizeof(set));
- if (ret || set.gpio != 0)
+ if (ret || set.gpio != 0) {
dev_err(gc->parent,
"Failed to set GPIO %u state (%d %x)\n", off, ret,
set.gpio);
+ return ret ? ret : -EIO;
+ }
+
+ return 0;
}
static int rpi_exp_gpio_probe(struct platform_device *pdev)
@@ -228,7 +232,7 @@ static int rpi_exp_gpio_probe(struct platform_device *pdev)
rpi_gpio->gc.direction_output = rpi_exp_gpio_dir_out;
rpi_gpio->gc.get_direction = rpi_exp_gpio_get_direction;
rpi_gpio->gc.get = rpi_exp_gpio_get;
- rpi_gpio->gc.set = rpi_exp_gpio_set;
+ rpi_gpio->gc.set_rv = rpi_exp_gpio_set;
rpi_gpio->gc.can_sleep = true;
return devm_gpiochip_add_data(dev, &rpi_gpio->gc, rpi_gpio);
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c
index c34dcadaee36..cf3e91d235df 100644
--- a/drivers/gpio/gpio-rc5t583.c
+++ b/drivers/gpio/gpio-rc5t583.c
@@ -35,14 +35,20 @@ static int rc5t583_gpio_get(struct gpio_chip *gc, unsigned int offset)
return !!(val & BIT(offset));
}
-static void rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int rc5t583_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct rc5t583_gpio *rc5t583_gpio = gpiochip_get_data(gc);
struct device *parent = rc5t583_gpio->rc5t583->dev;
+ int ret;
+
if (val)
- rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOOUT,
+ BIT(offset));
else
- rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT, BIT(offset));
+ ret = rc5t583_clear_bits(parent, RC5T583_GPIO_IOOUT,
+ BIT(offset));
+
+ return ret;
}
static int rc5t583_gpio_dir_input(struct gpio_chip *gc, unsigned int offset)
@@ -66,7 +72,10 @@ static int rc5t583_gpio_dir_output(struct gpio_chip *gc, unsigned offset,
struct device *parent = rc5t583_gpio->rc5t583->dev;
int ret;
- rc5t583_gpio_set(gc, offset, value);
+ ret = rc5t583_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
+
ret = rc5t583_set_bits(parent, RC5T583_GPIO_IOSEL, BIT(offset));
if (ret < 0)
return ret;
@@ -109,7 +118,7 @@ static int rc5t583_gpio_probe(struct platform_device *pdev)
rc5t583_gpio->gpio_chip.free = rc5t583_gpio_free,
rc5t583_gpio->gpio_chip.direction_input = rc5t583_gpio_dir_input,
rc5t583_gpio->gpio_chip.direction_output = rc5t583_gpio_dir_output,
- rc5t583_gpio->gpio_chip.set = rc5t583_gpio_set,
+ rc5t583_gpio->gpio_chip.set_rv = rc5t583_gpio_set,
rc5t583_gpio->gpio_chip.get = rc5t583_gpio_get,
rc5t583_gpio->gpio_chip.to_irq = rc5t583_gpio_to_irq,
rc5t583_gpio->gpio_chip.ngpio = RC5T583_MAX_GPIO,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 18c965ee02c8..cd31580effa9 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -331,14 +331,11 @@ static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
+ u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- u32 bankmask, outputs, m, val = 0;
+ u32 outputs, m, val = 0;
unsigned long flags;
- bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (!bankmask)
- return 0;
-
if (p->info.has_always_in) {
bits[0] = gpio_rcar_read(p, INDT) & bankmask;
return 0;
@@ -359,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
-static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
+static int gpio_rcar_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
@@ -367,18 +364,17 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
-static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
+ u32 bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- u32 val, bankmask;
-
- bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
- if (!bankmask)
- return;
+ u32 val;
raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
@@ -386,6 +382,8 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -537,8 +535,8 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get = gpio_rcar_get;
gpio_chip->get_multiple = gpio_rcar_get_multiple;
gpio_chip->direction_output = gpio_rcar_direction_output;
- gpio_chip->set = gpio_rcar_set;
- gpio_chip->set_multiple = gpio_rcar_set_multiple;
+ gpio_chip->set_rv = gpio_rcar_set;
+ gpio_chip->set_multiple_rv = gpio_rcar_set_multiple;
gpio_chip->label = name;
gpio_chip->parent = dev;
gpio_chip->owner = THIS_MODULE;
@@ -594,7 +592,6 @@ static void gpio_rcar_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int gpio_rcar_suspend(struct device *dev)
{
struct gpio_rcar_priv *p = dev_get_drvdata(dev);
@@ -653,16 +650,16 @@ static int gpio_rcar_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP*/
-static SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend, gpio_rcar_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(gpio_rcar_pm_ops, gpio_rcar_suspend,
+ gpio_rcar_resume);
static struct platform_driver gpio_rcar_device_driver = {
.probe = gpio_rcar_probe,
.remove = gpio_rcar_remove,
.driver = {
.name = "gpio_rcar",
- .pm = &gpio_rcar_pm_ops,
+ .pm = pm_sleep_ptr(&gpio_rcar_pm_ops),
.of_match_table = gpio_rcar_of_table,
}
};
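
The gpio-rcar hunk also replaces the CONFIG_PM_SLEEP ifdeffery with DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr(), which keeps the handlers visible to the compiler (avoiding unused-function warnings) while letting them be dropped when sleep support is disabled. A minimal sketch of the pattern with placeholder example_* names; it mirrors only the change above, not the full driver:

	#include <linux/platform_device.h>
	#include <linux/pm.h>

	static int example_suspend(struct device *dev)
	{
		/* save controller state */
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		/* restore controller state */
		return 0;
	}

	/* No #ifdef CONFIG_PM_SLEEP needed around the handlers. */
	static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	static struct platform_driver example_driver = {
		.driver = {
			.name	= "example",
			/* Evaluates to NULL when CONFIG_PM_SLEEP is off. */
			.pm	= pm_sleep_ptr(&example_pm_ops),
		},
	};
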
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index ec7fb9220a47..a75ed8021de5 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -64,8 +64,8 @@ static void rdc_gpio_set_value_impl(struct gpio_chip *chip,
}
/* set GPIO pin to value */
-static void rdc_gpio_set_value(struct gpio_chip *chip,
- unsigned gpio, int value)
+static int rdc_gpio_set_value(struct gpio_chip *chip, unsigned int gpio,
+ int value)
{
struct rdc321x_gpio *gpch;
@@ -73,6 +73,8 @@ static void rdc_gpio_set_value(struct gpio_chip *chip,
spin_lock(&gpch->lock);
rdc_gpio_set_value_impl(chip, gpio, value);
spin_unlock(&gpch->lock);
+
+ return 0;
}
static int rdc_gpio_config(struct gpio_chip *chip,
@@ -157,7 +159,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
- rdc321x_gpio_dev->chip.set = rdc_gpio_set_value;
+ rdc321x_gpio_dev->chip.set_rv = rdc_gpio_set_value;
rdc321x_gpio_dev->chip.base = 0;
rdc321x_gpio_dev->chip.ngpio = pdata->max_gpios;
diff --git a/drivers/gpio/gpio-reg.c b/drivers/gpio/gpio-reg.c
index 73c7260d89c0..d8da99f97385 100644
--- a/drivers/gpio/gpio-reg.c
+++ b/drivers/gpio/gpio-reg.c
@@ -46,7 +46,7 @@ static int gpio_reg_direction_output(struct gpio_chip *gc, unsigned offset,
if (r->direction & BIT(offset))
return -ENOTSUPP;
- gc->set(gc, offset, value);
+ gc->set_rv(gc, offset, value);
return 0;
}
@@ -57,7 +57,7 @@ static int gpio_reg_direction_input(struct gpio_chip *gc, unsigned offset)
return r->direction & BIT(offset) ? 0 : -ENOTSUPP;
}
-static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value)
+static int gpio_reg_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct gpio_reg *r = to_gpio_reg(gc);
unsigned long flags;
@@ -72,6 +72,8 @@ static void gpio_reg_set(struct gpio_chip *gc, unsigned offset, int value)
r->out = val;
writel_relaxed(val, r->reg);
spin_unlock_irqrestore(&r->lock, flags);
+
+ return 0;
}
static int gpio_reg_get(struct gpio_chip *gc, unsigned offset)
@@ -92,8 +94,8 @@ static int gpio_reg_get(struct gpio_chip *gc, unsigned offset)
return !!(val & mask);
}
-static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
struct gpio_reg *r = to_gpio_reg(gc);
unsigned long flags;
@@ -102,6 +104,8 @@ static void gpio_reg_set_multiple(struct gpio_chip *gc, unsigned long *mask,
r->out = (r->out & ~*mask) | (*bits & *mask);
writel_relaxed(r->out, r->reg);
spin_unlock_irqrestore(&r->lock, flags);
+
+ return 0;
}
static int gpio_reg_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -157,9 +161,9 @@ struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
r->gc.get_direction = gpio_reg_get_direction;
r->gc.direction_input = gpio_reg_direction_input;
r->gc.direction_output = gpio_reg_direction_output;
- r->gc.set = gpio_reg_set;
+ r->gc.set_rv = gpio_reg_set;
r->gc.get = gpio_reg_get;
- r->gc.set_multiple = gpio_reg_set_multiple;
+ r->gc.set_multiple_rv = gpio_reg_set_multiple;
if (irqs)
r->gc.to_irq = gpio_reg_to_irq;
r->gc.base = base;
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index c63352f2f1ec..ecd60ff9e1dd 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -177,8 +177,8 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
return 0;
}
-static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct rockchip_pin_bank *bank = gpiochip_get_data(gc);
unsigned long flags;
@@ -186,6 +186,8 @@ static void rockchip_gpio_set(struct gpio_chip *gc, unsigned int offset,
raw_spin_lock_irqsave(&bank->slock, flags);
rockchip_gpio_writel_bit(bank, offset, value, bank->gpio_regs->port_dr);
raw_spin_unlock_irqrestore(&bank->slock, flags);
+
+ return 0;
}
static int rockchip_gpio_get(struct gpio_chip *gc, unsigned int offset)
@@ -325,7 +327,7 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
static const struct gpio_chip rockchip_gpiolib_chip = {
.request = gpiochip_generic_request,
.free = gpiochip_generic_free,
- .set = rockchip_gpio_set,
+ .set_rv = rockchip_gpio_set,
.get = rockchip_gpio_get,
.get_direction = rockchip_gpio_get_direction,
.direction_input = rockchip_gpio_direction_input,
@@ -521,8 +523,8 @@ static int rockchip_interrupts_register(struct rockchip_pin_bank *bank)
struct irq_chip_generic *gc;
int ret;
- bank->domain = irq_domain_create_linear(of_fwnode_handle(bank->of_node), 32,
- &irq_generic_chip_ops, NULL);
+ bank->domain = irq_domain_create_linear(dev_fwnode(bank->dev), 32, &irq_generic_chip_ops,
+ NULL);
if (!bank->domain) {
dev_warn(bank->dev, "could not init irq domain for bank %s\n",
bank->name);
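This hunk (and the gpio-sodaville, gpio-tb10x and gpio-twl4030 hunks later in the diff) also moves irq domain creation from of_fwnode_handle(...of_node) to dev_fwnode(), which resolves the device's fwnode regardless of whether it came from devicetree or another firmware description. A short sketch of the resulting call, using a hypothetical helper name:

#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/property.h>

/* Hypothetical helper: create a linear irq domain from the device's fwnode. */
static struct irq_domain *foo_create_irq_domain(struct device *dev,
						unsigned int ngpio)
{
	return irq_domain_create_linear(dev_fwnode(dev), ngpio,
					&irq_generic_chip_ops, NULL);
}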
diff --git a/drivers/gpio/gpio-rtd.c b/drivers/gpio/gpio-rtd.c
index bf7f008f58d7..25bbd749b019 100644
--- a/drivers/gpio/gpio-rtd.c
+++ b/drivers/gpio/gpio-rtd.c
@@ -275,7 +275,7 @@ static int rtd_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
}
}
-static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct rtd_gpio *data = gpiochip_get_data(chip);
u32 mask = BIT(offset % 32);
@@ -292,6 +292,8 @@ static void rtd_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
else
val &= ~mask;
writel_relaxed(val, data->base + dato_reg_offset);
+
+ return 0;
}
static int rtd_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -563,7 +565,7 @@ static int rtd_gpio_probe(struct platform_device *pdev)
data->gpio_chip.get_direction = rtd_gpio_get_direction;
data->gpio_chip.direction_input = rtd_gpio_direction_input;
data->gpio_chip.direction_output = rtd_gpio_direction_output;
- data->gpio_chip.set = rtd_gpio_set;
+ data->gpio_chip.set_rv = rtd_gpio_set;
data->gpio_chip.get = rtd_gpio_get;
data->gpio_chip.set_config = rtd_gpio_set_config;
data->gpio_chip.parent = dev;
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 3f3ee36bc3cb..e9d054d78ccb 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -43,11 +43,14 @@ static int sa1100_gpio_get(struct gpio_chip *chip, unsigned offset)
BIT(offset);
}
-static void sa1100_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int sa1100_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int reg = value ? R_GPSR : R_GPCR;
writel_relaxed(BIT(offset), sa1100_gpio_chip(chip)->membase + reg);
+
+ return 0;
}
static int sa1100_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -96,7 +99,7 @@ static struct sa1100_gpio_chip sa1100_gpio_chip = {
.get_direction = sa1100_get_direction,
.direction_input = sa1100_direction_input,
.direction_output = sa1100_direction_output,
- .set = sa1100_gpio_set,
+ .set_rv = sa1100_gpio_set,
.get = sa1100_gpio_get,
.to_irq = sa1100_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-sama5d2-piobu.c b/drivers/gpio/gpio-sama5d2-piobu.c
index d770a6f3d846..c31244cf5e89 100644
--- a/drivers/gpio/gpio-sama5d2-piobu.c
+++ b/drivers/gpio/gpio-sama5d2-piobu.c
@@ -169,15 +169,15 @@ static int sama5d2_piobu_get(struct gpio_chip *chip, unsigned int pin)
/*
* sama5d2_piobu_set() - gpiochip set
*/
-static void sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin,
- int value)
+static int sama5d2_piobu_set(struct gpio_chip *chip, unsigned int pin,
+ int value)
{
if (!value)
value = PIOBU_LOW;
else
value = PIOBU_HIGH;
- sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value);
+ return sama5d2_piobu_write_value(chip, pin, PIOBU_SOD, value);
}
static int sama5d2_piobu_probe(struct platform_device *pdev)
@@ -196,7 +196,7 @@ static int sama5d2_piobu_probe(struct platform_device *pdev)
piobu->chip.direction_input = sama5d2_piobu_direction_input;
piobu->chip.direction_output = sama5d2_piobu_direction_output;
piobu->chip.get = sama5d2_piobu_get;
- piobu->chip.set = sama5d2_piobu_set;
+ piobu->chip.set_rv = sama5d2_piobu_set;
piobu->chip.base = -1;
piobu->chip.ngpio = PIOBU_NUM;
piobu->chip.can_sleep = 0;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index ff0341b1222f..833ffdd98d74 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -117,7 +117,7 @@ static int sch_gpio_get(struct gpio_chip *gc, unsigned int gpio_num)
return sch_gpio_reg_get(sch, gpio_num, GLV);
}
-static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
+static int sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
{
struct sch_gpio *sch = gpiochip_get_data(gc);
unsigned long flags;
@@ -125,6 +125,8 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned int gpio_num, int val)
spin_lock_irqsave(&sch->lock, flags);
sch_gpio_reg_set(sch, gpio_num, GLV, val);
spin_unlock_irqrestore(&sch->lock, flags);
+
+ return 0;
}
static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
@@ -146,8 +148,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned int gpio_num,
* But we cannot prevent a short low pulse if direction is set to high
* and an external pull-up is connected.
*/
- sch_gpio_set(gc, gpio_num, val);
- return 0;
+ return sch_gpio_set(gc, gpio_num, val);
}
static int sch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio_num)
@@ -166,7 +167,7 @@ static const struct gpio_chip sch_gpio_chip = {
.direction_input = sch_gpio_direction_in,
.get = sch_gpio_get,
.direction_output = sch_gpio_direction_out,
- .set = sch_gpio_set,
+ .set_rv = sch_gpio_set,
.get_direction = sch_gpio_get_direction,
};
diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c
index ba4fccf3cc94..44fb5fc21fb8 100644
--- a/drivers/gpio/gpio-sch311x.c
+++ b/drivers/gpio/gpio-sch311x.c
@@ -178,14 +178,16 @@ static void __sch311x_gpio_set(struct sch311x_gpio_block *block,
outb(data, block->runtime_reg + block->data_reg);
}
-static void sch311x_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int sch311x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct sch311x_gpio_block *block = gpiochip_get_data(chip);
spin_lock(&block->lock);
__sch311x_gpio_set(block, offset, value);
spin_unlock(&block->lock);
+
+ return 0;
}
static int sch311x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
@@ -295,7 +297,7 @@ static int sch311x_gpio_probe(struct platform_device *pdev)
block->chip.get_direction = sch311x_gpio_get_direction;
block->chip.set_config = sch311x_gpio_set_config;
block->chip.get = sch311x_gpio_get;
- block->chip.set = sch311x_gpio_set;
+ block->chip.set_rv = sch311x_gpio_set;
block->chip.ngpio = 8;
block->chip.parent = &pdev->dev;
block->chip.base = sch311x_gpio_blocks[i].base;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index f638219a7c4f..9503296422fd 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -39,7 +39,7 @@
#include "dev-sync-probe.h"
#define GPIO_SIM_NGPIO_MAX 1024
-#define GPIO_SIM_PROP_MAX 4 /* Max 3 properties + sentinel. */
+#define GPIO_SIM_PROP_MAX 5 /* Max 4 properties + sentinel. */
#define GPIO_SIM_NUM_ATTRS 3 /* value, pull and sentinel */
static DEFINE_IDA(gpio_sim_ida);
@@ -629,6 +629,7 @@ struct gpio_sim_line {
unsigned int offset;
char *name;
+ bool valid;
/* There can only be one hog per line. */
struct gpio_sim_hog *hog;
@@ -744,6 +745,36 @@ gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names)
}
}
+static unsigned int gpio_sim_get_reserved_ranges_size(struct gpio_sim_bank *bank)
+{
+ struct gpio_sim_line *line;
+ unsigned int size = 0;
+
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->valid)
+ continue;
+
+ size += 2;
+ }
+
+ return size;
+}
+
+static void gpio_sim_set_reserved_ranges(struct gpio_sim_bank *bank,
+ u32 *ranges)
+{
+ struct gpio_sim_line *line;
+ int i = 0;
+
+ list_for_each_entry(line, &bank->line_list, siblings) {
+ if (line->valid)
+ continue;
+
+ ranges[i++] = line->offset;
+ ranges[i++] = 1;
+ }
+}
+
static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
{
struct gpiod_hog *hog;
@@ -844,9 +875,10 @@ static struct fwnode_handle *
gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
struct fwnode_handle *parent)
{
+ unsigned int prop_idx = 0, line_names_size, ranges_size;
struct property_entry properties[GPIO_SIM_PROP_MAX];
- unsigned int prop_idx = 0, line_names_size;
char **line_names __free(kfree) = NULL;
+ u32 *ranges __free(kfree) = NULL;
memset(properties, 0, sizeof(properties));
@@ -870,6 +902,19 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
line_names, line_names_size);
}
+ ranges_size = gpio_sim_get_reserved_ranges_size(bank);
+ if (ranges_size) {
+ ranges = kcalloc(ranges_size, sizeof(u32), GFP_KERNEL);
+ if (!ranges)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_sim_set_reserved_ranges(bank, ranges);
+
+ properties[prop_idx++] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ "gpio-reserved-ranges",
+ ranges, ranges_size);
+ }
+
return fwnode_create_software_node(properties, parent);
}
@@ -1189,8 +1234,41 @@ static ssize_t gpio_sim_line_config_name_store(struct config_item *item,
CONFIGFS_ATTR(gpio_sim_line_config_, name);
+static ssize_t
+gpio_sim_line_config_valid_show(struct config_item *item, char *page)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+
+ guard(mutex)(&dev->lock);
+
+ return sprintf(page, "%c\n", line->valid ? '1' : '0');
+}
+
+static ssize_t gpio_sim_line_config_valid_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct gpio_sim_line *line = to_gpio_sim_line(item);
+ struct gpio_sim_device *dev = gpio_sim_line_get_device(line);
+ bool valid;
+ int ret;
+
+ ret = kstrtobool(page, &valid);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+
+ line->valid = valid;
+
+ return count;
+}
+
+CONFIGFS_ATTR(gpio_sim_line_config_, valid);
+
static struct configfs_attribute *gpio_sim_line_config_attrs[] = {
&gpio_sim_line_config_attr_name,
+ &gpio_sim_line_config_attr_valid,
NULL
};
@@ -1399,6 +1477,7 @@ gpio_sim_bank_config_make_line_group(struct config_group *group,
line->parent = bank;
line->offset = offset;
+ line->valid = true;
list_add_tail(&line->siblings, &bank->line_list);
return &line->group;
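The gpio-sim hunks above add a per-line configfs attribute named "valid"; lines marked invalid are gathered into <offset, size> pairs and exported through the standard "gpio-reserved-ranges" software-node property so gpiolib can exclude them from the chip's valid mask. A small sketch of how such a property entry is built, using made-up offsets (single lines 2 and 5 reserved), not code from the patch:

#include <linux/kernel.h>
#include <linux/property.h>

static const u32 example_reserved_ranges[] = { 2, 1, 5, 1 };	/* offset, size pairs */

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_U32_ARRAY_LEN("gpio-reserved-ranges",
				     example_reserved_ranges,
				     ARRAY_SIZE(example_reserved_ranges)),
	{ }	/* sentinel */
};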
diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c
index 051bc99bdfb2..95355dda621b 100644
--- a/drivers/gpio/gpio-siox.c
+++ b/drivers/gpio/gpio-siox.c
@@ -160,8 +160,8 @@ static int gpio_siox_get(struct gpio_chip *chip, unsigned int offset)
return ret;
}
-static void gpio_siox_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int gpio_siox_set(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
struct gpio_siox_ddata *ddata = gpiochip_get_data(chip);
u8 mask = 1 << (19 - offset);
@@ -174,6 +174,8 @@ static void gpio_siox_set(struct gpio_chip *chip,
ddata->setdata[0] &= ~mask;
mutex_unlock(&ddata->lock);
+
+ return 0;
}
static int gpio_siox_direction_input(struct gpio_chip *chip,
@@ -191,8 +193,7 @@ static int gpio_siox_direction_output(struct gpio_chip *chip,
if (offset < 12)
return -EINVAL;
- gpio_siox_set(chip, offset, value);
- return 0;
+ return gpio_siox_set(chip, offset, value);
}
static int gpio_siox_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -236,7 +237,7 @@ static int gpio_siox_probe(struct siox_device *sdevice)
gc->parent = dev;
gc->owner = THIS_MODULE;
gc->get = gpio_siox_get;
- gc->set = gpio_siox_set;
+ gc->set_rv = gpio_siox_set;
gc->direction_input = gpio_siox_direction_input;
gc->direction_output = gpio_siox_direction_output;
gc->get_direction = gpio_siox_get_direction;
diff --git a/drivers/gpio/gpio-sloppy-logic-analyzer.c b/drivers/gpio/gpio-sloppy-logic-analyzer.c
index 8cf3b171c599..969dddd3d6fa 100644
--- a/drivers/gpio/gpio-sloppy-logic-analyzer.c
+++ b/drivers/gpio/gpio-sloppy-logic-analyzer.c
@@ -306,7 +306,7 @@ static void gpio_la_poll_remove(struct platform_device *pdev)
}
static const struct of_device_id gpio_la_poll_of_match[] = {
- { .compatible = GPIO_LA_NAME },
+ { .compatible = "gpio-sloppy-logic-analyzer" },
{ }
};
MODULE_DEVICE_TABLE(of, gpio_la_poll_of_match);
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 6a3c4c625138..abd13c79ace0 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -169,8 +169,8 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- sd->id = irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), SDV_NUM_PUB_GPIOS,
- sd->irq_base, 0, &irq_domain_sdv_ops, sd);
+ sd->id = irq_domain_create_legacy(dev_fwnode(&pdev->dev), SDV_NUM_PUB_GPIOS, sd->irq_base,
+ 0, &irq_domain_sdv_ops, sd);
if (!sd->id)
return -ENODEV;
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 51539185400d..55f0e8afa291 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -51,13 +51,8 @@ struct spear_spics {
struct gpio_chip chip;
};
-/* gpio framework specific routines */
-static int spics_get_value(struct gpio_chip *chip, unsigned offset)
-{
- return -ENXIO;
-}
-
-static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value)
+static int spics_set_value(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct spear_spics *spics = gpiochip_get_data(chip);
u32 tmp;
@@ -74,18 +69,14 @@ static void spics_set_value(struct gpio_chip *chip, unsigned offset, int value)
tmp &= ~(0x1 << spics->cs_value_bit);
tmp |= value << spics->cs_value_bit;
writel_relaxed(tmp, spics->base + spics->perip_cfg);
-}
-static int spics_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- return -ENXIO;
+ return 0;
}
static int spics_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
- spics_set_value(chip, offset, value);
- return 0;
+ return spics_set_value(chip, offset, value);
}
static int spics_request(struct gpio_chip *chip, unsigned offset)
@@ -148,10 +139,8 @@ static int spics_gpio_probe(struct platform_device *pdev)
spics->chip.base = -1;
spics->chip.request = spics_request;
spics->chip.free = spics_free;
- spics->chip.direction_input = spics_direction_input;
spics->chip.direction_output = spics_direction_output;
- spics->chip.get = spics_get_value;
- spics->chip.set = spics_set_value;
+ spics->chip.set_rv = spics_set_value;
spics->chip.label = dev_name(&pdev->dev);
spics->chip.parent = &pdev->dev;
spics->chip.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c
index c117c11bfb29..bbd5bf51c088 100644
--- a/drivers/gpio/gpio-sprd.c
+++ b/drivers/gpio/gpio-sprd.c
@@ -108,10 +108,12 @@ static int sprd_gpio_get(struct gpio_chip *chip, unsigned int offset)
return sprd_gpio_read(chip, offset, SPRD_GPIO_DATA);
}
-static void sprd_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int sprd_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
sprd_gpio_update(chip, offset, SPRD_GPIO_DATA, value);
+
+ return 0;
}
static void sprd_gpio_irq_mask(struct irq_data *data)
@@ -243,7 +245,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
sprd_gpio->chip.request = sprd_gpio_request;
sprd_gpio->chip.free = sprd_gpio_free;
sprd_gpio->chip.get = sprd_gpio_get;
- sprd_gpio->chip.set = sprd_gpio_set;
+ sprd_gpio->chip.set_rv = sprd_gpio_set;
sprd_gpio->chip.direction_input = sprd_gpio_direction_input;
sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index dce8ff322e47..0a270156e0be 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -54,7 +54,7 @@ static int stmpe_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(ret & mask);
}
-static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int stmpe_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(chip);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -67,9 +67,9 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
* For them we need to write 0 to clear and 1 to set.
*/
if (stmpe->regs[STMPE_IDX_GPSR_LSB] == stmpe->regs[STMPE_IDX_GPCR_LSB])
- stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
- else
- stmpe_reg_write(stmpe, reg, mask);
+ return stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
+
+ return stmpe_reg_write(stmpe, reg, mask);
}
static int stmpe_gpio_get_direction(struct gpio_chip *chip,
@@ -98,8 +98,11 @@ static int stmpe_gpio_direction_output(struct gpio_chip *chip,
struct stmpe *stmpe = stmpe_gpio->stmpe;
u8 reg = stmpe->regs[STMPE_IDX_GPDR_LSB + (offset / 8)];
u8 mask = BIT(offset % 8);
+ int ret;
- stmpe_gpio_set(chip, offset, val);
+ ret = stmpe_gpio_set(chip, offset, val);
+ if (ret)
+ return ret;
return stmpe_set_bits(stmpe, reg, mask, mask);
}
@@ -133,7 +136,7 @@ static const struct gpio_chip template_chip = {
.direction_input = stmpe_gpio_direction_input,
.get = stmpe_gpio_get,
.direction_output = stmpe_gpio_direction_output,
- .set = stmpe_gpio_set,
+ .set_rv = stmpe_gpio_set,
.request = stmpe_gpio_request,
.can_sleep = true,
};
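The stmpe conversion above, like the tc3589x, tps6586x, tps65910, tps65912 and tps68470 hunks further down, also reworks direction_output: the output level is written through the int-returning setter first, and the direction register is only touched if that write succeeded. The shape of that pattern, again for a hypothetical regmap-backed driver (bar names and register offsets are illustrative only):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/regmap.h>

#define BAR_DIR_REG	0x00	/* made-up register offsets */
#define BAR_OUT_REG	0x04

struct bar_gpio {
	struct gpio_chip chip;
	struct regmap *regmap;
};

static int bar_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct bar_gpio *bar = gpiochip_get_data(chip);

	return regmap_update_bits(bar->regmap, BAR_OUT_REG, BIT(offset),
				  value ? BIT(offset) : 0);
}

static int bar_gpio_direction_output(struct gpio_chip *chip,
				     unsigned int offset, int value)
{
	struct bar_gpio *bar = gpiochip_get_data(chip);
	int ret;

	/* Program the level first; bail out if the bus write failed. */
	ret = bar_gpio_set(chip, offset, value);
	if (ret)
		return ret;

	/* Only then switch the pin to output mode. */
	return regmap_set_bits(bar->regmap, BAR_DIR_REG, BIT(offset));
}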
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
index 5a6406d1f03a..fdda8de6ca36 100644
--- a/drivers/gpio/gpio-stp-xway.c
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -113,7 +113,7 @@ static int xway_stp_get(struct gpio_chip *gc, unsigned int gpio)
*
* Set the shadow value and call ltq_ebu_apply.
*/
-static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+static int xway_stp_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct xway_stp *chip = gpiochip_get_data(gc);
@@ -124,6 +124,8 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
if (!chip->reserved)
xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+
+ return 0;
}
/**
@@ -136,9 +138,7 @@ static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
*/
static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
{
- xway_stp_set(gc, gpio, val);
-
- return 0;
+ return xway_stp_set(gc, gpio, val);
}
/**
@@ -249,7 +249,7 @@ static int xway_stp_probe(struct platform_device *pdev)
chip->gc.label = "stp-xway";
chip->gc.direction_output = xway_stp_dir_out;
chip->gc.get = xway_stp_get;
- chip->gc.set = xway_stp_set;
+ chip->gc.set_rv = xway_stp_set;
chip->gc.request = xway_stp_request;
chip->gc.base = -1;
chip->gc.owner = THIS_MODULE;
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c
index 5ab394ec81e6..f86f78655c24 100644
--- a/drivers/gpio/gpio-syscon.c
+++ b/drivers/gpio/gpio-syscon.c
@@ -40,8 +40,8 @@ struct syscon_gpio_data {
unsigned int bit_count;
unsigned int dat_bit_offset;
unsigned int dir_bit_offset;
- void (*set)(struct gpio_chip *chip,
- unsigned offset, int value);
+ int (*set)(struct gpio_chip *chip, unsigned int offset,
+ int value);
};
struct syscon_gpio_priv {
@@ -68,17 +68,17 @@ static int syscon_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(val & BIT(offs % SYSCON_REG_BITS));
}
-static void syscon_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int syscon_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
- regmap_update_bits(priv->syscon,
- (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
- BIT(offs % SYSCON_REG_BITS),
- val ? BIT(offs % SYSCON_REG_BITS) : 0);
+ return regmap_update_bits(priv->syscon,
+ (offs / SYSCON_REG_BITS) * SYSCON_REG_SIZE,
+ BIT(offs % SYSCON_REG_BITS),
+ val ? BIT(offs % SYSCON_REG_BITS) : 0);
}
static int syscon_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
@@ -115,9 +115,7 @@ static int syscon_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS));
}
- chip->set(chip, offset, val);
-
- return 0;
+ return chip->set_rv(chip, offset, val);
}
static const struct syscon_gpio_data clps711x_mctrl_gpio = {
@@ -127,8 +125,8 @@ static const struct syscon_gpio_data clps711x_mctrl_gpio = {
.dat_bit_offset = 0x40 * 8 + 8,
};
-static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
@@ -144,6 +142,8 @@ static void rockchip_gpio_set(struct gpio_chip *chip, unsigned int offset,
data);
if (ret < 0)
dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+
+ return ret;
}
static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
@@ -156,7 +156,8 @@ static const struct syscon_gpio_data rockchip_rk3328_gpio_mute = {
#define KEYSTONE_LOCK_BIT BIT(0)
-static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int keystone_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct syscon_gpio_priv *priv = gpiochip_get_data(chip);
unsigned int offs;
@@ -165,7 +166,7 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
offs = priv->dreg_offset + priv->data->dat_bit_offset + offset;
if (!val)
- return;
+ return 0;
ret = regmap_update_bits(
priv->syscon,
@@ -174,6 +175,8 @@ static void keystone_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
BIT(offs % SYSCON_REG_BITS) | KEYSTONE_LOCK_BIT);
if (ret < 0)
dev_err(chip->parent, "gpio write failed ret(%d)\n", ret);
+
+ return ret;
}
static const struct syscon_gpio_data keystone_dsp_gpio = {
@@ -248,7 +251,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
if (priv->data->flags & GPIO_SYSCON_FEAT_IN)
priv->chip.direction_input = syscon_gpio_dir_in;
if (priv->data->flags & GPIO_SYSCON_FEAT_OUT) {
- priv->chip.set = priv->data->set ? : syscon_gpio_set;
+ priv->chip.set_rv = priv->data->set ? : syscon_gpio_set;
priv->chip.direction_output = syscon_gpio_dir_out;
}
diff --git a/drivers/gpio/gpio-tangier.c b/drivers/gpio/gpio-tangier.c
index a415e6d36173..ce17b98e0623 100644
--- a/drivers/gpio/gpio-tangier.c
+++ b/drivers/gpio/gpio-tangier.c
@@ -90,7 +90,7 @@ static int tng_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(readl(gplr) & BIT(shift));
}
-static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct tng_gpio *priv = gpiochip_get_data(chip);
void __iomem *reg;
@@ -101,6 +101,8 @@ static void tng_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
guard(raw_spinlock_irqsave)(&priv->lock);
writel(BIT(shift), reg);
+
+ return 0;
}
static int tng_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
@@ -428,7 +430,7 @@ int devm_tng_gpio_probe(struct device *dev, struct tng_gpio *gpio)
gpio->chip.direction_input = tng_gpio_direction_input;
gpio->chip.direction_output = tng_gpio_direction_output;
gpio->chip.get = tng_gpio_get;
- gpio->chip.set = tng_gpio_set;
+ gpio->chip.set_rv = tng_gpio_set;
gpio->chip.get_direction = tng_gpio_get_direction;
gpio->chip.set_config = tng_gpio_set_config;
gpio->chip.base = info->base;
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 8cf676fd0a0b..1869ee7f9423 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -183,9 +183,8 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_create_linear(of_fwnode_handle(np),
- tb10x_gpio->gc.ngpio,
- &irq_generic_chip_ops, NULL);
+ tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev), tb10x_gpio->gc.ngpio,
+ &irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
return -ENOMEM;
}
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c
index e62ee7e56908..0bd32809fd68 100644
--- a/drivers/gpio/gpio-tc3589x.c
+++ b/drivers/gpio/gpio-tc3589x.c
@@ -49,7 +49,7 @@ static int tc3589x_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(ret & mask);
}
-static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
+static int tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int val)
{
struct tc3589x_gpio *tc3589x_gpio = gpiochip_get_data(chip);
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
@@ -57,7 +57,7 @@ static void tc3589x_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
unsigned int pos = offset % 8;
u8 data[] = {val ? BIT(pos) : 0, BIT(pos)};
- tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
+ return tc3589x_block_write(tc3589x, reg, ARRAY_SIZE(data), data);
}
static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
@@ -67,8 +67,11 @@ static int tc3589x_gpio_direction_output(struct gpio_chip *chip,
struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
u8 reg = TC3589x_GPIODIR0 + offset / 8;
unsigned int pos = offset % 8;
+ int ret;
- tc3589x_gpio_set(chip, offset, val);
+ ret = tc3589x_gpio_set(chip, offset, val);
+ if (ret)
+ return ret;
return tc3589x_set_bits(tc3589x, reg, BIT(pos), BIT(pos));
}
@@ -146,7 +149,7 @@ static const struct gpio_chip template_chip = {
.label = "tc3589x",
.owner = THIS_MODULE,
.get = tc3589x_gpio_get,
- .set = tc3589x_gpio_set,
+ .set_rv = tc3589x_gpio_set,
.direction_output = tc3589x_gpio_direction_output,
.direction_input = tc3589x_gpio_direction_input,
.get_direction = tc3589x_gpio_get_direction,
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9ad286adf263..126fd12550aa 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -146,12 +146,14 @@ static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
tegra_gpio_disable(tgi, offset);
}
-static void tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int tegra_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
tegra_gpio_mask_write(tgi, GPIO_MSK_OUT(tgi, offset), offset, value);
+
+ return 0;
}
static int tegra_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -718,7 +720,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tgi->gc.direction_input = tegra_gpio_direction_input;
tgi->gc.get = tegra_gpio_get;
tgi->gc.direction_output = tegra_gpio_direction_output;
- tgi->gc.set = tegra_gpio_set;
+ tgi->gc.set_rv = tegra_gpio_set;
tgi->gc.get_direction = tegra_gpio_get_direction;
tgi->gc.base = 0;
tgi->gc.ngpio = tgi->bank_count * 32;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index d27bfac6c9f5..f902da15c419 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -202,6 +202,28 @@ static int tegra186_init_valid_mask(struct gpio_chip *chip,
return 0;
}
+static int tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int level)
+{
+ struct tegra_gpio *gpio = gpiochip_get_data(chip);
+ void __iomem *base;
+ u32 value;
+
+ base = tegra186_gpio_get_base(gpio, offset);
+ if (WARN_ON(base == NULL))
+ return -ENODEV;
+
+ value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
+ if (level == 0)
+ value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+ else
+ value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
+
+ writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
+
+ return 0;
+}
+
static int tegra186_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
@@ -249,9 +271,12 @@ static int tegra186_gpio_direction_output(struct gpio_chip *chip,
struct tegra_gpio *gpio = gpiochip_get_data(chip);
void __iomem *base;
u32 value;
+ int ret;
/* configure output level first */
- chip->set(chip, offset, level);
+ ret = tegra186_gpio_set(chip, offset, level);
+ if (ret)
+ return ret;
base = tegra186_gpio_get_base(gpio, offset);
if (WARN_ON(base == NULL))
@@ -359,26 +384,6 @@ static int tegra186_gpio_get(struct gpio_chip *chip, unsigned int offset)
return value & BIT(0);
}
-static void tegra186_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int level)
-{
- struct tegra_gpio *gpio = gpiochip_get_data(chip);
- void __iomem *base;
- u32 value;
-
- base = tegra186_gpio_get_base(gpio, offset);
- if (WARN_ON(base == NULL))
- return;
-
- value = readl(base + TEGRA186_GPIO_OUTPUT_VALUE);
- if (level == 0)
- value &= ~TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
- else
- value |= TEGRA186_GPIO_OUTPUT_VALUE_HIGH;
-
- writel(value, base + TEGRA186_GPIO_OUTPUT_VALUE);
-}
-
static int tegra186_gpio_set_config(struct gpio_chip *chip,
unsigned int offset,
unsigned long config)
@@ -886,7 +891,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->gpio.direction_input = tegra186_gpio_direction_input;
gpio->gpio.direction_output = tegra186_gpio_direction_output;
gpio->gpio.get = tegra186_gpio_get;
- gpio->gpio.set = tegra186_gpio_set;
+ gpio->gpio.set_rv = tegra186_gpio_set;
gpio->gpio.set_config = tegra186_gpio_set_config;
gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges;
gpio->gpio.init_valid_mask = tegra186_init_valid_mask;
diff --git a/drivers/gpio/gpio-thunderx.c b/drivers/gpio/gpio-thunderx.c
index 5b851e904c11..eb6a1f0279c0 100644
--- a/drivers/gpio/gpio-thunderx.c
+++ b/drivers/gpio/gpio-thunderx.c
@@ -116,8 +116,8 @@ static int thunderx_gpio_dir_in(struct gpio_chip *chip, unsigned int line)
return 0;
}
-static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
- int value)
+static int thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
+ int value)
{
struct thunderx_gpio *txgpio = gpiochip_get_data(chip);
int bank = line / 64;
@@ -127,6 +127,8 @@ static void thunderx_gpio_set(struct gpio_chip *chip, unsigned int line,
(bank * GPIO_2ND_BANK) + (value ? GPIO_TX_SET : GPIO_TX_CLR);
writeq(BIT_ULL(bank_bit), reg);
+
+ return 0;
}
static int thunderx_gpio_dir_out(struct gpio_chip *chip, unsigned int line,
@@ -269,9 +271,9 @@ static int thunderx_gpio_get(struct gpio_chip *chip, unsigned int line)
return masked_bits != 0;
}
-static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask,
- unsigned long *bits)
+static int thunderx_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
{
int bank;
u64 set_bits, clear_bits;
@@ -283,6 +285,8 @@ static void thunderx_gpio_set_multiple(struct gpio_chip *chip,
writeq(set_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_SET);
writeq(clear_bits, txgpio->register_base + (bank * GPIO_2ND_BANK) + GPIO_TX_CLR);
}
+
+ return 0;
}
static void thunderx_gpio_irq_ack(struct irq_data *d)
@@ -529,8 +533,8 @@ static int thunderx_gpio_probe(struct pci_dev *pdev,
chip->direction_input = thunderx_gpio_dir_in;
chip->get = thunderx_gpio_get;
chip->direction_output = thunderx_gpio_dir_out;
- chip->set = thunderx_gpio_set;
- chip->set_multiple = thunderx_gpio_set_multiple;
+ chip->set_rv = thunderx_gpio_set;
+ chip->set_multiple_rv = thunderx_gpio_set_multiple;
chip->set_config = thunderx_gpio_set_config;
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &thunderx_gpio_irq_chip);
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index cb303a26f4d3..fbb883089189 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -80,10 +80,9 @@ static int timbgpio_gpio_direction_output(struct gpio_chip *gpio,
return timbgpio_update_bit(gpio, nr, TGPIODIR, false);
}
-static void timbgpio_gpio_set(struct gpio_chip *gpio,
- unsigned nr, int val)
+static int timbgpio_gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
- timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
+ return timbgpio_update_bit(gpio, nr, TGPIOVAL, val != 0);
}
static int timbgpio_to_irq(struct gpio_chip *gpio, unsigned offset)
@@ -254,7 +253,7 @@ static int timbgpio_probe(struct platform_device *pdev)
gc->direction_input = timbgpio_gpio_direction_input;
gc->get = timbgpio_gpio_get;
gc->direction_output = timbgpio_gpio_direction_output;
- gc->set = timbgpio_gpio_set;
+ gc->set_rv = timbgpio_gpio_set;
gc->to_irq = (irq >= 0 && tgpio->irq_base > 0) ? timbgpio_to_irq : NULL;
gc->dbg_show = NULL;
gc->base = pdata->gpio_base;
diff --git a/drivers/gpio/gpio-tpic2810.c b/drivers/gpio/gpio-tpic2810.c
index effb7b8ff81f..d5b8568ab061 100644
--- a/drivers/gpio/gpio-tpic2810.c
+++ b/drivers/gpio/gpio-tpic2810.c
@@ -25,7 +25,7 @@ struct tpic2810 {
struct mutex lock;
};
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value);
+static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value);
static int tpic2810_get_direction(struct gpio_chip *chip,
unsigned offset)
@@ -34,19 +34,11 @@ static int tpic2810_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_OUT;
}
-static int tpic2810_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- /* This device is output only */
- return -EINVAL;
-}
-
static int tpic2810_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
/* This device always output */
- tpic2810_set(chip, offset, value);
- return 0;
+ return tpic2810_set(chip, offset, value);
}
static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
@@ -68,25 +60,28 @@ static void tpic2810_set_mask_bits(struct gpio_chip *chip, u8 mask, u8 bits)
mutex_unlock(&gpio->lock);
}
-static void tpic2810_set(struct gpio_chip *chip, unsigned offset, int value)
+static int tpic2810_set(struct gpio_chip *chip, unsigned int offset, int value)
{
tpic2810_set_mask_bits(chip, BIT(offset), value ? BIT(offset) : 0);
+
+ return 0;
}
-static void tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
+static int tpic2810_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
tpic2810_set_mask_bits(chip, *mask, *bits);
+
+ return 0;
}
static const struct gpio_chip template_chip = {
.label = "tpic2810",
.owner = THIS_MODULE,
.get_direction = tpic2810_get_direction,
- .direction_input = tpic2810_direction_input,
.direction_output = tpic2810_direction_output,
- .set = tpic2810_set,
- .set_multiple = tpic2810_set_multiple,
+ .set_rv = tpic2810_set,
+ .set_multiple_rv = tpic2810_set_multiple,
.base = -1,
.ngpio = 8,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65086.c b/drivers/gpio/gpio-tps65086.c
index 8f5827554e1e..08fa061b73ef 100644
--- a/drivers/gpio/gpio-tps65086.c
+++ b/drivers/gpio/gpio-tps65086.c
@@ -37,10 +37,8 @@ static int tps65086_gpio_direction_output(struct gpio_chip *chip,
struct tps65086_gpio *gpio = gpiochip_get_data(chip);
/* Set the initial value */
- regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
- BIT(4 + offset), value ? BIT(4 + offset) : 0);
-
- return 0;
+ return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
}
static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -55,13 +53,13 @@ static int tps65086_gpio_get(struct gpio_chip *chip, unsigned offset)
return val & BIT(4 + offset);
}
-static void tps65086_gpio_set(struct gpio_chip *chip, unsigned offset,
- int value)
+static int tps65086_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tps65086_gpio *gpio = gpiochip_get_data(chip);
- regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
- BIT(4 + offset), value ? BIT(4 + offset) : 0);
+ return regmap_update_bits(gpio->tps->regmap, TPS65086_GPOCTRL,
+ BIT(4 + offset), value ? BIT(4 + offset) : 0);
}
static const struct gpio_chip template_chip = {
@@ -71,7 +69,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65086_gpio_direction_input,
.direction_output = tps65086_gpio_direction_output,
.get = tps65086_gpio_get,
- .set = tps65086_gpio_set,
+ .set_rv = tps65086_gpio_set,
.base = -1,
.ngpio = 4,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps65218.c b/drivers/gpio/gpio-tps65218.c
index d7d9d50dcddf..49cd7754ed05 100644
--- a/drivers/gpio/gpio-tps65218.c
+++ b/drivers/gpio/gpio-tps65218.c
@@ -34,34 +34,28 @@ static int tps65218_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & (TPS65218_ENABLE2_GPIO1 << offset));
}
-static void tps65218_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65218_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65218_gpio *tps65218_gpio = gpiochip_get_data(gc);
struct tps65218 *tps65218 = tps65218_gpio->tps65218;
if (value)
- tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_PROTECT_L1);
- else
- tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
- TPS65218_ENABLE2_GPIO1 << offset,
- TPS65218_PROTECT_L1);
+ return tps65218_set_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
+
+ return tps65218_clear_bits(tps65218, TPS65218_REG_ENABLE2,
+ TPS65218_ENABLE2_GPIO1 << offset,
+ TPS65218_PROTECT_L1);
}
static int tps65218_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
/* Only drives GPOs */
- tps65218_gpio_set(gc, offset, value);
- return 0;
-}
-
-static int tps65218_gpio_input(struct gpio_chip *gc, unsigned offset)
-{
- return -EPERM;
+ return tps65218_gpio_set(gc, offset, value);
}
static int tps65218_gpio_request(struct gpio_chip *gc, unsigned offset)
@@ -174,9 +168,8 @@ static const struct gpio_chip template_chip = {
.owner = THIS_MODULE,
.request = tps65218_gpio_request,
.direction_output = tps65218_gpio_output,
- .direction_input = tps65218_gpio_input,
.get = tps65218_gpio_get,
- .set = tps65218_gpio_set,
+ .set_rv = tps65218_gpio_set,
.set_config = tps65218_gpio_set_config,
.can_sleep = true,
.ngpio = 3,
diff --git a/drivers/gpio/gpio-tps65219.c b/drivers/gpio/gpio-tps65219.c
index 526640c39a11..c0177088c54c 100644
--- a/drivers/gpio/gpio-tps65219.c
+++ b/drivers/gpio/gpio-tps65219.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * GPIO driver for TI TPS65219 PMICs
+ * GPIO driver for TI TPS65214/TPS65215/TPS65219 PMICs
*
- * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2022, 2025 Texas Instruments Incorporated - http://www.ti.com/
*/
#include <linux/bits.h>
@@ -13,20 +13,48 @@
#include <linux/regmap.h>
#define TPS65219_GPIO0_DIR_MASK BIT(3)
-#define TPS65219_GPIO0_OFFSET 2
-#define TPS65219_GPIO0_IDX 0
+#define TPS65214_GPIO0_DIR_MASK BIT(1)
+#define TPS6521X_GPIO0_OFFSET 2
+#define TPS6521X_GPIO0_IDX 0
+
+/*
+ * TPS65214 GPIO mapping
+ * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2
+ * Linux gpio offset 1 -> GPO1 (pin9 ) -> bit_offset 0
+ *
+ * TPS65215 & TPS65219 GPIO mapping
+ * Linux gpio offset 0 -> GPIO (pin16) -> bit_offset 2
+ * Linux gpio offset 1 -> GPO1 (pin8 ) -> bit_offset 0
+ * Linux gpio offset 2 -> GPO2 (pin17) -> bit_offset 1
+ */
struct tps65219_gpio {
+ int (*change_dir)(struct gpio_chip *gc, unsigned int offset, unsigned int dir);
struct gpio_chip gpio_chip;
struct tps65219 *tps;
};
+static int tps65214_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+ int ret, val;
+
+ if (offset != TPS6521X_GPIO0_IDX)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ ret = regmap_read(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, &val);
+ if (ret)
+ return ret;
+
+ return !(val & TPS65214_GPIO0_DIR_MASK);
+}
+
static int tps65219_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
int ret, val;
- if (offset != TPS65219_GPIO0_IDX)
+ if (offset != TPS6521X_GPIO0_IDX)
return GPIO_LINE_DIRECTION_OUT;
ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val);
@@ -42,7 +70,7 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
struct device *dev = gpio->tps->dev;
int ret, val;
- if (offset != TPS65219_GPIO0_IDX) {
+ if (offset != TPS6521X_GPIO0_IDX) {
dev_err(dev, "GPIO%d is output only, cannot get\n", offset);
return -ENOTSUPP;
}
@@ -65,19 +93,18 @@ static int tps65219_gpio_get(struct gpio_chip *gc, unsigned int offset)
return ret;
}
-static void tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int tps65219_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
- struct device *dev = gpio->tps->dev;
int v, mask, bit;
- bit = (offset == TPS65219_GPIO0_IDX) ? TPS65219_GPIO0_OFFSET : offset - 1;
+ bit = (offset == TPS6521X_GPIO0_IDX) ? TPS6521X_GPIO0_OFFSET : offset - 1;
mask = BIT(bit);
v = value ? mask : 0;
- if (regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG, mask, v))
- dev_err(dev, "GPIO%d, set to value %d failed.\n", offset, value);
+ return regmap_update_bits(gpio->tps->regmap,
+ TPS65219_REG_GENERAL_CONFIG, mask, v);
}
static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int offset,
@@ -112,12 +139,39 @@ static int tps65219_gpio_change_direction(struct gpio_chip *gc, unsigned int off
return -ENOTSUPP;
}
+static int tps65214_gpio_change_direction(struct gpio_chip *gc, unsigned int offset,
+ unsigned int direction)
+{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+ struct device *dev = gpio->tps->dev;
+ int val, ret;
+
+ /*
+ * The caller has already verified that this is the GPIO (offset 0),
+ * not an output-only GPO pin.
+ * Masked value: 0 = GPIO, 1 = VSEL
+ */
+ ret = regmap_read(gpio->tps->regmap, TPS65219_REG_MFP_1_CONFIG, &val);
+ if (ret)
+ return ret;
+
+ ret = !!(val & TPS65219_GPIO0_DIR_MASK);
+ if (ret)
+ dev_err(dev, "GPIO%d configured as VSEL, not GPIO\n", offset);
+
+ ret = regmap_update_bits(gpio->tps->regmap, TPS65219_REG_GENERAL_CONFIG,
+ TPS65214_GPIO0_DIR_MASK, direction);
+ if (ret)
+ dev_err(dev, "Fail to change direction to %u for GPIO%d.\n", direction, offset);
+
+ return ret;
+}
+
static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
{
struct tps65219_gpio *gpio = gpiochip_get_data(gc);
struct device *dev = gpio->tps->dev;
- if (offset != TPS65219_GPIO0_IDX) {
+ if (offset != TPS6521X_GPIO0_IDX) {
dev_err(dev, "GPIO%d is output only, cannot change to input\n", offset);
return -ENOTSUPP;
}
@@ -125,21 +179,36 @@ static int tps65219_gpio_direction_input(struct gpio_chip *gc, unsigned int offs
if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_IN)
return 0;
- return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_IN);
+ return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_IN);
}
static int tps65219_gpio_direction_output(struct gpio_chip *gc, unsigned int offset, int value)
{
+ struct tps65219_gpio *gpio = gpiochip_get_data(gc);
+
tps65219_gpio_set(gc, offset, value);
- if (offset != TPS65219_GPIO0_IDX)
+ if (offset != TPS6521X_GPIO0_IDX)
return 0;
if (tps65219_gpio_get_direction(gc, offset) == GPIO_LINE_DIRECTION_OUT)
return 0;
- return tps65219_gpio_change_direction(gc, offset, GPIO_LINE_DIRECTION_OUT);
+ return gpio->change_dir(gc, offset, GPIO_LINE_DIRECTION_OUT);
}
+static const struct gpio_chip tps65214_template_chip = {
+ .label = "tps65214-gpio",
+ .owner = THIS_MODULE,
+ .get_direction = tps65214_gpio_get_direction,
+ .direction_input = tps65219_gpio_direction_input,
+ .direction_output = tps65219_gpio_direction_output,
+ .get = tps65219_gpio_get,
+ .set_rv = tps65219_gpio_set,
+ .base = -1,
+ .ngpio = 2,
+ .can_sleep = true,
+};
+
static const struct gpio_chip tps65219_template_chip = {
.label = "tps65219-gpio",
.owner = THIS_MODULE,
@@ -147,7 +216,7 @@ static const struct gpio_chip tps65219_template_chip = {
.direction_input = tps65219_gpio_direction_input,
.direction_output = tps65219_gpio_direction_output,
.get = tps65219_gpio_get,
- .set = tps65219_gpio_set,
+ .set_rv = tps65219_gpio_set,
.base = -1,
.ngpio = 3,
.can_sleep = true,
@@ -155,6 +224,7 @@ static const struct gpio_chip tps65219_template_chip = {
static int tps65219_gpio_probe(struct platform_device *pdev)
{
+ enum pmic_id chip = platform_get_device_id(pdev)->driver_data;
struct tps65219 *tps = dev_get_drvdata(pdev->dev.parent);
struct tps65219_gpio *gpio;
@@ -162,22 +232,38 @@ static int tps65219_gpio_probe(struct platform_device *pdev)
if (!gpio)
return -ENOMEM;
+ if (chip == TPS65214) {
+ gpio->gpio_chip = tps65214_template_chip;
+ gpio->change_dir = tps65214_gpio_change_direction;
+ } else if (chip == TPS65219) {
+ gpio->gpio_chip = tps65219_template_chip;
+ gpio->change_dir = tps65219_gpio_change_direction;
+ } else {
+ return -ENODATA;
+ }
+
gpio->tps = tps;
- gpio->gpio_chip = tps65219_template_chip;
gpio->gpio_chip.parent = tps->dev;
return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio_chip, gpio);
}
+static const struct platform_device_id tps6521x_gpio_id_table[] = {
+ { "tps65214-gpio", TPS65214 },
+ { "tps65219-gpio", TPS65219 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6521x_gpio_id_table);
+
static struct platform_driver tps65219_gpio_driver = {
.driver = {
.name = "tps65219-gpio",
},
.probe = tps65219_gpio_probe,
+ .id_table = tps6521x_gpio_id_table,
};
module_platform_driver(tps65219_gpio_driver);
-MODULE_ALIAS("platform:tps65219-gpio");
MODULE_AUTHOR("Jonathan Cormier <jcormier@criticallink.com>");
-MODULE_DESCRIPTION("TPS65219 GPIO driver");
+MODULE_DESCRIPTION("TPS65214/TPS65215/TPS65219 GPIO driver");
MODULE_LICENSE("GPL");
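With the rework above, a single platform driver now serves several PMIC variants and picks the per-chip gpio_chip template and direction-change helper from the platform_device_id driver_data. The dispatch pattern in isolation, sketched with hypothetical names:

#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

enum baz_variant { BAZ_A, BAZ_B };

static const struct platform_device_id baz_gpio_id_table[] = {
	{ "baz-a-gpio", BAZ_A },
	{ "baz-b-gpio", BAZ_B },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, baz_gpio_id_table);

static int baz_gpio_probe(struct platform_device *pdev)
{
	enum baz_variant variant = platform_get_device_id(pdev)->driver_data;

	switch (variant) {
	case BAZ_A:
	case BAZ_B:
		/* Select the variant-specific template and helpers here. */
		return 0;
	default:
		return -ENODATA;
	}
}

static struct platform_driver baz_gpio_driver = {
	.driver = {
		.name = "baz-gpio",
	},
	.probe = baz_gpio_probe,
	.id_table = baz_gpio_id_table,
};
module_platform_driver(baz_gpio_driver);

MODULE_DESCRIPTION("Example of platform_device_id based variant dispatch");
MODULE_LICENSE("GPL");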
diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c
index d277aa951143..f1ced092f38a 100644
--- a/drivers/gpio/gpio-tps6586x.c
+++ b/drivers/gpio/gpio-tps6586x.c
@@ -40,13 +40,13 @@ static int tps6586x_gpio_get(struct gpio_chip *gc, unsigned offset)
return !!(val & (1 << offset));
}
-static void tps6586x_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps6586x_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc);
- tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
- value << offset, 1 << offset);
+ return tps6586x_update(tps6586x_gpio->parent, TPS6586X_GPIOSET2,
+ value << offset, 1 << offset);
}
static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -54,8 +54,11 @@ static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
{
struct tps6586x_gpio *tps6586x_gpio = gpiochip_get_data(gc);
uint8_t val, mask;
+ int ret;
- tps6586x_gpio_set(gc, offset, value);
+ ret = tps6586x_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
val = 0x1 << (offset * 2);
mask = 0x3 << (offset * 2);
@@ -95,7 +98,7 @@ static int tps6586x_gpio_probe(struct platform_device *pdev)
/* FIXME: add handling of GPIOs as dedicated inputs */
tps6586x_gpio->gpio_chip.direction_output = tps6586x_gpio_output;
- tps6586x_gpio->gpio_chip.set = tps6586x_gpio_set;
+ tps6586x_gpio->gpio_chip.set_rv = tps6586x_gpio_set;
tps6586x_gpio->gpio_chip.get = tps6586x_gpio_get;
tps6586x_gpio->gpio_chip.to_irq = tps6586x_gpio_to_irq;
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 187d21580573..3204f55394cf 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -36,18 +36,18 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65910_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
if (value)
- regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
- GPIO_SET_MASK);
- else
- regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
- GPIO_SET_MASK);
+ return regmap_set_bits(tps65910->regmap,
+ TPS65910_GPIO0 + offset, GPIO_SET_MASK);
+
+ return regmap_clear_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
+ GPIO_SET_MASK);
}
static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
@@ -55,9 +55,12 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
{
struct tps65910_gpio *tps65910_gpio = gpiochip_get_data(gc);
struct tps65910 *tps65910 = tps65910_gpio->tps65910;
+ int ret;
/* Set the initial value */
- tps65910_gpio_set(gc, offset, value);
+ ret = tps65910_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
return regmap_set_bits(tps65910->regmap, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
@@ -136,7 +139,7 @@ static int tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.can_sleep = true;
tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
- tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
+ tps65910_gpio->gpio_chip.set_rv = tps65910_gpio_set;
tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
tps65910_gpio->gpio_chip.parent = &pdev->dev;
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index fab771cb6a87..d586ccfbfc56 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -49,10 +49,13 @@ static int tps65912_gpio_direction_output(struct gpio_chip *gc,
unsigned offset, int value)
{
struct tps65912_gpio *gpio = gpiochip_get_data(gc);
+ int ret;
/* Set the initial value */
- regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ ret = regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ if (ret)
+ return ret;
return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
GPIO_CFG_MASK, GPIO_CFG_MASK);
@@ -73,13 +76,13 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
return 0;
}
-static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
- int value)
+static int tps65912_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps65912_gpio *gpio = gpiochip_get_data(gc);
- regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
- GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
+ return regmap_update_bits(gpio->tps->regmap, TPS65912_GPIO1 + offset,
+ GPIO_SET_MASK, value ? GPIO_SET_MASK : 0);
}
static const struct gpio_chip template_chip = {
@@ -89,7 +92,7 @@ static const struct gpio_chip template_chip = {
.direction_input = tps65912_gpio_direction_input,
.direction_output = tps65912_gpio_direction_output,
.get = tps65912_gpio_get,
- .set = tps65912_gpio_set,
+ .set_rv = tps65912_gpio_set,
.base = -1,
.ngpio = 5,
.can_sleep = true,
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index 532deaddfd4e..3b8805c854f7 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -70,8 +70,8 @@ static int tps68470_gpio_get_direction(struct gpio_chip *gc,
GPIO_LINE_DIRECTION_IN;
}
-static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int value)
+static int tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
{
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
@@ -82,7 +82,8 @@ static void tps68470_gpio_set(struct gpio_chip *gc, unsigned int offset,
offset -= TPS68470_N_REGULAR_GPIO;
}
- regmap_update_bits(regmap, reg, BIT(offset), value ? BIT(offset) : 0);
+ return regmap_update_bits(regmap, reg, BIT(offset),
+ value ? BIT(offset) : 0);
}
static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
@@ -90,9 +91,12 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset,
{
struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc);
struct regmap *regmap = tps68470_gpio->tps68470_regmap;
+ int ret;
/* Set the initial value */
- tps68470_gpio_set(gc, offset, value);
+ ret = tps68470_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
/* rest are always outputs */
if (offset >= TPS68470_N_REGULAR_GPIO)
@@ -138,7 +142,7 @@ static int tps68470_gpio_probe(struct platform_device *pdev)
tps68470_gpio->gc.direction_output = tps68470_gpio_output;
tps68470_gpio->gc.get = tps68470_gpio_get;
tps68470_gpio->gc.get_direction = tps68470_gpio_get_direction;
- tps68470_gpio->gc.set = tps68470_gpio_set;
+ tps68470_gpio->gc.set_rv = tps68470_gpio_set;
tps68470_gpio->gc.can_sleep = true;
tps68470_gpio->gc.names = tps68470_names;
tps68470_gpio->gc.ngpio = TPS68470_N_GPIO;
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 18f523a15b3c..056799ecce6a 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -93,14 +93,16 @@ static void _tqmx86_gpio_set(struct tqmx86_gpio_data *gpio, unsigned int offset,
tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
}
-static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
guard(raw_spinlock_irqsave)(&gpio->spinlock);
_tqmx86_gpio_set(gpio, offset, value);
+
+ return 0;
}
static int tqmx86_gpio_direction_input(struct gpio_chip *chip,
@@ -368,7 +370,7 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
chip->direction_output = tqmx86_gpio_direction_output;
chip->get_direction = tqmx86_gpio_get_direction;
chip->get = tqmx86_gpio_get;
- chip->set = tqmx86_gpio_set;
+ chip->set_rv = tqmx86_gpio_set;
chip->ngpio = TQMX86_NGPIO;
chip->parent = pdev->dev.parent;
diff --git a/drivers/gpio/gpio-ts4900.c b/drivers/gpio/gpio-ts4900.c
index 5c806140fdf0..35dd2d09b4d4 100644
--- a/drivers/gpio/gpio-ts4900.c
+++ b/drivers/gpio/gpio-ts4900.c
@@ -95,16 +95,16 @@ static int ts4900_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(reg & priv->input_bit);
}
-static void ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int ts4900_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct ts4900_gpio_priv *priv = gpiochip_get_data(chip);
if (value)
- regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT,
- TS4900_GPIO_OUT);
- else
- regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
+ return regmap_update_bits(priv->regmap, offset,
+ TS4900_GPIO_OUT, TS4900_GPIO_OUT);
+
+ return regmap_update_bits(priv->regmap, offset, TS4900_GPIO_OUT, 0);
}
static const struct regmap_config ts4900_regmap_config = {
@@ -119,7 +119,7 @@ static const struct gpio_chip template_chip = {
.direction_input = ts4900_gpio_direction_input,
.direction_output = ts4900_gpio_direction_output,
.get = ts4900_gpio_get,
- .set = ts4900_gpio_set,
+ .set_rv = ts4900_gpio_set,
.base = -1,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-ts5500.c b/drivers/gpio/gpio-ts5500.c
index 61cbec5c06a7..bb432ed73698 100644
--- a/drivers/gpio/gpio-ts5500.c
+++ b/drivers/gpio/gpio-ts5500.c
@@ -244,7 +244,7 @@ static int ts5500_gpio_output(struct gpio_chip *chip, unsigned offset, int val)
return 0;
}
-static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
+static int ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct ts5500_priv *priv = gpiochip_get_data(chip);
const struct ts5500_dio line = priv->pinout[offset];
@@ -256,6 +256,8 @@ static void ts5500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
else
ts5500_clear_mask(line.value_mask, line.value_addr);
spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
}
static int ts5500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -338,7 +340,7 @@ static int ts5500_dio_probe(struct platform_device *pdev)
priv->gpio_chip.direction_input = ts5500_gpio_input;
priv->gpio_chip.direction_output = ts5500_gpio_output;
priv->gpio_chip.get = ts5500_gpio_get;
- priv->gpio_chip.set = ts5500_gpio_set;
+ priv->gpio_chip.set_rv = ts5500_gpio_set;
priv->gpio_chip.to_irq = ts5500_gpio_to_irq;
priv->gpio_chip.base = -1;
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index 0d17985a5fdc..e39e39e3ef85 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -120,7 +120,7 @@ static u8 cached_leden;
* external pullup is needed. We could also expose the integrated PWM
* as a LED brightness control; we initialize it as "always on".
*/
-static void twl4030_led_set_value(int led, int value)
+static int twl4030_led_set_value(int led, int value)
{
u8 mask = LEDEN_LEDAON | LEDEN_LEDAPWM;
@@ -132,8 +132,8 @@ static void twl4030_led_set_value(int led, int value)
else
cached_leden |= mask;
- WARN_ON_ONCE(twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
- TWL4030_LED_LEDEN_REG));
+ return twl_i2c_write_u8(TWL4030_MODULE_LED, cached_leden,
+ TWL4030_LED_LEDEN_REG);
}
static int twl4030_set_gpio_direction(int gpio, int is_input)
@@ -278,7 +278,7 @@ static void twl_free(struct gpio_chip *chip, unsigned offset)
mutex_lock(&priv->mutex);
if (offset >= TWL4030_GPIO_MAX) {
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1);
+ WARN_ON_ONCE(twl4030_led_set_value(offset - TWL4030_GPIO_MAX, 1));
goto out;
}
@@ -334,15 +334,16 @@ out:
return ret;
}
-static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpio_twl4030_priv *priv = gpiochip_get_data(chip);
+ int ret;
mutex_lock(&priv->mutex);
if (offset < TWL4030_GPIO_MAX)
- twl4030_set_gpio_dataout(offset, value);
+ ret = twl4030_set_gpio_dataout(offset, value);
else
- twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
+ ret = twl4030_led_set_value(offset - TWL4030_GPIO_MAX, value);
if (value)
priv->out_state |= BIT(offset);
@@ -350,6 +351,8 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
priv->out_state &= ~BIT(offset);
mutex_unlock(&priv->mutex);
+
+ return ret;
}
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
@@ -373,9 +376,7 @@ static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
- twl_set(chip, offset, value);
-
- return ret;
+ return twl_set(chip, offset, value);
}
static int twl_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -418,7 +419,7 @@ static const struct gpio_chip template_chip = {
.direction_output = twl_direction_out,
.get_direction = twl_get_direction,
.get = twl_get,
- .set = twl_set,
+ .set_rv = twl_set,
.to_irq = twl_to_irq,
.can_sleep = true,
};
@@ -523,7 +524,7 @@ static int gpio_twl4030_probe(struct platform_device *pdev)
return irq_base;
}
- irq_domain_create_legacy(of_fwnode_handle(pdev->dev.of_node), TWL4030_GPIO_MAX, irq_base, 0,
+ irq_domain_create_legacy(dev_fwnode(&pdev->dev), TWL4030_GPIO_MAX, irq_base, 0,
&irq_domain_simple_ops, NULL);
ret = twl4030_sih_setup(&pdev->dev, TWL4030_MODULE_GPIO, irq_base);
diff --git a/drivers/gpio/gpio-twl6040.c b/drivers/gpio/gpio-twl6040.c
index b9171bf66168..b2196b62b528 100644
--- a/drivers/gpio/gpio-twl6040.c
+++ b/drivers/gpio/gpio-twl6040.c
@@ -37,14 +37,8 @@ static int twl6040gpo_get_direction(struct gpio_chip *chip, unsigned offset)
return GPIO_LINE_DIRECTION_OUT;
}
-static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- /* This only drives GPOs, and can't change direction */
- return 0;
-}
-
-static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
+static int twl6040gpo_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct twl6040 *twl6040 = gpiochip_get_data(chip);
int ret;
@@ -52,14 +46,21 @@ static void twl6040gpo_set(struct gpio_chip *chip, unsigned offset, int value)
ret = twl6040_reg_read(twl6040, TWL6040_REG_GPOCTL);
if (ret < 0)
- return;
+ return ret;
if (value)
gpoctl = ret | BIT(offset);
else
gpoctl = ret & ~BIT(offset);
- twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl);
+ return twl6040_reg_write(twl6040, TWL6040_REG_GPOCTL, gpoctl);
+}
+
+static int twl6040gpo_direction_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ /* This only drives GPOs, and can't change direction */
+ return twl6040gpo_set(chip, offset, value);
}
static struct gpio_chip twl6040gpo_chip = {
@@ -68,7 +69,7 @@ static struct gpio_chip twl6040gpo_chip = {
.get = twl6040gpo_get,
.direction_output = twl6040gpo_direction_out,
.get_direction = twl6040gpo_get_direction,
- .set = twl6040gpo_set,
+ .set_rv = twl6040gpo_set,
.can_sleep = true,
};
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d738da8718f9..8939556f42b6 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -138,14 +138,16 @@ static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset)
return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA);
}
-static void uniphier_gpio_set(struct gpio_chip *chip,
- unsigned int offset, int val)
+static int uniphier_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int val)
{
uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
+
+ return 0;
}
-static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
+static int uniphier_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask, unsigned long *bits)
{
unsigned long i, bank, bank_mask, bank_bits;
@@ -156,6 +158,8 @@ static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
bank_mask, bank_bits);
}
+
+ return 0;
}
static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -382,8 +386,8 @@ static int uniphier_gpio_probe(struct platform_device *pdev)
chip->direction_input = uniphier_gpio_direction_input;
chip->direction_output = uniphier_gpio_direction_output;
chip->get = uniphier_gpio_get;
- chip->set = uniphier_gpio_set;
- chip->set_multiple = uniphier_gpio_set_multiple;
+ chip->set_rv = uniphier_gpio_set;
+ chip->set_multiple_rv = uniphier_gpio_set_multiple;
chip->to_irq = uniphier_gpio_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
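
The uniphier conversion also switches the bulk setter to set_multiple_rv, which takes the same mask/bits bitmaps but now returns 0 or a negative errno. A hedged sketch of a bulk setter that simply reuses the single-line callback sketched earlier (a real driver, like uniphier here, would batch the writes per bank):

static int foo_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
				 unsigned long *bits)
{
	unsigned int offset;
	int ret;

	/* Per-line fallback; stop at the first failed write. */
	for_each_set_bit(offset, mask, gc->ngpio) {
		ret = foo_gpio_set(gc, offset, test_bit(offset, bits));
		if (ret)
			return ret;
	}

	return 0;
}

/* In probe: foo->gc.set_multiple_rv = foo_gpio_set_multiple; */
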
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index e55d28a8a66f..e8e906b54d51 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -128,45 +128,50 @@ static int vprbrd_gpioa_get(struct gpio_chip *chip,
return answer;
}
-static void vprbrd_gpioa_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int vprbrd_gpioa_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
- int ret;
+ int ret = 0;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
struct vprbrd *vb = gpio->vb;
struct vprbrd_gpioa_msg *gamsg = (struct vprbrd_gpioa_msg *)vb->buf;
- if (gpio->gpioa_out & (1 << offset)) {
- if (value)
- gpio->gpioa_val |= (1 << offset);
- else
- gpio->gpioa_val &= ~(1 << offset);
-
- mutex_lock(&vb->lock);
-
- gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
- gamsg->clk = 0x00;
- gamsg->offset = offset;
- gamsg->t1 = 0x00;
- gamsg->t2 = 0x00;
- gamsg->invert = 0x00;
- gamsg->pwmlevel = 0x00;
- gamsg->outval = value;
- gamsg->risefall = 0x00;
- gamsg->answer = 0x00;
- gamsg->__fill = 0x00;
-
- ret = usb_control_msg(vb->usb_dev,
- usb_sndctrlpipe(vb->usb_dev, 0),
- VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
- 0x0000, 0x0000, gamsg,
- sizeof(struct vprbrd_gpioa_msg), VPRBRD_USB_TIMEOUT_MS);
-
- mutex_unlock(&vb->lock);
-
- if (ret != sizeof(struct vprbrd_gpioa_msg))
- dev_err(chip->parent, "usb error setting pin value\n");
+ if (!(gpio->gpioa_out & (1 << offset)))
+ return 0;
+
+ if (value)
+ gpio->gpioa_val |= (1 << offset);
+ else
+ gpio->gpioa_val &= ~(1 << offset);
+
+ mutex_lock(&vb->lock);
+
+ gamsg->cmd = VPRBRD_GPIOA_CMD_SETOUT;
+ gamsg->clk = 0x00;
+ gamsg->offset = offset;
+ gamsg->t1 = 0x00;
+ gamsg->t2 = 0x00;
+ gamsg->invert = 0x00;
+ gamsg->pwmlevel = 0x00;
+ gamsg->outval = value;
+ gamsg->risefall = 0x00;
+ gamsg->answer = 0x00;
+ gamsg->__fill = 0x00;
+
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOA, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gamsg,
+ sizeof(struct vprbrd_gpioa_msg),
+ VPRBRD_USB_TIMEOUT_MS);
+
+ mutex_unlock(&vb->lock);
+
+ if (ret != sizeof(struct vprbrd_gpioa_msg)) {
+ dev_err(chip->parent, "usb error setting pin value\n");
+ return -EREMOTEIO;
}
+
+ return 0;
}
static int vprbrd_gpioa_direction_input(struct gpio_chip *chip,
@@ -304,37 +309,42 @@ static int vprbrd_gpiob_get(struct gpio_chip *chip,
return (gpio->gpiob_val >> offset) & 0x1;
}
-static void vprbrd_gpiob_set(struct gpio_chip *chip,
- unsigned int offset, int value)
+static int vprbrd_gpiob_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
int ret;
struct vprbrd_gpio *gpio = gpiochip_get_data(chip);
struct vprbrd *vb = gpio->vb;
struct vprbrd_gpiob_msg *gbmsg = (struct vprbrd_gpiob_msg *)vb->buf;
- if (gpio->gpiob_out & (1 << offset)) {
- if (value)
- gpio->gpiob_val |= (1 << offset);
- else
- gpio->gpiob_val &= ~(1 << offset);
+ if (!(gpio->gpiob_out & (1 << offset)))
+ return 0;
+
+ if (value)
+ gpio->gpiob_val |= (1 << offset);
+ else
+ gpio->gpiob_val &= ~(1 << offset);
- mutex_lock(&vb->lock);
+ mutex_lock(&vb->lock);
- gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
- gbmsg->val = cpu_to_be16(value << offset);
- gbmsg->mask = cpu_to_be16(0x0001 << offset);
+ gbmsg->cmd = VPRBRD_GPIOB_CMD_SETVAL;
+ gbmsg->val = cpu_to_be16(value << offset);
+ gbmsg->mask = cpu_to_be16(0x0001 << offset);
- ret = usb_control_msg(vb->usb_dev,
- usb_sndctrlpipe(vb->usb_dev, 0),
- VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
- 0x0000, 0x0000, gbmsg,
- sizeof(struct vprbrd_gpiob_msg), VPRBRD_USB_TIMEOUT_MS);
+ ret = usb_control_msg(vb->usb_dev, usb_sndctrlpipe(vb->usb_dev, 0),
+ VPRBRD_USB_REQUEST_GPIOB, VPRBRD_USB_TYPE_OUT,
+ 0x0000, 0x0000, gbmsg,
+ sizeof(struct vprbrd_gpiob_msg),
+ VPRBRD_USB_TIMEOUT_MS);
- mutex_unlock(&vb->lock);
+ mutex_unlock(&vb->lock);
- if (ret != sizeof(struct vprbrd_gpiob_msg))
- dev_err(chip->parent, "usb error setting pin value\n");
+ if (ret != sizeof(struct vprbrd_gpiob_msg)) {
+ dev_err(chip->parent, "usb error setting pin value\n");
+ return -EREMOTEIO;
}
+
+ return 0;
}
static int vprbrd_gpiob_direction_input(struct gpio_chip *chip,
@@ -368,16 +378,14 @@ static int vprbrd_gpiob_direction_output(struct gpio_chip *chip,
gpio->gpiob_out |= (1 << offset);
mutex_lock(&vb->lock);
-
ret = vprbrd_gpiob_setdir(vb, offset, 1);
- if (ret)
- dev_err(chip->parent, "usb error setting pin to output\n");
-
mutex_unlock(&vb->lock);
+ if (ret) {
+ dev_err(chip->parent, "usb error setting pin to output\n");
+ return ret;
+ }
- vprbrd_gpiob_set(chip, offset, value);
-
- return ret;
+ return vprbrd_gpiob_set(chip, offset, value);
}
/* ----- end of gpio b chip ---------------------------------------------- */
@@ -400,7 +408,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpioa.base = -1;
vb_gpio->gpioa.ngpio = 16;
vb_gpio->gpioa.can_sleep = true;
- vb_gpio->gpioa.set = vprbrd_gpioa_set;
+ vb_gpio->gpioa.set_rv = vprbrd_gpioa_set;
vb_gpio->gpioa.get = vprbrd_gpioa_get;
vb_gpio->gpioa.direction_input = vprbrd_gpioa_direction_input;
vb_gpio->gpioa.direction_output = vprbrd_gpioa_direction_output;
@@ -416,7 +424,7 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
vb_gpio->gpiob.base = -1;
vb_gpio->gpiob.ngpio = 16;
vb_gpio->gpiob.can_sleep = true;
- vb_gpio->gpiob.set = vprbrd_gpiob_set;
+ vb_gpio->gpiob.set_rv = vprbrd_gpiob_set;
vb_gpio->gpiob.get = vprbrd_gpiob_get;
vb_gpio->gpiob.direction_input = vprbrd_gpiob_direction_input;
vb_gpio->gpiob.direction_output = vprbrd_gpiob_direction_output;
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index ac39da17a29b..07552611da98 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -194,11 +194,12 @@ static int virtio_gpio_get(struct gpio_chip *gc, unsigned int gpio)
return ret ? ret : value;
}
-static void virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+static int virtio_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
{
struct virtio_gpio *vgpio = gpiochip_get_data(gc);
- virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value, NULL);
+ return virtio_gpio_req(vgpio, VIRTIO_GPIO_MSG_SET_VALUE, gpio, value,
+ NULL);
}
/* Interrupt handling */
@@ -526,7 +527,6 @@ static const char **virtio_gpio_get_names(struct virtio_gpio *vgpio,
static int virtio_gpio_probe(struct virtio_device *vdev)
{
- struct virtio_gpio_config config;
struct device *dev = &vdev->dev;
struct virtio_gpio *vgpio;
struct irq_chip *gpio_irq_chip;
@@ -539,9 +539,11 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Read configuration */
- virtio_cread_bytes(vdev, 0, &config, sizeof(config));
- gpio_names_size = le32_to_cpu(config.gpio_names_size);
- ngpio = le16_to_cpu(config.ngpio);
+ gpio_names_size =
+ virtio_cread32(vdev, offsetof(struct virtio_gpio_config,
+ gpio_names_size));
+ ngpio = virtio_cread16(vdev, offsetof(struct virtio_gpio_config,
+ ngpio));
if (!ngpio) {
dev_err(dev, "Number of GPIOs can't be zero\n");
return -EINVAL;
@@ -565,7 +567,7 @@ static int virtio_gpio_probe(struct virtio_device *vdev)
vgpio->gc.direction_input = virtio_gpio_direction_input;
vgpio->gc.direction_output = virtio_gpio_direction_output;
vgpio->gc.get = virtio_gpio_get;
- vgpio->gc.set = virtio_gpio_set;
+ vgpio->gc.set_rv = virtio_gpio_set;
vgpio->gc.ngpio = ngpio;
vgpio->gc.base = -1; /* Allocate base dynamically */
vgpio->gc.label = dev_name(dev);
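
The virtio-gpio hunk above replaces a whole-struct virtio_cread_bytes() read with per-field virtio_cread32()/virtio_cread16() calls, which return native-endian values and avoid open-coded le*_to_cpu conversions. The same idea for a hypothetical config layout (foo_virtio_config and its fields are made up for illustration):

#include <linux/virtio_config.h>

struct foo_virtio_config {
	__le32 buf_size;
	__le16 nlines;
};

static void foo_read_config(struct virtio_device *vdev, u32 *buf_size,
			    u16 *nlines)
{
	/* The per-field helpers handle config-space endianness. */
	*buf_size = virtio_cread32(vdev,
				   offsetof(struct foo_virtio_config, buf_size));
	*nlines = virtio_cread16(vdev,
				 offsetof(struct foo_virtio_config, nlines));
}
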
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index eab6726953b4..a10eab7d2617 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -215,9 +215,7 @@ static int gpio_virtuser_set_array_value(struct gpio_descs *descs,
struct gpio_virtuser_irq_work_context ctx;
if (!atomic)
- return gpiod_set_array_value_cansleep(descs->ndescs,
- descs->desc,
- descs->info, values);
+ return gpiod_multi_set_value_cansleep(descs, values);
gpio_virtuser_init_irq_work_context(&ctx);
ctx.work = IRQ_WORK_INIT_HARD(gpio_virtuser_set_value_array_atomic);
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index 8fd6c3913d69..a3bceac7854c 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -127,8 +127,7 @@ static int vx855gpio_get(struct gpio_chip *gpio, unsigned int nr)
return ret;
}
-static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
- int val)
+static int vx855gpio_set(struct gpio_chip *gpio, unsigned int nr, int val)
{
struct vx855_gpio *vg = gpiochip_get_data(gpio);
unsigned long flags;
@@ -136,7 +135,7 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
/* True GPI cannot be switched to output mode */
if (nr < NR_VX855_GPI)
- return;
+ return -EPERM;
spin_lock_irqsave(&vg->lock, flags);
reg_out = inl(vg->io_gpo);
@@ -153,6 +152,8 @@ static void vx855gpio_set(struct gpio_chip *gpio, unsigned int nr,
}
outl(reg_out, vg->io_gpo);
spin_unlock_irqrestore(&vg->lock, flags);
+
+ return 0;
}
static int vx855gpio_direction_output(struct gpio_chip *gpio,
@@ -215,7 +216,7 @@ static void vx855gpio_gpio_setup(struct vx855_gpio *vg)
c->direction_input = vx855gpio_direction_input;
c->direction_output = vx855gpio_direction_output;
c->get = vx855gpio_get;
- c->set = vx855gpio_set;
+ c->set_rv = vx855gpio_set;
c->set_config = vx855gpio_set_config;
c->dbg_show = NULL;
c->base = 0;
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 2bba27b13947..c89da9a22016 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -46,9 +46,12 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
int val)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
+ int ret;
- regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
- WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+ ret = regmap_update_bits(data->map, WCD_REG_DIR_CTL_OFFSET,
+ WCD_PIN_MASK(pin), WCD_PIN_MASK(pin));
+ if (ret)
+ return ret;
return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
WCD_PIN_MASK(pin),
@@ -65,12 +68,13 @@ static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin)
return !!(value & WCD_PIN_MASK(pin));
}
-static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
+static int wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val)
{
struct wcd_gpio_data *data = gpiochip_get_data(chip);
- regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
- WCD_PIN_MASK(pin), val ? WCD_PIN_MASK(pin) : 0);
+ return regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET,
+ WCD_PIN_MASK(pin),
+ val ? WCD_PIN_MASK(pin) : 0);
}
static int wcd_gpio_probe(struct platform_device *pdev)
@@ -94,7 +98,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->direction_output = wcd_gpio_direction_output;
chip->get_direction = wcd_gpio_get_direction;
chip->get = wcd_gpio_get;
- chip->set = wcd_gpio_set;
+ chip->set_rv = wcd_gpio_set;
chip->parent = dev;
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c
index 1ec24f6f9300..f7df3d5fc71c 100644
--- a/drivers/gpio/gpio-wcove.c
+++ b/drivers/gpio/gpio-wcove.c
@@ -200,18 +200,15 @@ static int wcove_gpio_get(struct gpio_chip *chip, unsigned int gpio)
return val & 0x1;
}
-static void wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
+static int wcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
struct wcove_gpio *wg = gpiochip_get_data(chip);
int reg = to_reg(gpio, CTRL_OUT);
if (reg < 0)
- return;
+ return 0;
- if (value)
- regmap_set_bits(wg->regmap, reg, 1);
- else
- regmap_clear_bits(wg->regmap, reg, 1);
+ return regmap_assign_bits(wg->regmap, reg, 1, value);
}
static int wcove_gpio_set_config(struct gpio_chip *chip, unsigned int gpio,
@@ -442,7 +439,7 @@ static int wcove_gpio_probe(struct platform_device *pdev)
wg->chip.direction_output = wcove_gpio_dir_out;
wg->chip.get_direction = wcove_gpio_get_direction;
wg->chip.get = wcove_gpio_get;
- wg->chip.set = wcove_gpio_set;
+ wg->chip.set_rv = wcove_gpio_set;
wg->chip.set_config = wcove_gpio_set_config;
wg->chip.base = -1;
wg->chip.ngpio = WCOVE_VGPIO_NUM;
diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c
index 4b61d975cc0e..421655b5d4c2 100644
--- a/drivers/gpio/gpio-winbond.c
+++ b/drivers/gpio/gpio-winbond.c
@@ -458,17 +458,19 @@ static int winbond_gpio_direction_out(struct gpio_chip *gc,
return 0;
}
-static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
- int val)
+static int winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int val)
{
unsigned long *base = gpiochip_get_data(gc);
const struct winbond_gpio_info *info;
+ int ret;
if (!winbond_gpio_get_info(&offset, &info))
- return;
+ return -EACCES;
- if (winbond_sio_enter(*base) != 0)
- return;
+ ret = winbond_sio_enter(*base);
+ if (ret)
+ return ret;
winbond_sio_select_logical(*base, info->dev);
@@ -481,6 +483,8 @@ static void winbond_gpio_set(struct gpio_chip *gc, unsigned int offset,
winbond_sio_reg_bclear(*base, info->datareg, offset);
winbond_sio_leave(*base);
+
+ return 0;
}
static struct gpio_chip winbond_gpio_chip = {
@@ -490,7 +494,7 @@ static struct gpio_chip winbond_gpio_chip = {
.can_sleep = true,
.get = winbond_gpio_get,
.direction_input = winbond_gpio_direction_in,
- .set = winbond_gpio_set,
+ .set_rv = winbond_gpio_set,
.direction_output = winbond_gpio_direction_out,
};
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index 61bb83a1e8ae..ab58aa7c0b99 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -58,13 +58,14 @@ static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm831x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm831x_gpio *wm831x_gpio = gpiochip_get_data(chip);
struct wm831x *wm831x = wm831x_gpio->wm831x;
- wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
- value << offset);
+ return wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
+ value << offset);
}
static int wm831x_gpio_direction_out(struct gpio_chip *chip,
@@ -85,9 +86,7 @@ static int wm831x_gpio_direction_out(struct gpio_chip *chip,
return ret;
/* Can only set GPIO state once it's in output mode */
- wm831x_gpio_set(chip, offset, value);
-
- return 0;
+ return wm831x_gpio_set(chip, offset, value);
}
static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -254,7 +253,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm831x_gpio_direction_in,
.get = wm831x_gpio_get,
.direction_output = wm831x_gpio_direction_out,
- .set = wm831x_gpio_set,
+ .set_rv = wm831x_gpio_set,
.to_irq = wm831x_gpio_to_irq,
.set_config = wm831x_set_config,
.dbg_show = wm831x_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-wm8350.c b/drivers/gpio/gpio-wm8350.c
index 2421cf606ed6..9a7677f841fc 100644
--- a/drivers/gpio/gpio-wm8350.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -48,15 +48,16 @@ static int wm8350_gpio_get(struct gpio_chip *chip, unsigned offset)
return 0;
}
-static void wm8350_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm8350_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm8350_gpio_data *wm8350_gpio = gpiochip_get_data(chip);
struct wm8350 *wm8350 = wm8350_gpio->wm8350;
if (value)
- wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
- else
- wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+ return wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+
+ return wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
}
static int wm8350_gpio_direction_out(struct gpio_chip *chip,
@@ -72,9 +73,7 @@ static int wm8350_gpio_direction_out(struct gpio_chip *chip,
return ret;
/* Don't have an atomic direction/value setup */
- wm8350_gpio_set(chip, offset, value);
-
- return 0;
+ return wm8350_gpio_set(chip, offset, value);
}
static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -94,7 +93,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8350_gpio_direction_in,
.get = wm8350_gpio_get,
.direction_output = wm8350_gpio_direction_out,
- .set = wm8350_gpio_set,
+ .set_rv = wm8350_gpio_set,
.to_irq = wm8350_gpio_to_irq,
.can_sleep = true,
};
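
Several of these conversions (twl4030, wm831x and wm8350 above) also let direction_output() return the setter's result instead of discarding it. A sketch reusing the hypothetical foo_gpio_set() from earlier, with an equally hypothetical FOO_DIR register:

#define FOO_DIR		0x14	/* hypothetical direction register */

static int foo_gpio_direction_output(struct gpio_chip *gc, unsigned int offset,
				     int value)
{
	struct foo_gpio *foo = gpiochip_get_data(gc);
	int ret;

	ret = regmap_set_bits(foo->regmap, FOO_DIR, BIT(offset));
	if (ret)
		return ret;

	/* Apply the requested level and propagate any write error. */
	return foo_gpio_set(gc, offset, value);
}
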
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index bf05c9b5882b..ccc005628dd2 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -89,7 +89,8 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
WM8994_GPN_DIR | WM8994_GPN_LVL, value);
}
-static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static int wm8994_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct wm8994_gpio *wm8994_gpio = gpiochip_get_data(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
@@ -97,7 +98,8 @@ static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (value)
value = WM8994_GPN_LVL;
- wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL, value);
+ return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset, WM8994_GPN_LVL,
+ value);
}
static int wm8994_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
@@ -254,7 +256,7 @@ static const struct gpio_chip template_chip = {
.direction_input = wm8994_gpio_direction_in,
.get = wm8994_gpio_get,
.direction_output = wm8994_gpio_direction_out,
- .set = wm8994_gpio_set,
+ .set_rv = wm8994_gpio_set,
.set_config = wm8994_gpio_set_config,
.to_irq = wm8994_gpio_to_irq,
.dbg_show = wm8994_gpio_dbg_show,
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index fb4b0c67aeef..28f794e5eb26 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -62,7 +62,7 @@ static void __xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
iowrite32(setval, chip->base + bank_offset);
}
-static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+static int xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
{
struct xgene_gpio *chip = gpiochip_get_data(gc);
unsigned long flags;
@@ -70,6 +70,8 @@ static void xgene_gpio_set(struct gpio_chip *gc, unsigned int offset, int val)
spin_lock_irqsave(&chip->lock, flags);
__xgene_gpio_set(gc, offset, val);
spin_unlock_irqrestore(&chip->lock, flags);
+
+ return 0;
}
static int xgene_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
@@ -176,7 +178,7 @@ static int xgene_gpio_probe(struct platform_device *pdev)
gpio->chip.direction_input = xgene_gpio_dir_in;
gpio->chip.direction_output = xgene_gpio_dir_out;
gpio->chip.get = xgene_gpio_get;
- gpio->chip.set = xgene_gpio_set;
+ gpio->chip.set_rv = xgene_gpio_set;
gpio->chip.label = dev_name(&pdev->dev);
gpio->chip.base = -1;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index c58a7e1349b4..36d91cacc2d9 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -148,7 +148,7 @@ static int xgpio_get(struct gpio_chip *gc, unsigned int gpio)
* This function writes the specified value in to the specified signal of the
* GPIO device.
*/
-static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
unsigned long flags;
struct xgpio_instance *chip = gpiochip_get_data(gc);
@@ -162,6 +162,8 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state);
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
}
/**
@@ -173,8 +175,8 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
* This function writes the specified values into the specified signals of the
* GPIO devices.
*/
-static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
DECLARE_BITMAP(hw_mask, 64);
DECLARE_BITMAP(hw_bits, 64);
@@ -194,6 +196,8 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
bitmap_copy(chip->state, state, 64);
raw_spin_unlock_irqrestore(&chip->gpio_lock, flags);
+
+ return 0;
}
/**
@@ -600,10 +604,10 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
chip->gc.get = xgpio_get;
- chip->gc.set = xgpio_set;
+ chip->gc.set_rv = xgpio_set;
chip->gc.request = xgpio_request;
chip->gc.free = xgpio_free;
- chip->gc.set_multiple = xgpio_set_multiple;
+ chip->gc.set_multiple_rv = xgpio_set_multiple;
chip->gc.label = dev_name(dev);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index b4b52213bcd9..bcd2dfec462d 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -206,7 +206,6 @@ static int xlp_gpio_dir_output(struct gpio_chip *gc, unsigned gpio, int state)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x1);
return 0;
@@ -216,7 +215,6 @@ static int xlp_gpio_dir_input(struct gpio_chip *gc, unsigned gpio)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_out_en, gpio, 0x0);
return 0;
@@ -226,16 +224,16 @@ static int xlp_gpio_get(struct gpio_chip *gc, unsigned gpio)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
return xlp_gpio_get_reg(priv->gpio_paddrv, gpio);
}
-static void xlp_gpio_set(struct gpio_chip *gc, unsigned gpio, int state)
+static int xlp_gpio_set(struct gpio_chip *gc, unsigned int gpio, int state)
{
struct xlp_gpio_priv *priv = gpiochip_get_data(gc);
- BUG_ON(gpio >= gc->ngpio);
xlp_gpio_set_reg(priv->gpio_paddrv, gpio, state);
+
+ return 0;
}
static int xlp_gpio_probe(struct platform_device *pdev)
@@ -276,7 +274,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
gc->ngpio = 70;
gc->direction_output = xlp_gpio_dir_output;
gc->direction_input = xlp_gpio_dir_input;
- gc->set = xlp_gpio_set;
+ gc->set_rv = xlp_gpio_set;
gc->get = xlp_gpio_get;
spin_lock_init(&priv->lock);
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index 842cf875bb92..70402c6b5407 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -102,16 +102,13 @@ static int xra1403_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(offset % 8));
}
-static void xra1403_set(struct gpio_chip *chip, unsigned int offset, int value)
+static int xra1403_set(struct gpio_chip *chip, unsigned int offset, int value)
{
- int ret;
struct xra1403 *xra = gpiochip_get_data(chip);
- ret = regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
- BIT(offset % 8), value ? BIT(offset % 8) : 0);
- if (ret)
- dev_err(chip->parent, "Failed to set pin: %d, ret: %d\n",
- offset, ret);
+ return regmap_update_bits(xra->regmap, to_reg(XRA_OCR, offset),
+ BIT(offset % 8),
+ value ? BIT(offset % 8) : 0);
}
#ifdef CONFIG_DEBUG_FS
@@ -167,7 +164,7 @@ static int xra1403_probe(struct spi_device *spi)
xra->chip.direction_output = xra1403_direction_output;
xra->chip.get_direction = xra1403_get_direction;
xra->chip.get = xra1403_get;
- xra->chip.set = xra1403_set;
+ xra->chip.set_rv = xra1403_set;
xra->chip.dbg_show = xra1403_dbg_show;
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index c8af34a6368f..e7ff3c60324d 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -86,12 +86,6 @@ static int xtensa_impwire_get_value(struct gpio_chip *gc, unsigned offset)
return !!(impwire & BIT(offset));
}
-static void xtensa_impwire_set_value(struct gpio_chip *gc, unsigned offset,
- int value)
-{
- BUG(); /* output only; should never be called */
-}
-
static int xtensa_expstate_get_direction(struct gpio_chip *gc, unsigned offset)
{
return GPIO_LINE_DIRECTION_OUT; /* output only */
@@ -109,7 +103,7 @@ static int xtensa_expstate_get_value(struct gpio_chip *gc, unsigned offset)
return !!(expstate & BIT(offset));
}
-static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset,
+static int xtensa_expstate_set_value(struct gpio_chip *gc, unsigned int offset,
int value)
{
unsigned long flags, saved_cpenable;
@@ -120,6 +114,8 @@ static void xtensa_expstate_set_value(struct gpio_chip *gc, unsigned offset,
__asm__ __volatile__("wrmsk_expstate %0, %1"
:: "a" (val), "a" (mask));
disable_cp(flags, saved_cpenable);
+
+ return 0;
}
static struct gpio_chip impwire_chip = {
@@ -128,7 +124,6 @@ static struct gpio_chip impwire_chip = {
.ngpio = 32,
.get_direction = xtensa_impwire_get_direction,
.get = xtensa_impwire_get_value,
- .set = xtensa_impwire_set_value,
};
static struct gpio_chip expstate_chip = {
@@ -137,7 +132,7 @@ static struct gpio_chip expstate_chip = {
.ngpio = 32,
.get_direction = xtensa_expstate_get_direction,
.get = xtensa_expstate_get_value,
- .set = xtensa_expstate_set_value,
+ .set_rv = xtensa_expstate_set_value,
};
static int xtensa_gpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index d7230fd83f5d..0799f7976710 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -91,7 +91,7 @@ static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin)
return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1;
}
-static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
+static int zevio_gpio_set(struct gpio_chip *chip, unsigned int pin, int value)
{
struct zevio_gpio *controller = gpiochip_get_data(chip);
u32 val;
@@ -105,6 +105,8 @@ static void zevio_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
zevio_gpio_port_set(controller, pin, ZEVIO_GPIO_OUTPUT, val);
spin_unlock(&controller->lock);
+
+ return 0;
}
static int zevio_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
@@ -159,7 +161,7 @@ static int zevio_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
static const struct gpio_chip zevio_gpio_chip = {
.direction_input = zevio_gpio_direction_input,
.direction_output = zevio_gpio_direction_output,
- .set = zevio_gpio_set,
+ .set_rv = zevio_gpio_set,
.get = zevio_gpio_get,
.to_irq = zevio_gpio_to_irq,
.base = 0,
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 3dae63f3ea21..b22b4e25c68d 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -265,8 +265,8 @@ static int zynq_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
* upper 16 bits) based on the given pin number and sets the state of a
* gpio pin to the specified value. The state is either 0 or non-zero.
*/
-static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
- int state)
+static int zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
+ int state)
{
unsigned int reg_offset, bank_num, bank_pin_num;
struct zynq_gpio *gpio = gpiochip_get_data(chip);
@@ -290,6 +290,8 @@ static void zynq_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
((state << bank_pin_num) | ZYNQ_GPIO_UPPER_MASK);
writel_relaxed(state, gpio->base_addr + reg_offset);
+
+ return 0;
}
/**
@@ -930,7 +932,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = zynq_gpio_get_value;
- chip->set = zynq_gpio_set_value;
+ chip->set_rv = zynq_gpio_set_value;
chip->request = zynq_gpio_request;
chip->free = zynq_gpio_free;
chip->direction_input = zynq_gpio_dir_in;
diff --git a/drivers/gpio/gpio-zynqmp-modepin.c b/drivers/gpio/gpio-zynqmp-modepin.c
index 2f3c9ebfa78d..6dc5d7acb89c 100644
--- a/drivers/gpio/gpio-zynqmp-modepin.c
+++ b/drivers/gpio/gpio-zynqmp-modepin.c
@@ -57,8 +57,8 @@ static int modepin_gpio_get_value(struct gpio_chip *chip, unsigned int pin)
*
* Return: None.
*/
-static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
- int state)
+static int modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
+ int state)
{
u32 bootpin_val = 0;
int ret;
@@ -77,6 +77,8 @@ static void modepin_gpio_set_value(struct gpio_chip *chip, unsigned int pin,
ret = zynqmp_pm_bootmode_write(bootpin_val);
if (ret)
pr_err("modepin: set value error %d for pin %d\n", ret, pin);
+
+ return ret;
}
/**
@@ -102,7 +104,7 @@ static int modepin_gpio_dir_in(struct gpio_chip *chip, unsigned int pin)
static int modepin_gpio_dir_out(struct gpio_chip *chip, unsigned int pin,
int state)
{
- return 0;
+ return modepin_gpio_set_value(chip, pin, state);
}
/**
@@ -128,7 +130,7 @@ static int modepin_gpio_probe(struct platform_device *pdev)
chip->owner = THIS_MODULE;
chip->parent = &pdev->dev;
chip->get = modepin_gpio_get_value;
- chip->set = modepin_gpio_set_value;
+ chip->set_rv = modepin_gpio_set_value;
chip->direction_input = modepin_gpio_dir_in;
chip->direction_output = modepin_gpio_dir_out;
chip->label = dev_name(&pdev->dev);
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index 219667315b2c..c13545dce349 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -331,6 +331,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_interrupt = "AMDI0030:00@11",
},
},
+ {
+ /*
+ * Wakeup only works when keyboard backlight is turned off
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/4169
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 15"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@8",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 4d5f83b17624..72422c5db364 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(devm_gpiod_unhinge);
*/
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs)
{
- devm_remove_action(dev, devm_gpiod_release_array, descs);
+ devm_release_action(dev, devm_gpiod_release_array, descs);
}
EXPORT_SYMBOL_GPL(devm_gpiod_put_array);
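
The devm_gpiod_put_array() fix above matters because devm_remove_action() only unregisters the devres callback, while devm_release_action() unregisters it and runs it immediately, so the GPIO array is actually released when the consumer asks for it. A small self-contained sketch of the difference (foo_release/foo_bind are made-up names):

#include <linux/device.h>
#include <linux/slab.h>

static void foo_release(void *data)
{
	kfree(data);			/* the managed undo step */
}

static int foo_bind(struct device *dev)
{
	void *buf = kzalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* Register the undo step with devres. */
	return devm_add_action_or_reset(dev, foo_release, buf);
}

static void foo_put_early(struct device *dev, void *buf)
{
	/*
	 * devm_remove_action(dev, foo_release, buf) would drop the devres
	 * entry without calling foo_release() and leak buf;
	 * devm_release_action() drops it and calls it right away.
	 */
	devm_release_action(dev, foo_release, buf);
}
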
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index aeae6df8bec9..3bc93ccadb5b 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -86,44 +86,6 @@ static void devm_gpio_release(struct device *dev, void *res)
}
/**
- * devm_gpio_request - request a GPIO for a managed device
- * @dev: device to request the GPIO for
- * @gpio: GPIO to allocate
- * @label: the name of the requested GPIO
- *
- * Except for the extra @dev argument, this function takes the
- * same arguments and performs the same function as gpio_request().
- * GPIOs requested with this function will be automatically freed
- * on driver detach.
- *
- * **DEPRECATED** This function is deprecated and must not be used in new code.
- *
- * Returns:
- * 0 on success, or negative errno on failure.
- */
-int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
-{
- unsigned *dr;
- int rc;
-
- dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
-
- rc = gpio_request(gpio, label);
- if (rc) {
- devres_free(dr);
- return rc;
- }
-
- *dr = gpio;
- devres_add(dev, dr);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(devm_gpio_request);
-
-/**
* devm_gpio_request_one - request a single GPIO with initial setup
* @dev: device to request for
* @gpio: the GPIO number
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index 3eebfac290c5..2257f7a498a1 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -8,7 +8,7 @@
#include <linux/notifier.h>
-struct device;
+struct device_node;
struct fwnode_handle;
struct gpio_chip;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 4a3aa09dad9d..b64106f1cb7b 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -3,7 +3,6 @@
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/device.h>
-#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kdev_t.h>
@@ -12,7 +11,6 @@
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/srcu.h>
#include <linux/sysfs.h>
@@ -26,6 +24,8 @@
#include "gpiolib.h"
#include "gpiolib-sysfs.h"
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+
struct kernfs_node;
#define GPIO_IRQF_TRIGGER_NONE 0
@@ -34,15 +34,64 @@ struct kernfs_node;
#define GPIO_IRQF_TRIGGER_BOTH (GPIO_IRQF_TRIGGER_FALLING | \
GPIO_IRQF_TRIGGER_RISING)
+enum {
+ GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION = 0,
+ GPIO_SYSFS_LINE_CLASS_ATTR_VALUE,
+ GPIO_SYSFS_LINE_CLASS_ATTR_EDGE,
+ GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW,
+ GPIO_SYSFS_LINE_CLASS_ATTR_SENTINEL,
+ GPIO_SYSFS_LINE_CLASS_ATTR_SIZE,
+};
+
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+enum {
+ GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION = 0,
+ GPIO_SYSFS_LINE_CHIP_ATTR_VALUE,
+ GPIO_SYSFS_LINE_CHIP_ATTR_SENTINEL,
+ GPIO_SYSFS_LINE_CHIP_ATTR_SIZE,
+};
+
struct gpiod_data {
+ struct list_head list;
+
struct gpio_desc *desc;
+ struct device *dev;
struct mutex mutex;
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
struct kernfs_node *value_kn;
int irq;
unsigned char irq_flags;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
bool direction_can_change;
+
+ struct kobject *parent;
+ struct device_attribute dir_attr;
+ struct device_attribute val_attr;
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ struct device_attribute edge_attr;
+ struct device_attribute active_low_attr;
+
+ struct attribute *class_attrs[GPIO_SYSFS_LINE_CLASS_ATTR_SIZE];
+ struct attribute_group class_attr_group;
+ const struct attribute_group *class_attr_groups[2];
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ struct attribute *chip_attrs[GPIO_SYSFS_LINE_CHIP_ATTR_SIZE];
+ struct attribute_group chip_attr_group;
+ const struct attribute_group *chip_attr_groups[2];
+};
+
+struct gpiodev_data {
+ struct list_head exported_lines;
+ struct gpio_device *gdev;
+ struct device *cdev_id; /* Class device by GPIO device ID */
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ struct device *cdev_base; /* Class device by GPIO base */
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
};
/*
@@ -73,9 +122,10 @@ static DEFINE_MUTEX(sysfs_lock);
*/
static ssize_t direction_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ dir_attr);
struct gpio_desc *desc = data->desc;
int value;
@@ -88,11 +138,13 @@ static ssize_t direction_show(struct device *dev,
}
static ssize_t direction_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+ struct device_attribute *attr, const char *buf,
+ size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ dir_attr);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status;
guard(mutex)(&data->mutex);
@@ -107,14 +159,14 @@ static ssize_t direction_store(struct device *dev,
return status ? : size;
}
-static DEVICE_ATTR_RW(direction);
-static ssize_t value_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t value_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ val_attr);
struct gpio_desc *desc = data->desc;
- ssize_t status;
+ ssize_t status;
scoped_guard(mutex, &data->mutex)
status = gpiod_get_value_cansleep(desc);
@@ -125,10 +177,11 @@ static ssize_t value_show(struct device *dev,
return sysfs_emit(buf, "%zd\n", status);
}
-static ssize_t value_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t value_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ val_attr);
struct gpio_desc *desc = data->desc;
ssize_t status;
long value;
@@ -145,8 +198,8 @@ static ssize_t value_store(struct device *dev,
return size;
}
-static DEVICE_ATTR_PREALLOC(value, S_IWUSR | S_IRUGO, value_show, value_store);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
struct gpiod_data *data = priv;
@@ -157,9 +210,8 @@ static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
}
/* Caller holds gpiod-data mutex. */
-static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
+static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
unsigned long irq_flags;
int ret;
@@ -172,33 +224,29 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
if (data->irq < 0)
return -EIO;
- data->value_kn = sysfs_get_dirent(dev->kobj.sd, "value");
- if (!data->value_kn)
- return -ENODEV;
-
irq_flags = IRQF_SHARED;
if (flags & GPIO_IRQF_TRIGGER_FALLING) {
irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
set_bit(FLAG_EDGE_FALLING, &desc->flags);
}
if (flags & GPIO_IRQF_TRIGGER_RISING) {
irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
- IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
set_bit(FLAG_EDGE_RISING, &desc->flags);
}
/*
* FIXME: This should be done in the irq_request_resources callback
- * when the irq is requested, but a few drivers currently fail
- * to do so.
+ * when the irq is requested, but a few drivers currently fail to do
+ * so.
*
- * Remove this redundant call (along with the corresponding
- * unlock) when those drivers have been fixed.
+ * Remove this redundant call (along with the corresponding unlock)
+ * when those drivers have been fixed.
*/
ret = gpiochip_lock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
if (ret < 0)
- goto err_put_kn;
+ goto err_clr_bits;
ret = request_any_context_irq(data->irq, gpio_sysfs_irq, irq_flags,
"gpiolib", data);
@@ -211,10 +259,9 @@ static int gpio_sysfs_request_irq(struct device *dev, unsigned char flags)
err_unlock:
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
-err_put_kn:
+err_clr_bits:
clear_bit(FLAG_EDGE_RISING, &desc->flags);
clear_bit(FLAG_EDGE_FALLING, &desc->flags);
- sysfs_put(data->value_kn);
return ret;
}
@@ -223,9 +270,8 @@ err_put_kn:
* Caller holds gpiod-data mutex (unless called after class-device
* deregistration).
*/
-static void gpio_sysfs_free_irq(struct device *dev)
+static void gpio_sysfs_free_irq(struct gpiod_data *data)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
struct gpio_desc *desc = data->desc;
CLASS(gpio_chip_guard, guard)(desc);
@@ -237,20 +283,20 @@ static void gpio_sysfs_free_irq(struct device *dev)
gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
clear_bit(FLAG_EDGE_RISING, &desc->flags);
clear_bit(FLAG_EDGE_FALLING, &desc->flags);
- sysfs_put(data->value_kn);
}
-static const char * const trigger_names[] = {
+static const char *const trigger_names[] = {
[GPIO_IRQF_TRIGGER_NONE] = "none",
[GPIO_IRQF_TRIGGER_FALLING] = "falling",
[GPIO_IRQF_TRIGGER_RISING] = "rising",
[GPIO_IRQF_TRIGGER_BOTH] = "both",
};
-static ssize_t edge_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t edge_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ edge_attr);
int flags;
scoped_guard(mutex, &data->mutex)
@@ -262,10 +308,11 @@ static ssize_t edge_show(struct device *dev,
return sysfs_emit(buf, "%s\n", trigger_names[flags]);
}
-static ssize_t edge_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+static ssize_t edge_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ edge_attr);
ssize_t status = size;
int flags;
@@ -279,12 +326,12 @@ static ssize_t edge_store(struct device *dev,
return size;
if (data->irq_flags)
- gpio_sysfs_free_irq(dev);
+ gpio_sysfs_free_irq(data);
if (!flags)
return size;
- status = gpio_sysfs_request_irq(dev, flags);
+ status = gpio_sysfs_request_irq(data, flags);
if (status)
return status;
@@ -292,17 +339,14 @@ static ssize_t edge_store(struct device *dev,
return size;
}
-static DEVICE_ATTR_RW(edge);
/* Caller holds gpiod-data mutex. */
-static int gpio_sysfs_set_active_low(struct device *dev, int value)
+static int gpio_sysfs_set_active_low(struct gpiod_data *data, int value)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
unsigned int flags = data->irq_flags;
struct gpio_desc *desc = data->desc;
int status = 0;
-
if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
@@ -310,9 +354,9 @@ static int gpio_sysfs_set_active_low(struct device *dev, int value)
/* reconfigure poll(2) support if enabled on one edge only */
if (flags == GPIO_IRQF_TRIGGER_FALLING ||
- flags == GPIO_IRQF_TRIGGER_RISING) {
- gpio_sysfs_free_irq(dev);
- status = gpio_sysfs_request_irq(dev, flags);
+ flags == GPIO_IRQF_TRIGGER_RISING) {
+ gpio_sysfs_free_irq(data);
+ status = gpio_sysfs_request_irq(data, flags);
}
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
@@ -321,9 +365,10 @@ static int gpio_sysfs_set_active_low(struct device *dev, int value)
}
static ssize_t active_low_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ active_low_attr);
struct gpio_desc *desc = data->desc;
int value;
@@ -334,9 +379,11 @@ static ssize_t active_low_show(struct device *dev,
}
static ssize_t active_low_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t size)
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
- struct gpiod_data *data = dev_get_drvdata(dev);
+ struct gpiod_data *data = container_of(attr, struct gpiod_data,
+ active_low_attr);
ssize_t status;
long value;
@@ -346,84 +393,189 @@ static ssize_t active_low_store(struct device *dev,
guard(mutex)(&data->mutex);
- return gpio_sysfs_set_active_low(dev, value) ?: size;
+ return gpio_sysfs_set_active_low(data, value) ?: size;
}
-static DEVICE_ATTR_RW(active_low);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
int n)
{
- struct device *dev = kobj_to_dev(kobj);
- struct gpiod_data *data = dev_get_drvdata(dev);
- struct gpio_desc *desc = data->desc;
+ struct device_attribute *dev_attr = container_of(attr,
+ struct device_attribute, attr);
umode_t mode = attr->mode;
- bool show_direction = data->direction_can_change;
+ struct gpiod_data *data;
+
+ if (strcmp(attr->name, "direction") == 0) {
+ data = container_of(dev_attr, struct gpiod_data, dir_attr);
- if (attr == &dev_attr_direction.attr) {
- if (!show_direction)
+ if (!data->direction_can_change)
mode = 0;
- } else if (attr == &dev_attr_edge.attr) {
- if (gpiod_to_irq(desc) < 0)
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ } else if (strcmp(attr->name, "edge") == 0) {
+ data = container_of(dev_attr, struct gpiod_data, edge_attr);
+
+ if (gpiod_to_irq(data->desc) < 0)
mode = 0;
- if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+
+ if (!data->direction_can_change &&
+ test_bit(FLAG_IS_OUT, &data->desc->flags))
mode = 0;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
return mode;
}
-static struct attribute *gpio_attrs[] = {
- &dev_attr_direction.attr,
- &dev_attr_edge.attr,
- &dev_attr_value.attr,
- &dev_attr_active_low.attr,
- NULL,
-};
-
-static const struct attribute_group gpio_group = {
- .attrs = gpio_attrs,
- .is_visible = gpio_is_visible,
-};
-
-static const struct attribute_group *gpio_groups[] = {
- &gpio_group,
- NULL
-};
-
/*
* /sys/class/gpio/gpiochipN/
* /base ... matching gpio_chip.base (N)
* /label ... matching gpio_chip.label
* /ngpio ... matching gpio_chip.ngpio
+ *
+ * AND
+ *
+ * /sys/class/gpio/chipX/
+ * /export ... export GPIO at given offset
+ * /unexport ... unexport GPIO at given offset
+ * /label ... matching gpio_chip.label
+ * /ngpio ... matching gpio_chip.ngpio
*/
-static ssize_t base_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+static ssize_t base_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n", gdev->base);
+ return sysfs_emit(buf, "%u\n", data->gdev->base);
}
static DEVICE_ATTR_RO(base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
-static ssize_t label_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t label_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", gdev->label);
+ return sysfs_emit(buf, "%s\n", data->gdev->label);
}
static DEVICE_ATTR_RO(label);
-static ssize_t ngpio_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t ngpio_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- const struct gpio_device *gdev = dev_get_drvdata(dev);
+ const struct gpiodev_data *data = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%u\n", gdev->ngpio);
+ return sysfs_emit(buf, "%u\n", data->gdev->ngpio);
}
static DEVICE_ATTR_RO(ngpio);
+static int export_gpio_desc(struct gpio_desc *desc)
+{
+ int offset, ret;
+
+ CLASS(gpio_chip_guard, guard)(desc);
+ if (!guard.gc)
+ return -ENODEV;
+
+ offset = gpio_chip_hwgpio(desc);
+ if (!gpiochip_line_is_valid(guard.gc, offset)) {
+ pr_debug_ratelimited("%s: GPIO %d masked\n", __func__,
+ gpio_chip_hwgpio(desc));
+ return -EINVAL;
+ }
+
+ /*
+ * No extra locking here; FLAG_SYSFS just signifies that the
+	 * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+
+ ret = gpiod_request_user(desc, "sysfs");
+ if (ret)
+ return ret;
+
+ ret = gpiod_set_transitory(desc, false);
+ if (ret) {
+ gpiod_free(desc);
+ return ret;
+ }
+
+ ret = gpiod_export(desc, true);
+ if (ret < 0) {
+ gpiod_free(desc);
+ } else {
+ set_bit(FLAG_SYSFS, &desc->flags);
+ gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
+ }
+
+ return ret;
+}
+
+static int unexport_gpio_desc(struct gpio_desc *desc)
+{
+ /*
+ * No extra locking here; FLAG_SYSFS just signifies that the
+	 * request and export were done on behalf of userspace, so
+ * they may be undone on its behalf too.
+ */
+ if (!test_and_clear_bit(FLAG_SYSFS, &desc->flags))
+ return -EINVAL;
+
+ gpiod_unexport(desc);
+ gpiod_free(desc);
+
+ return 0;
+}
+
+static ssize_t do_chip_export_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, ssize_t size,
+ int (*handler)(struct gpio_desc *desc))
+{
+ struct gpiodev_data *data = dev_get_drvdata(dev);
+ struct gpio_device *gdev = data->gdev;
+ struct gpio_desc *desc;
+ unsigned int gpio;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &gpio);
+ if (ret)
+ return ret;
+
+ desc = gpio_device_get_desc(gdev, gpio);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ ret = handler(desc);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t chip_export_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return do_chip_export_store(dev, attr, buf, size, export_gpio_desc);
+}
+
+static struct device_attribute dev_attr_export = __ATTR(export, 0200, NULL,
+ chip_export_store);
+
+static ssize_t chip_unexport_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ return do_chip_export_store(dev, attr, buf, size, unexport_gpio_desc);
+}
+
+static struct device_attribute dev_attr_unexport = __ATTR(unexport, 0200,
+ NULL,
+ chip_unexport_store);
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
@@ -431,7 +583,18 @@ static struct attribute *gpiochip_attrs[] = {
NULL,
};
ATTRIBUTE_GROUPS(gpiochip);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+static struct attribute *gpiochip_ext_attrs[] = {
+ &dev_attr_label.attr,
+ &dev_attr_ngpio.attr,
+ &dev_attr_export.attr,
+ &dev_attr_unexport.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(gpiochip_ext);
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
@@ -439,11 +602,11 @@ ATTRIBUTE_GROUPS(gpiochip);
* integer N ... number of GPIO to unexport
*/
static ssize_t export_store(const struct class *class,
- const struct class_attribute *attr,
- const char *buf, size_t len)
+ const struct class_attribute *attr,
+ const char *buf, size_t len)
{
struct gpio_desc *desc;
- int status, offset;
+ int status;
long gpio;
status = kstrtol(buf, 0, &gpio);
@@ -457,40 +620,7 @@ static ssize_t export_store(const struct class *class,
return -EINVAL;
}
- CLASS(gpio_chip_guard, guard)(desc);
- if (!guard.gc)
- return -ENODEV;
-
- offset = gpio_chip_hwgpio(desc);
- if (!gpiochip_line_is_valid(guard.gc, offset)) {
- pr_debug_ratelimited("%s: GPIO %ld masked\n", __func__, gpio);
- return -EINVAL;
- }
-
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
-
- status = gpiod_request_user(desc, "sysfs");
- if (status)
- goto done;
-
- status = gpiod_set_transitory(desc, false);
- if (status) {
- gpiod_free(desc);
- goto done;
- }
-
- status = gpiod_export(desc, true);
- if (status < 0) {
- gpiod_free(desc);
- } else {
- set_bit(FLAG_SYSFS, &desc->flags);
- gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
- }
-
-done:
+ status = export_gpio_desc(desc);
if (status)
pr_debug("%s: status %d\n", __func__, status);
return status ? : len;
@@ -498,8 +628,8 @@ done:
static CLASS_ATTR_WO(export);
static ssize_t unexport_store(const struct class *class,
- const struct class_attribute *attr,
- const char *buf, size_t len)
+ const struct class_attribute *attr,
+ const char *buf, size_t len)
{
struct gpio_desc *desc;
int status;
@@ -507,7 +637,7 @@ static ssize_t unexport_store(const struct class *class,
status = kstrtol(buf, 0, &gpio);
if (status < 0)
- goto done;
+ return status;
desc = gpio_to_desc(gpio);
/* reject bogus commands (gpiod_unexport() ignores them) */
@@ -516,18 +646,7 @@ static ssize_t unexport_store(const struct class *class,
return -EINVAL;
}
- status = -EINVAL;
-
- /* No extra locking here; FLAG_SYSFS just signifies that the
- * request and export were done by on behalf of userspace, so
- * they may be undone on its behalf too.
- */
- if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) {
- gpiod_unexport(desc);
- gpiod_free(desc);
- status = 0;
- }
-done:
+ status = unexport_gpio_desc(desc);
if (status)
pr_debug("%s: status %d\n", __func__, status);
return status ? : len;
@@ -540,12 +659,55 @@ static struct attribute *gpio_class_attrs[] = {
NULL,
};
ATTRIBUTE_GROUPS(gpio_class);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
static const struct class gpio_class = {
.name = "gpio",
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
.class_groups = gpio_class_groups,
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+};
+
+static int match_gdev(struct device *dev, const void *desc)
+{
+ struct gpiodev_data *data = dev_get_drvdata(dev);
+ const struct gpio_device *gdev = desc;
+
+ return data && data->gdev == gdev;
+}
+
+static struct gpiodev_data *
+gdev_get_data(struct gpio_device *gdev) __must_hold(&sysfs_lock)
+{
+ /*
+ * Find the first device in GPIO class that matches. Whether that's
+ * the one indexed by GPIO base or device ID doesn't matter, it has
+ * the same address set as driver data.
+ */
+ struct device *cdev __free(put_device) = class_find_device(&gpio_class,
+ NULL, gdev,
+ match_gdev);
+ if (!cdev)
+ return NULL;
+
+ return dev_get_drvdata(cdev);
};
+static void gpiod_attr_init(struct device_attribute *dev_attr, const char *name,
+ ssize_t (*show)(struct device *dev,
+ struct device_attribute *attr,
+ char *buf),
+ ssize_t (*store)(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count))
+{
+ sysfs_attr_init(&dev_attr->attr);
+ dev_attr->attr.name = name;
+ dev_attr->attr.mode = 0644;
+ dev_attr->show = show;
+ dev_attr->store = store;
+}
+
/**
* gpiod_export - export a GPIO through sysfs
* @desc: GPIO to make available, already requested
@@ -564,9 +726,11 @@ static const struct class gpio_class = {
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
+ char *path __free(kfree) = NULL;
+ struct gpiodev_data *gdev_data;
+ struct gpiod_data *desc_data;
struct gpio_device *gdev;
- struct gpiod_data *data;
- struct device *dev;
+ struct attribute **attrs;
int status;
/* can't export until sysfs is available ... */
@@ -591,43 +755,116 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
guard(mutex)(&sysfs_lock);
- /* check if chip is being removed */
- if (!gdev->mockdev) {
- status = -ENODEV;
- goto err_clear_bit;
- }
-
if (!test_bit(FLAG_REQUESTED, &desc->flags)) {
gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__);
status = -EPERM;
goto err_clear_bit;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
+ desc_data = kzalloc(sizeof(*desc_data), GFP_KERNEL);
+ if (!desc_data) {
status = -ENOMEM;
goto err_clear_bit;
}
- data->desc = desc;
- mutex_init(&data->mutex);
+ desc_data->desc = desc;
+ mutex_init(&desc_data->mutex);
if (guard.gc->direction_input && guard.gc->direction_output)
- data->direction_can_change = direction_may_change;
+ desc_data->direction_can_change = direction_may_change;
else
- data->direction_can_change = false;
+ desc_data->direction_can_change = false;
+
+ gpiod_attr_init(&desc_data->dir_attr, "direction",
+ direction_show, direction_store);
+ gpiod_attr_init(&desc_data->val_attr, "value", value_show, value_store);
- dev = device_create_with_groups(&gpio_class, &gdev->dev,
- MKDEV(0, 0), data, gpio_groups,
- "gpio%u", desc_to_gpio(desc));
- if (IS_ERR(dev)) {
- status = PTR_ERR(dev);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ gpiod_attr_init(&desc_data->edge_attr, "edge", edge_show, edge_store);
+ gpiod_attr_init(&desc_data->active_low_attr, "active_low",
+ active_low_show, active_low_store);
+
+ attrs = desc_data->class_attrs;
+ desc_data->class_attr_group.is_visible = gpio_is_visible;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_DIRECTION] = &desc_data->dir_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_VALUE] = &desc_data->val_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_EDGE] = &desc_data->edge_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CLASS_ATTR_ACTIVE_LOW] = &desc_data->active_low_attr.attr;
+
+ desc_data->class_attr_group.attrs = desc_data->class_attrs;
+ desc_data->class_attr_groups[0] = &desc_data->class_attr_group;
+
+ /*
+ * Note: we need to continue passing desc_data here as there's still
+ * at least one known user of gpiod_export_link() in the tree. This
+ * function still uses class_find_device() internally.
+ */
+ desc_data->dev = device_create_with_groups(&gpio_class, &gdev->dev,
+ MKDEV(0, 0), desc_data,
+ desc_data->class_attr_groups,
+ "gpio%u",
+ desc_to_gpio(desc));
+ if (IS_ERR(desc_data->dev)) {
+ status = PTR_ERR(desc_data->dev);
goto err_free_data;
}
+ desc_data->value_kn = sysfs_get_dirent(desc_data->dev->kobj.sd,
+ "value");
+ if (!desc_data->value_kn) {
+ status = -ENODEV;
+ goto err_unregister_device;
+ }
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ gdev_data = gdev_get_data(gdev);
+ if (!gdev_data) {
+ status = -ENODEV;
+ goto err_put_dirent;
+ }
+
+ desc_data->chip_attr_group.name = kasprintf(GFP_KERNEL, "gpio%u",
+ gpio_chip_hwgpio(desc));
+ if (!desc_data->chip_attr_group.name) {
+ status = -ENOMEM;
+ goto err_put_dirent;
+ }
+
+ attrs = desc_data->chip_attrs;
+ desc_data->chip_attr_group.is_visible = gpio_is_visible;
+ attrs[GPIO_SYSFS_LINE_CHIP_ATTR_DIRECTION] = &desc_data->dir_attr.attr;
+ attrs[GPIO_SYSFS_LINE_CHIP_ATTR_VALUE] = &desc_data->val_attr.attr;
+
+ desc_data->chip_attr_group.attrs = attrs;
+ desc_data->chip_attr_groups[0] = &desc_data->chip_attr_group;
+
+ desc_data->parent = &gdev_data->cdev_id->kobj;
+ status = sysfs_create_groups(desc_data->parent,
+ desc_data->chip_attr_groups);
+ if (status)
+ goto err_free_name;
+
+ path = kasprintf(GFP_KERNEL, "gpio%u/value", gpio_chip_hwgpio(desc));
+ if (!path) {
+ status = -ENOMEM;
+ goto err_remove_groups;
+ }
+
+ list_add(&desc_data->list, &gdev_data->exported_lines);
+
return 0;
+err_remove_groups:
+ sysfs_remove_groups(desc_data->parent, desc_data->chip_attr_groups);
+err_free_name:
+ kfree(desc_data->chip_attr_group.name);
+err_put_dirent:
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ sysfs_put(desc_data->value_kn);
+err_unregister_device:
+ device_unregister(desc_data->dev);
err_free_data:
- kfree(data);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ kfree(desc_data);
err_clear_bit:
clear_bit(FLAG_EXPORT, &desc->flags);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -635,12 +872,14 @@ err_clear_bit:
}
EXPORT_SYMBOL_GPL(gpiod_export);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
static int match_export(struct device *dev, const void *desc)
{
struct gpiod_data *data = dev_get_drvdata(dev);
- return data->desc == desc;
+ return gpiod_is_equal(data->desc, desc);
}
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
/**
* gpiod_export_link - create a sysfs link to an exported GPIO node
@@ -657,6 +896,7 @@ static int match_export(struct device *dev, const void *desc)
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc)
{
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
struct device *cdev;
int ret;
@@ -673,6 +913,9 @@ int gpiod_export_link(struct device *dev, const char *name,
put_device(cdev);
return ret;
+#else
+ return -EOPNOTSUPP;
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
EXPORT_SYMBOL_GPL(gpiod_export_link);
@@ -684,8 +927,9 @@ EXPORT_SYMBOL_GPL(gpiod_export_link);
*/
void gpiod_unexport(struct gpio_desc *desc)
{
- struct gpiod_data *data;
- struct device *dev;
+ struct gpiod_data *tmp, *desc_data = NULL;
+ struct gpiodev_data *gdev_data;
+ struct gpio_device *gdev;
if (!desc) {
pr_warn("%s: invalid GPIO\n", __func__);
@@ -696,32 +940,50 @@ void gpiod_unexport(struct gpio_desc *desc)
if (!test_bit(FLAG_EXPORT, &desc->flags))
return;
- dev = class_find_device(&gpio_class, NULL, desc, match_export);
- if (!dev)
+ gdev = gpiod_to_gpio_device(desc);
+ gdev_data = gdev_get_data(gdev);
+ if (!gdev_data)
+ return;
+
+ list_for_each_entry(tmp, &gdev_data->exported_lines, list) {
+ if (gpiod_is_equal(desc, tmp->desc)) {
+ desc_data = tmp;
+ break;
+ }
+ }
+
+ if (!desc_data)
return;
- data = dev_get_drvdata(dev);
+ list_del(&desc_data->list);
clear_bit(FLAG_EXPORT, &desc->flags);
- device_unregister(dev);
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ sysfs_put(desc_data->value_kn);
+ device_unregister(desc_data->dev);
/*
* Release irq after deregistration to prevent race with
* edge_store.
*/
- if (data->irq_flags)
- gpio_sysfs_free_irq(dev);
+ if (desc_data->irq_flags)
+ gpio_sysfs_free_irq(desc_data);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ sysfs_remove_groups(desc_data->parent,
+ desc_data->chip_attr_groups);
}
- put_device(dev);
- kfree(data);
+ mutex_destroy(&desc_data->mutex);
+ kfree(desc_data);
}
EXPORT_SYMBOL_GPL(gpiod_unexport);
int gpiochip_sysfs_register(struct gpio_device *gdev)
{
+ struct gpiodev_data *data;
struct gpio_chip *chip;
struct device *parent;
- struct device *dev;
+ int err;
/*
* Many systems add gpio chips for SOC support very early,
@@ -747,32 +1009,61 @@ int gpiochip_sysfs_register(struct gpio_device *gdev)
else
parent = &gdev->dev;
- /* use chip->base for the ID; it's already known to be unique */
- dev = device_create_with_groups(&gpio_class, parent, MKDEV(0, 0), gdev,
- gpiochip_groups, GPIOCHIP_NAME "%d",
- chip->base);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->gdev = gdev;
+ INIT_LIST_HEAD(&data->exported_lines);
guard(mutex)(&sysfs_lock);
- gdev->mockdev = dev;
+
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ /* use chip->base for the ID; it's already known to be unique */
+ data->cdev_base = device_create_with_groups(&gpio_class, parent,
+ MKDEV(0, 0), data,
+ gpiochip_groups,
+ GPIOCHIP_NAME "%d",
+ chip->base);
+ if (IS_ERR(data->cdev_base)) {
+ err = PTR_ERR(data->cdev_base);
+ kfree(data);
+ return err;
+ }
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+
+ data->cdev_id = device_create_with_groups(&gpio_class, parent,
+ MKDEV(0, 0), data,
+ gpiochip_ext_groups,
+ "chip%d", gdev->id);
+ if (IS_ERR(data->cdev_id)) {
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ device_unregister(data->cdev_base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ err = PTR_ERR(data->cdev_id);
+ kfree(data);
+ return err;
+ }
return 0;
}
void gpiochip_sysfs_unregister(struct gpio_device *gdev)
{
+ struct gpiodev_data *data;
struct gpio_desc *desc;
struct gpio_chip *chip;
scoped_guard(mutex, &sysfs_lock) {
- if (!gdev->mockdev)
+ data = gdev_get_data(gdev);
+ if (!data)
return;
- device_unregister(gdev->mockdev);
-
- /* prevent further gpiod exports */
- gdev->mockdev = NULL;
+#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
+ device_unregister(data->cdev_base);
+#endif /* CONFIG_GPIO_SYSFS_LEGACY */
+ device_unregister(data->cdev_id);
+ kfree(data);
}
guard(srcu)(&gdev->srcu);
@@ -798,9 +1089,6 @@ static int gpiofind_sysfs_register(struct gpio_chip *gc, const void *data)
struct gpio_device *gdev = gc->gpiodev;
int ret;
- if (gdev->mockdev)
- return 0;
-
ret = gpiochip_sysfs_register(gdev);
if (ret)
chip_err(gc, "failed to register the sysfs entry: %d\n", ret);
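A minimal userspace sketch of the per-chip sysfs interface added above: a line is exported by writing its hardware offset (relative to the chip, not a legacy global GPIO number) to /sys/class/gpio/chip<id>/export, after which direction and value attributes appear in a gpio<offset>/ group under the same chip device. The chip index (chip0) and line offset (4) are assumptions made only for illustration, not values taken from the patch.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, s, strlen(s)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Offsets are relative to the chip, unlike the legacy global numbers. */
	write_str("/sys/class/gpio/chip0/export", "4");
	write_str("/sys/class/gpio/chip0/gpio4/direction", "out");
	write_str("/sys/class/gpio/chip0/gpio4/value", "1");
	write_str("/sys/class/gpio/chip0/unexport", "4");
	return 0;
}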
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3a3eca5b4c40..a93d2a9355e2 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -75,6 +75,19 @@ static const struct bus_type gpio_bus_type = {
};
/*
+ * At the end we want all GPIOs to be dynamically allocated from 0.
+ * However, some legacy drivers still perform fixed allocation.
+ * Until they are all fixed, leave 0-512 space for them.
+ */
+#define GPIO_DYNAMIC_BASE 512
+/*
+ * Define the maximum of the possible GPIO in the global numberspace.
+ * While the GPIO base and numbers are positive, we limit it with signed
+ * maximum as a lot of code is using negative values for special cases.
+ */
+#define GPIO_DYNAMIC_MAX INT_MAX
+
+/*
* Number of GPIOs to use for the fast path in set array
*/
#define FASTPATH_NGPIO CONFIG_GPIOLIB_FASTPATH_LIMIT
@@ -266,20 +279,6 @@ struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc)
EXPORT_SYMBOL_GPL(gpiod_to_gpio_device);
/**
- * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
- * @desc: Descriptor to compare.
- * @other: The second descriptor to compare against.
- *
- * Returns:
- * True if the descriptors refer to the same physical pin. False otherwise.
- */
-bool gpiod_is_equal(struct gpio_desc *desc, struct gpio_desc *other)
-{
- return desc == other;
-}
-EXPORT_SYMBOL_GPL(gpiod_is_equal);
-
-/**
* gpio_device_get_base() - Get the base GPIO number allocated by this device
* @gdev: GPIO device
*
@@ -387,6 +386,21 @@ static int validate_desc(const struct gpio_desc *desc, const char *func)
return; \
} while (0)
+/**
+ * gpiod_is_equal() - Check if two GPIO descriptors refer to the same pin.
+ * @desc: Descriptor to compare.
+ * @other: The second descriptor to compare against.
+ *
+ * Returns:
+ * True if the descriptors refer to the same physical pin. False otherwise.
+ */
+bool gpiod_is_equal(const struct gpio_desc *desc, const struct gpio_desc *other)
+{
+ return validate_desc(desc, __func__) > 0 &&
+ !IS_ERR_OR_NULL(other) && desc == other;
+}
+EXPORT_SYMBOL_GPL(gpiod_is_equal);
+
static int gpiochip_get_direction(struct gpio_chip *gc, unsigned int offset)
{
int ret;
@@ -5221,8 +5235,8 @@ core_initcall(gpiolib_dev_init);
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
{
bool active_low, is_irq, is_out;
- unsigned int gpio = gdev->base;
struct gpio_desc *desc;
+ unsigned int gpio = 0;
struct gpio_chip *gc;
unsigned long flags;
int value;
@@ -5326,8 +5340,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v)
return 0;
}
- seq_printf(s, "%s: GPIOs %u-%u", dev_name(&gdev->dev), gdev->base,
- gdev->base + gdev->ngpio - 1);
+ seq_printf(s, "%s: %u GPIOs", dev_name(&gdev->dev), gdev->ngpio);
parent = gc->parent;
if (parent)
seq_printf(s, ", parent: %s/%s",
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 58f64056de77..9b74738a9ca5 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -27,8 +27,6 @@
* @dev: the GPIO device struct
* @chrdev: character device for the GPIO device
* @id: numerical ID number for the GPIO chip
- * @mockdev: class device used by the deprecated sysfs interface (may be
- * NULL)
* @owner: helps prevent removal of modules exporting active GPIOs
* @chip: pointer to the corresponding gpiochip, holding static
* data for this device
@@ -65,7 +63,6 @@ struct gpio_device {
struct device dev;
struct cdev chrdev;
int id;
- struct device *mockdev;
struct module *owner;
struct gpio_chip __rcu *chip;
struct gpio_desc *descs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 78f8755996f0..aa32df7e2fb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5193,6 +5193,8 @@ exit:
dev->dev->power.disable_depth--;
#endif
}
+
+ amdgpu_vram_mgr_clear_reset_blocks(adev);
adev->in_suspend = false;
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 426834806fbf..6ac0ce361a2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -427,6 +427,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
unsigned long flags;
ktime_t deadline;
+ bool ret;
if (unlikely(ring->adev->debug_disable_soft_recovery))
return false;
@@ -441,12 +442,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
dma_fence_set_error(fence, -ENODATA);
spin_unlock_irqrestore(fence->lock, flags);
- atomic_inc(&ring->adev->gpu_reset_counter);
while (!dma_fence_is_signaled(fence) &&
ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
ring->funcs->soft_recovery(ring, vmid);
- return dma_fence_is_signaled(fence);
+ ret = dma_fence_is_signaled(fence);
+ /* increment the counter only if soft reset worked */
+ if (ret)
+ atomic_inc(&ring->adev->gpu_reset_counter);
+
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 208b7d1d8a27..450e4bf093b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
struct ttm_resource *res);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index abdc52b0895a..07c936e90d8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -783,6 +783,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
}
/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_buddy_reset_clear(mm, false);
+ mutex_unlock(&mgr->lock);
+}
+
+/**
* amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
*
* @man: TTM memory type manager
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5ee2237d8ee8..bc983ecf3d99 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4640,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
amdgpu_ring_clear_ring(ring);
}
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 87058271b00c..2551823382f8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -728,7 +728,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
* support programmable degamma anywhere.
*/
is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0,
+ /* Don't enable DRM CRTC degamma property for DCN401 since the
+ * pre-blending degamma LUT doesn't apply to cursor, and therefore
+ * can't work similar to a post-blending degamma LUT as in other hw
+ * versions.
+ * TODO: revisit it once KMS plane color API is merged.
+ */
+ drm_crtc_enable_color_mgmt(&acrtc->base,
+ (is_dcn &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
+ MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index a3b8e3d4a429..4b17d2fcd565 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -1565,7 +1565,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
if (!clk_mgr->base.bw_params) {
BREAK_TO_DEBUGGER();
- kfree(clk_mgr);
+ kfree(clk_mgr401);
return NULL;
}
@@ -1576,6 +1576,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct(
if (!clk_mgr->wm_range_table) {
BREAK_TO_DEBUGGER();
kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr401);
return NULL;
}
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index de9c23537465..834b42a4d31f 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1373,7 +1373,7 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
HPD_DISABLE, 0);
mutex_unlock(&pdata->comms_mutex);
- };
+ }
drm_bridge_add(&pdata->bridge);
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index ec7eac6b595f..718c9122bc3a 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -57,7 +57,7 @@ static int dp_aux_ep_probe(struct device *dev)
container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret)
return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n");
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index dc622c78db9d..ea78c6c8ca7a 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -725,7 +725,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
* monitor doesn't power down exactly after the throw away read.
*/
if (!aux->is_remote) {
- ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS);
+ ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET);
if (ret < 0)
return ret;
}
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 241c855f891f..66aff35f8647 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -405,6 +405,49 @@ drm_get_buddy(struct drm_buddy_block *block)
EXPORT_SYMBOL(drm_get_buddy);
/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+ * in the freelist.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+ u64 root_size, size, start;
+ unsigned int order;
+ int i;
+
+ size = mm->size;
+ for (i = 0; i < mm->n_roots; ++i) {
+ order = ilog2(size) - ilog2(mm->chunk_size);
+ start = drm_buddy_block_offset(mm->roots[i]);
+ __force_merge(mm, start, start + size, order);
+
+ root_size = mm->chunk_size << order;
+ size -= root_size;
+ }
+
+ for (i = 0; i <= mm->max_order; ++i) {
+ struct drm_buddy_block *block;
+
+ list_for_each_entry_reverse(block, &mm->free_list[i], link) {
+ if (is_clear != drm_buddy_block_is_clear(block)) {
+ if (is_clear) {
+ mark_cleared(block);
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+ } else {
+ clear_reset(block);
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+ }
+ }
+ }
+ }
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
+
+/**
* drm_buddy_free_block - free a block
*
* @mm: DRM buddy manager
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index b7f033d4352a..4f0320df858f 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
if (drm_gem_is_imported(gem_obj)) {
if (dma_obj->vaddr)
- dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map);
+ dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 6f72e7a0f427..6ff22e04029e 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -419,6 +419,7 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap);
static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir,
unsigned int num_planes)
{
+ struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
int ret;
@@ -427,9 +428,10 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
obj = drm_gem_fb_get_obj(fb, num_planes);
if (!obj)
continue;
+ import_attach = obj->import_attach;
if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_end_cpu_access(obj->dma_buf, dir);
+ ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir);
if (ret)
drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n",
ret, num_planes, dir);
@@ -452,6 +454,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat
*/
int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir)
{
+ struct dma_buf_attachment *import_attach;
struct drm_gem_object *obj;
unsigned int i;
int ret;
@@ -462,9 +465,10 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct
ret = -EINVAL;
goto err___drm_gem_fb_end_cpu_access;
}
+ import_attach = obj->import_attach;
if (!drm_gem_is_imported(obj))
continue;
- ret = dma_buf_begin_cpu_access(obj->dma_buf, dir);
+ ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir);
if (ret)
goto err___drm_gem_fb_end_cpu_access;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index aa43265f4f4f..a5dbee6974ab 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -349,7 +349,7 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
int ret = 0;
if (drm_gem_is_imported(obj)) {
- ret = dma_buf_vmap(obj->dma_buf, map);
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
} else {
pgprot_t prot = PAGE_KERNEL;
@@ -409,7 +409,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
if (drm_gem_is_imported(obj)) {
- dma_buf_vunmap(obj->dma_buf, map);
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
dma_resv_assert_held(shmem->base.resv);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d828502268b8..a0a5d725eab0 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -453,7 +453,13 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev,
}
mutex_lock(&dev->object_name_lock);
- /* re-export the original imported/exported object */
+ /* re-export the original imported object */
+ if (obj->import_attach) {
+ dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
+ goto out_have_obj;
+ }
+
if (obj->dma_buf) {
get_dma_buf(obj->dma_buf);
dmabuf = obj->dma_buf;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 917ad527c961..40a50c60dfff 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
- dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map);
+ dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
lockdep_assert_held(&etnaviv_obj->lock);
- ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map);
+ ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
if (ret)
return NULL;
return map.vaddr;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 6f0a0bc71b06..43aa1f97378b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7061,7 +7061,8 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
- int ret, i;
+ long ret;
+ int i;
for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
if (new_plane_state->fence) {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 640c43bf62d4..724de7ed3c04 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1604,6 +1604,12 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ /* FIXME g4x can't generate an exact 2.7GHz with the 96MHz non-SSC refclk */
+ if (display->platform.g4x && port_clock == 268800)
+ port_clock = 270000;
+
/* eDP 1.4 rate select method. */
if (intel_dp->use_rate_select) {
*link_bw = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 05e440643aa2..f4f1c979d1b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -105,7 +105,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
if (!obj->base.filp)
return -ENODEV;
- ret = call_mmap(obj->base.filp, vma);
+ ret = vfs_mmap(obj->base.filp, vma);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 19a3eb82dc6a..9cbb0f68a5bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -6,6 +6,7 @@
#include <linux/pagevec.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
+#include <linux/uio.h>
#include <drm/drm_cache.h>
@@ -400,12 +401,12 @@ static int
shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
{
- struct address_space *mapping = obj->base.filp->f_mapping;
- const struct address_space_operations *aops = mapping->a_ops;
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
- u64 remain;
- loff_t pos;
- unsigned int pg;
+ struct file *file = obj->base.filp;
+ struct kiocb kiocb;
+ struct iov_iter iter;
+ ssize_t written;
+ u64 size = arg->size;
/* Caller already validated user args */
GEM_BUG_ON(!access_ok(user_data, arg->size));
@@ -428,63 +429,24 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
- /*
- * Before the pages are instantiated the object is treated as being
- * in the CPU domain. The pages will be clflushed as required before
- * use, and we can freely write into the pages directly. If userspace
- * races pwrite with any other operation; corruption will ensue -
- * that is userspace's prerogative!
- */
+ if (size > MAX_RW_COUNT)
+ return -EFBIG;
- remain = arg->size;
- pos = arg->offset;
- pg = offset_in_page(pos);
+ if (!file->f_op->write_iter)
+ return -EINVAL;
- do {
- unsigned int len, unwritten;
- struct folio *folio;
- void *data, *vaddr;
- int err;
- char __maybe_unused c;
-
- len = PAGE_SIZE - pg;
- if (len > remain)
- len = remain;
-
- /* Prefault the user page to reduce potential recursion */
- err = __get_user(c, user_data);
- if (err)
- return err;
-
- err = __get_user(c, user_data + len - 1);
- if (err)
- return err;
-
- err = aops->write_begin(obj->base.filp, mapping, pos, len,
- &folio, &data);
- if (err < 0)
- return err;
-
- vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
- pagefault_disable();
- unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
- pagefault_enable();
- kunmap_local(vaddr);
-
- err = aops->write_end(obj->base.filp, mapping, pos, len,
- len - unwritten, folio, data);
- if (err < 0)
- return err;
-
- /* We don't handle -EFAULT, leave it to the caller to check */
- if (unwritten)
- return -ENODEV;
-
- remain -= len;
- user_data += len;
- pos += len;
- pg = 0;
- } while (remain);
+ init_sync_kiocb(&kiocb, file);
+ kiocb.ki_pos = arg->offset;
+ iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)user_data, size);
+
+ written = file->f_op->write_iter(&kiocb, &iter);
+ BUG_ON(written == -EIOCBQUEUED);
+
+ if (written < 0)
+ return written;
+
+ if (written != size)
+ return -EIO;
return 0;
}
@@ -637,9 +599,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
{
struct drm_i915_gem_object *obj;
struct file *file;
- const struct address_space_operations *aops;
- loff_t pos;
- int err;
+ loff_t pos = 0;
+ ssize_t err;
GEM_WARN_ON(IS_DGFX(i915));
obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
@@ -649,29 +610,15 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
file = obj->base.filp;
- aops = file->f_mapping->a_ops;
- pos = 0;
- do {
- unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
- struct folio *folio;
- void *fsdata;
-
- err = aops->write_begin(file, file->f_mapping, pos, len,
- &folio, &fsdata);
- if (err < 0)
- goto fail;
+ err = kernel_write(file, data, size, &pos);
- memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len);
+ if (err < 0)
+ goto fail;
- err = aops->write_end(file, file->f_mapping, pos, len, len,
- folio, fsdata);
- if (err < 0)
- goto fail;
-
- size -= len;
- data += len;
- pos += len;
- } while (size);
+ if (err != size) {
+ err = -EIO;
+ goto fail;
+ }
return obj;
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 65d84a93c525..a09e2eb47175 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -5,16 +5,23 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_utils.h"
+static int add_param(struct fs_context *fc, const char *key, const char *val)
+{
+ return vfs_parse_fs_string(fc, key, val, strlen(val));
+}
+
void i915_gemfs_init(struct drm_i915_private *i915)
{
- char huge_opt[] = "huge=within_size"; /* r/w */
struct file_system_type *type;
+ struct fs_context *fc;
struct vfsmount *gemfs;
+ int ret;
/*
* By creating our own shmemfs mountpoint, we can pass in
@@ -38,8 +45,16 @@ void i915_gemfs_init(struct drm_i915_private *i915)
if (!type)
goto err;
- gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
- if (IS_ERR(gemfs))
+ fc = fs_context_for_mount(type, SB_KERNMOUNT);
+ if (IS_ERR(fc))
+ goto err;
+ ret = add_param(fc, "source", "tmpfs");
+ if (!ret)
+ ret = add_param(fc, "huge", "within_size");
+ if (!ret)
+ gemfs = fc_mount_longterm(fc);
+ put_fs_context(fc);
+ if (ret)
goto err;
i915->mm.gemfs = gemfs;
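A condensed, standalone sketch of the fs_context mount pattern that i915_gemfs_init() switches to above (the same pattern reappears for v3d further down). The helper name mount_private_tmpfs() is hypothetical; fs_context_for_mount(), vfs_parse_fs_string() and fc_mount_longterm() are the calls used in the hunk.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/mount.h>
#include <linux/string.h>

static struct vfsmount *mount_private_tmpfs(void)
{
	struct file_system_type *type = get_fs_type("tmpfs");
	struct fs_context *fc;
	struct vfsmount *mnt;
	int ret;

	if (!type)
		return ERR_PTR(-ENODEV);

	fc = fs_context_for_mount(type, SB_KERNMOUNT);
	if (IS_ERR(fc))
		return ERR_CAST(fc);

	/* Options are fed one by one instead of as a single "huge=..." string. */
	ret = vfs_parse_fs_string(fc, "source", "tmpfs", strlen("tmpfs"));
	if (!ret)
		ret = vfs_parse_fs_string(fc, "huge", "within_size",
					  strlen("within_size"));

	mnt = ret ? ERR_PTR(ret) : fc_mount_longterm(fc);
	put_fs_context(fc);
	return mnt;
}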
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 8f6fba4217ec..bc7527542fdc 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
return 0;
}
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+ struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
+ int i;
+
+ /* no need to wait for disabling the plane by CPU */
+ if (!mtk_crtc->cmdq_client.chan)
+ return;
+
+ if (!mtk_crtc->enabled)
+ return;
+
+ /* set pending plane state to disabled */
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *mtk_plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state);
+
+ if (mtk_plane->index == plane->index) {
+ memcpy(mtk_plane_state, plane_state, sizeof(*plane_state));
+ break;
+ }
+ }
+ mtk_crtc_update_config(mtk_crtc, false);
+
+ /* wait for planes to be disabled by CMDQ */
+ wait_event_timeout(mtk_crtc->cb_blocking_queue,
+ mtk_crtc->cmdq_vblank_cnt == 0,
+ msecs_to_jiffies(500));
+#endif
+}
+
void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *state)
{
@@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_blend_modes(comp),
mtk_ddp_comp_get_formats(comp),
- mtk_ddp_comp_get_num_formats(comp), i);
+ mtk_ddp_comp_get_num_formats(comp),
+ mtk_ddp_comp_is_afbc_supported(comp), i);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h
index 388e900b6f4d..828f109b83e7 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.h
@@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
unsigned int num_conn_routes);
int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state);
+void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane);
void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
struct drm_atomic_state *plane_state);
struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index edc6417639e6..ac6620e10262 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = {
.get_blend_modes = mtk_ovl_get_blend_modes,
.get_formats = mtk_ovl_get_formats,
.get_num_formats = mtk_ovl_get_num_formats,
+ .is_afbc_supported = mtk_ovl_is_afbc_supported,
};
static const struct mtk_ddp_comp_funcs ddp_postmask = {
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index 39720b27f4e9..7289b3dcf22f 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs {
u32 (*get_blend_modes)(struct device *dev);
const u32 *(*get_formats)(struct device *dev);
size_t (*get_num_formats)(struct device *dev);
+ bool (*is_afbc_supported)(struct device *dev);
void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next);
void (*add)(struct device *dev, struct mtk_mutex *mutex);
@@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp)
return 0;
}
+static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->is_afbc_supported)
+ return comp->funcs->is_afbc_supported(comp->dev);
+
+ return false;
+}
+
static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex)
{
if (comp->funcs && comp->funcs->add) {
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
index 04217a36939c..679d413bf10b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h
@@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev);
u32 mtk_ovl_get_blend_modes(struct device *dev);
const u32 *mtk_ovl_get_formats(struct device *dev);
size_t mtk_ovl_get_num_formats(struct device *dev);
+bool mtk_ovl_is_afbc_supported(struct device *dev);
void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex);
void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex);
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index d0581c4e3c99..e0236353d499 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev)
return ovl->data->num_formats;
}
+bool mtk_ovl_is_afbc_supported(struct device *dev)
+{
+ struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
+
+ return ovl->data->supports_afbc;
+}
+
int mtk_ovl_clk_enable(struct device *dev)
{
struct mtk_disp_ovl *ovl = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 6fb85bc6487a..a2fdceadf209 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -1095,7 +1095,6 @@ static const u32 mt8183_output_fmts[] = {
};
static const u32 mt8195_dpi_output_fmts[] = {
- MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_RGB888_2X12_LE,
MEDIA_BUS_FMT_RGB888_2X12_BE,
@@ -1103,18 +1102,19 @@ static const u32 mt8195_dpi_output_fmts[] = {
MEDIA_BUS_FMT_YUYV8_1X16,
MEDIA_BUS_FMT_YUYV10_1X20,
MEDIA_BUS_FMT_YUYV12_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_YUV8_1X24,
MEDIA_BUS_FMT_YUV10_1X30,
};
static const u32 mt8195_dp_intf_output_fmts[] = {
- MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_RGB888_1X24,
MEDIA_BUS_FMT_RGB888_2X12_LE,
MEDIA_BUS_FMT_RGB888_2X12_BE,
MEDIA_BUS_FMT_RGB101010_1X30,
MEDIA_BUS_FMT_YUYV8_1X16,
MEDIA_BUS_FMT_YUYV10_1X20,
+ MEDIA_BUS_FMT_BGR888_1X24,
MEDIA_BUS_FMT_YUV8_1X24,
MEDIA_BUS_FMT_YUV10_1X30,
};
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 655106bbb76d..cbc4f37da8ba 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+
mtk_plane_state->pending.enable = false;
wmb(); /* Make sure the above parameter is set before update */
mtk_plane_state->pending.dirty = true;
+
+ mtk_crtc_plane_disable(old_state->crtc, plane);
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
@@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 blend_modes,
- const u32 *formats, size_t num_formats, unsigned int plane_idx)
+ const u32 *formats, size_t num_formats,
+ bool supports_afbc, unsigned int plane_idx)
{
int err;
@@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
err = drm_universal_plane_init(dev, plane, possible_crtcs,
&mtk_plane_funcs, formats,
- num_formats, modifiers, type, NULL);
+ num_formats,
+ supports_afbc ? modifiers : NULL,
+ type, NULL);
if (err) {
DRM_ERROR("failed to initialize plane\n");
return err;
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 3b13b89989c7..95c5fa5295d8 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 blend_modes,
- const u32 *formats, size_t num_formats, unsigned int plane_idx);
+ const u32 *formats, size_t num_formats,
+ bool supports_afbc, unsigned int plane_idx);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c
index baa10227d51a..80c01017d642 100644
--- a/drivers/gpu/drm/nouveau/nvif/chan.c
+++ b/drivers/gpu/drm/nouveau/nvif/chan.c
@@ -39,6 +39,9 @@ nvif_chan_gpfifo_post(struct nvif_chan *chan)
const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size;
const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max;
+ if (!chan->func->gpfifo.post)
+ return 0;
+
return chan->func->gpfifo.post(chan, gpptr, pbptr);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 5657106c2f7d..15e2d505550f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -841,7 +841,6 @@ int panfrost_job_init(struct panfrost_device *pfdev)
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = 2,
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
- .timeout_wq = pfdev->reset.wq,
.name = "pan_js",
.dev = pfdev->dev,
};
@@ -879,6 +878,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
if (!pfdev->reset.wq)
return -ENOMEM;
+ args.timeout_wq = pfdev->reset.wq;
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bbd39348a7ab..7a3e510327b7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -26,7 +26,6 @@
* Jerome Glisse
*/
-#include <linux/console.h>
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
@@ -1635,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
pci_set_power_state(pdev, PCI_D3hot);
}
- if (notify_clients) {
- console_lock();
- drm_client_dev_suspend(dev, true);
- console_unlock();
- }
+ if (notify_clients)
+ drm_client_dev_suspend(dev, false);
+
return 0;
}
@@ -1661,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (notify_clients) {
- console_lock();
- }
if (resume) {
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- if (pci_enable_device(pdev)) {
- if (notify_clients)
- console_unlock();
+ if (pci_enable_device(pdev))
return -1;
- }
}
/* resume AGP if in use */
radeon_agp_resume(rdev);
@@ -1747,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
radeon_pm_compute_clocks(rdev);
- if (notify_clients) {
- drm_client_dev_resume(dev, true);
- console_unlock();
- }
+ if (notify_clients)
+ drm_client_dev_resume(dev, false);
return 0;
}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index e671aa241720..ac678de7fe5e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -355,17 +355,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
-/* drm_sched_entity_clear_dep - callback to clear the entities dependency */
-static void drm_sched_entity_clear_dep(struct dma_fence *f,
- struct dma_fence_cb *cb)
-{
- struct drm_sched_entity *entity =
- container_of(cb, struct drm_sched_entity, cb);
-
- entity->dependency = NULL;
- dma_fence_put(f);
-}
-
/*
* drm_sched_entity_wakeup - callback to clear the entity's dependency and
* wake up the scheduler
@@ -376,7 +365,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
struct drm_sched_entity *entity =
container_of(cb, struct drm_sched_entity, cb);
- drm_sched_entity_clear_dep(f, cb);
+ entity->dependency = NULL;
+ dma_fence_put(f);
drm_sched_wakeup(entity->rq->sched);
}
@@ -429,13 +419,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
fence = dma_fence_get(&s_fence->scheduled);
dma_fence_put(entity->dependency);
entity->dependency = fence;
- if (!dma_fence_add_callback(fence, &entity->cb,
- drm_sched_entity_clear_dep))
- return true;
-
- /* Ignore it when it is already scheduled */
- dma_fence_put(fence);
- return false;
}
if (!dma_fence_add_callback(entity->dependency, &entity->cb,
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c
index 4c5e18590a5c..8ec6ed82b3d9 100644
--- a/drivers/gpu/drm/v3d/v3d_gemfs.c
+++ b/drivers/gpu/drm/v3d/v3d_gemfs.c
@@ -3,14 +3,21 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/fs_context.h>
#include "v3d_drv.h"
+static int add_param(struct fs_context *fc, const char *key, const char *val)
+{
+ return vfs_parse_fs_string(fc, key, val, strlen(val));
+}
+
void v3d_gemfs_init(struct v3d_dev *v3d)
{
- char huge_opt[] = "huge=within_size";
struct file_system_type *type;
+ struct fs_context *fc;
struct vfsmount *gemfs;
+ int ret;
/*
* By creating our own shmemfs mountpoint, we can pass in
@@ -28,8 +35,16 @@ void v3d_gemfs_init(struct v3d_dev *v3d)
if (!type)
goto err;
- gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt);
- if (IS_ERR(gemfs))
+ fc = fs_context_for_mount(type, SB_KERNMOUNT);
+ if (IS_ERR(fc))
+ goto err;
+ ret = add_param(fc, "source", "tmpfs");
+ if (!ret)
+ ret = add_param(fc, "huge", "within_size");
+ if (!ret)
+ gemfs = fc_mount_longterm(fc);
+ put_fs_context(fc);
+ if (ret)
goto err;
v3d->gemfs = gemfs;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 1118a0250279..ce49282198cb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -204,15 +204,16 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct dma_buf_attachment *attach = obj->import_attach;
if (drm_gem_is_imported(obj)) {
- struct dma_buf *dmabuf = obj->dma_buf;
+ struct dma_buf *dmabuf = attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
virtgpu_dma_buf_unmap(bo);
dma_resv_unlock(dmabuf->resv);
- dma_buf_detach(dmabuf, obj->import_attach);
+ dma_buf_detach(dmabuf, attach);
dma_buf_put(dmabuf);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index c55382167c1b..e417921af584 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -85,10 +85,10 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
int ret;
if (drm_gem_is_imported(obj)) {
- ret = dma_buf_vmap(obj->dma_buf, map);
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
if (drm_WARN_ON(obj->dev, map->is_iomem)) {
- dma_buf_vunmap(obj->dma_buf, map);
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
return -EIO;
}
}
@@ -102,7 +102,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
if (drm_gem_is_imported(obj))
- dma_buf_vunmap(obj->dma_buf, map);
+ dma_buf_vunmap(obj->import_attach->dmabuf, map);
else
drm_gem_ttm_vunmap(obj, map);
}
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 6c4cb9576fb6..e3517ce2e18c 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -417,6 +417,8 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
+ xe_mocs_init_early(gt);
+
return 0;
}
@@ -630,17 +632,15 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = xe_gt_pagefault_init(gt);
+ err = xe_gt_sysfs_init(gt);
if (err)
return err;
- xe_mocs_init_early(gt);
-
- err = xe_gt_sysfs_init(gt);
+ err = gt_fw_domain_init(gt);
if (err)
return err;
- err = gt_fw_domain_init(gt);
+ err = xe_gt_pagefault_init(gt);
if (err)
return err;
@@ -839,6 +839,9 @@ static int gt_reset(struct xe_gt *gt)
goto err_out;
}
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
+ xe_gt_sriov_pf_stop_prepare(gt);
+
xe_uc_gucrc_disable(&gt->uc);
xe_uc_stop_prepare(&gt->uc);
xe_gt_pagefault_reset(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 187fa6490eaf..6357325f3939 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -24,7 +24,7 @@
extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
- return should_fail(&gt_reset_failure, 1);
+ return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&gt_reset_failure, 1);
}
struct xe_gt *xe_gt_alloc(struct xe_tile *tile);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index c08efca6420e..35489fa81825 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -172,6 +172,25 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid)
pf_clear_vf_scratch_regs(gt, vfid);
}
+static void pf_cancel_restart(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ if (cancel_work_sync(&gt->sriov.pf.workers.restart))
+ xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
+}
+
+/**
+ * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support.
+ * @gt: the &xe_gt
+ *
+ * This function can only be called on the PF.
+ */
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+ pf_cancel_restart(gt);
+}
+
static void pf_restart(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index f474509411c0..e2b2ff8132dc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -13,6 +13,7 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
+void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
#else
static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
@@ -29,6 +30,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
+static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt)
+{
+}
+
static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 2420a548cacc..53a44702c04a 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -2364,6 +2364,21 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
return err;
}
+static int pf_push_self_config(struct xe_gt *gt)
+{
+ int err;
+
+ err = pf_push_full_vf_config(gt, PFID);
+ if (err) {
+ xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n",
+ ERR_PTR(err));
+ return err;
+ }
+
+ xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n");
+ return 0;
+}
+
static void fini_config(void *arg)
{
struct xe_gt *gt = arg;
@@ -2387,9 +2402,17 @@ static void fini_config(void *arg)
int xe_gt_sriov_pf_config_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ int err;
xe_gt_assert(gt, IS_SRIOV_PF(xe));
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (err)
+ return err;
+
return devm_add_action_or_reset(xe->drm.dev, fini_config, gt);
}
@@ -2407,6 +2430,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt)
unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt));
unsigned int fail = 0, skip = 0;
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ pf_push_self_config(gt);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
for (n = 1; n <= total_vfs; n++) {
if (xe_gt_sriov_pf_config_is_empty(gt, n))
skip++;
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 66bc02302c55..07a5161c7d5b 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -1817,8 +1817,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
xe_bo_assert_held(bo);
/* Use bounce buffer for small access and unaligned access */
- if (len & XE_CACHELINE_MASK ||
- ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) {
+ if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
+ !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
int buf_offset = 0;
/*
@@ -1848,7 +1848,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
err = xe_migrate_access_memory(m, bo,
offset & ~XE_CACHELINE_MASK,
(void *)ptr,
- sizeof(bounce), 0);
+ sizeof(bounce), write);
if (err)
return err;
} else {
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index bc1689db4cd7..7b50c7c1ee21 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 *dw, int i)
+static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
{
dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
- MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX;
- dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR;
- dw[i++] = 0;
+ MI_FLUSH_IMM_DW;
+
+ dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
+ dw[i++] = val;
return i;
}
@@ -397,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
static void emit_migration_job_gen12(struct xe_sched_job *job,
struct xe_lrc *lrc, u32 seqno)
{
+ u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
u32 dw[MAX_JOB_SIZE_DW], i = 0;
i = emit_copy_timestamp(lrc, dw, i);
- i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
- seqno, dw, i);
+ i = emit_store_imm_ggtt(saddr, seqno, dw, i);
dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
- if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) {
- /* XXX: Do we need this? Leaving for now. */
- dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(dw, i);
- dw[i++] = preparser_disable(false);
- }
+ dw[i++] = preparser_disable(true);
+ i = emit_flush_invalidate(saddr, seqno, dw, i);
+ dw[i++] = preparser_disable(false);
i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index b348d0464314..768f4df623c4 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1883,9 +1883,12 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
/*
* 7 extra bytes are necessary to achieve proper functionality
* of implement() working on 8 byte chunks
+ * 1 extra byte for the report ID if it is 0 (i.e. not used), so
+ * we can reserve that extra byte in the first position of the buffer
+ * when sending the report to .raw_request()
*/
- u32 len = hid_report_len(report) + 7;
+ u32 len = hid_report_len(report) + 7 + (report->id == 0);
return kzalloc(len, flags);
}
@@ -1973,7 +1976,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
int __hid_request(struct hid_device *hid, struct hid_report *report,
enum hid_class_request reqtype)
{
- char *buf;
+ char *buf, *data_buf;
int ret;
u32 len;
@@ -1981,13 +1984,19 @@ int __hid_request(struct hid_device *hid, struct hid_report *report,
if (!buf)
return -ENOMEM;
+ data_buf = buf;
len = hid_report_len(report);
+ if (report->id == 0) {
+ /* reserve the first byte for the report ID */
+ data_buf++;
+ len++;
+ }
+
if (reqtype == HID_REQ_SET_REPORT)
- hid_output_report(report, buf);
+ hid_output_report(report, data_buf);
- ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
- report->type, reqtype);
+ ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype);
if (ret < 0) {
dbg_hid("unable to complete request: %d\n", ret);
goto out;
@@ -2294,6 +2303,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
case BUS_I2C:
bus = "I2C";
break;
+ case BUS_SDW:
+ bus = "SOUNDWIRE";
+ break;
case BUS_VIRTUAL:
bus = "VIRTUAL";
break;
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index c6b6b1029540..4424c0512bae 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3299,7 +3299,7 @@ static const char *keys[KEY_MAX + 1] = {
[BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap",
[BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_TOOL_QUADTAP] = "ToolQuadrupleTap",
[BTN_GEAR_DOWN] = "BtnGearDown", [BTN_GEAR_UP] = "BtnGearUp",
- [BTN_WHEEL] = "BtnWheel", [KEY_OK] = "Ok",
+ [KEY_OK] = "Ok",
[KEY_SELECT] = "Select", [KEY_GOTO] = "Goto",
[KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2",
[KEY_OPTION] = "Option", [KEY_INFO] = "Info",
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 1cd188b73b74..57623ca7f350 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -9,7 +9,7 @@ config HYPERV
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
- select SYSFB if !HYPERV_VTL_MODE
+ select SYSFB if EFI && !HYPERV_VTL_MODE
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 35f26fa1ffe7..7c7c66e0dc3f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -18,6 +18,7 @@
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <linux/set_memory.h>
+#include <linux/export.h>
#include <asm/page.h>
#include <asm/mshyperv.h>
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6e084c207414..65dd299e2944 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
#include <linux/sched/isolation.h>
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index be490c598785..1fe3573ae52a 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -519,7 +519,10 @@ void vmbus_set_event(struct vmbus_channel *channel)
else
WARN_ON_ONCE(1);
} else {
- hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+ u64 control = HVCALL_SIGNAL_EVENT;
+
+ control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
+ hv_do_fast_hypercall8(control, channel->sig_event);
}
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 308c8f279df8..b14c5f9e0ef2 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -85,8 +85,10 @@ int hv_post_message(union hv_connection_id connection_id,
else
status = HV_STATUS_INVALID_PARAMETER;
} else {
- status = hv_do_hypercall(HVCALL_POST_MESSAGE,
- aligned_msg, NULL);
+ u64 control = HVCALL_POST_MESSAGE;
+
+ control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
+ status = hv_do_hypercall(control, aligned_msg, NULL);
}
local_irq_restore(flags);
diff --git a/drivers/hv/hv_proc.c b/drivers/hv/hv_proc.c
index 7d7ecb6f6137..fbb4eb3901bb 100644
--- a/drivers/hv/hv_proc.c
+++ b/drivers/hv/hv_proc.c
@@ -6,6 +6,7 @@
#include <linux/slab.h>
#include <linux/cpuhotplug.h>
#include <linux/minmax.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
/*
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
index 2575e6d7a71f..6f227a8a5af7 100644
--- a/drivers/hv/mshv_common.c
+++ b/drivers/hv/mshv_common.c
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <asm/mshyperv.h>
#include <linux/resume_user_mode.h>
+#include <linux/export.h>
#include "mshv.h"
diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c
index 8dd22be2ca0b..48c514da34eb 100644
--- a/drivers/hv/mshv_eventfd.c
+++ b/drivers/hv/mshv_eventfd.c
@@ -377,10 +377,11 @@ static int mshv_irqfd_assign(struct mshv_partition *pt,
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
struct mshv_irqfd *irqfd, *tmp;
unsigned int events;
- struct fd f;
int ret;
int idx;
+ CLASS(fd, f)(args->fd);
+
irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
if (!irqfd)
return -ENOMEM;
@@ -390,8 +391,7 @@ static int mshv_irqfd_assign(struct mshv_partition *pt,
INIT_WORK(&irqfd->irqfd_shutdown, mshv_irqfd_shutdown);
seqcount_spinlock_init(&irqfd->irqfd_irqe_sc, &pt->pt_irqfds_lock);
- f = fdget(args->fd);
- if (!fd_file(f)) {
+ if (fd_empty(f)) {
ret = -EBADF;
goto out;
}
@@ -496,12 +496,6 @@ static int mshv_irqfd_assign(struct mshv_partition *pt,
mshv_assert_irq_slow(irqfd);
srcu_read_unlock(&pt->pt_irq_srcu, idx);
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the POLLHUP
- */
- fdput(f);
-
return 0;
fail:
@@ -514,8 +508,6 @@ fail:
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
- fdput(f);
-
out:
kfree(irqfd);
return ret;
diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
index a222a16107f6..c9c274f29c3c 100644
--- a/drivers/hv/mshv_root_hv_call.c
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
#include "mshv_root.h"
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3c9b02471760..23ce1fb70de1 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/io.h>
+#include <linux/export.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 33b524b4eb5e..2ed5a1e89d69 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2509,7 +2509,7 @@ static int vmbus_acpi_add(struct platform_device *pdev)
return 0;
}
#endif
-
+#ifndef HYPERVISOR_CALLBACK_VECTOR
static int vmbus_set_irq(struct platform_device *pdev)
{
struct irq_data *data;
@@ -2534,6 +2534,7 @@ static int vmbus_set_irq(struct platform_device *pdev)
return 0;
}
+#endif
static int vmbus_device_add(struct platform_device *pdev)
{
@@ -2549,11 +2550,11 @@ static int vmbus_device_add(struct platform_device *pdev)
if (ret)
return ret;
- if (!__is_defined(HYPERVISOR_CALLBACK_VECTOR))
- ret = vmbus_set_irq(pdev);
+#ifndef HYPERVISOR_CALLBACK_VECTOR
+ ret = vmbus_set_irq(pdev);
if (ret)
return ret;
-
+#endif
for_each_of_range(&parser, &range) {
struct resource *res;
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index e1a7f7aa7f80..b7b911f8359c 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -89,6 +89,7 @@ struct ccp_device {
struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */
u8 *cmd_buffer;
u8 *buffer;
+ int buffer_recv_size; /* number of received bytes in buffer */
int target[6];
DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
DECLARE_BITMAP(fan_cnct, NUM_FANS);
@@ -146,6 +147,9 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2,
if (!t)
return -ETIMEDOUT;
+ if (ccp->buffer_recv_size != IN_BUFFER_SIZE)
+ return -EPROTO;
+
return ccp_get_errno(ccp);
}
@@ -157,6 +161,7 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8
spin_lock(&ccp->wait_input_report_lock);
if (!completion_done(&ccp->wait_input_report)) {
memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size));
+ ccp->buffer_recv_size = size;
complete_all(&ccp->wait_input_report);
}
spin_unlock(&ccp->wait_input_report_lock);
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index a4a41742786b..9a5fd16a4ec2 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -97,7 +97,7 @@
* Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
* (Specific for SQ52206)
* Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
- * Energy (mJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain
+ * Energy (uJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain * 1000
*/
#define INA238_CALIBRATION_VALUE 16384
#define INA238_FIXED_SHUNT 20000
@@ -500,9 +500,9 @@ static ssize_t energy1_input_show(struct device *dev,
if (ret)
return ret;
- /* result in mJ */
- energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ /* result in uJ */
+ energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * 10 *
+ data->config->power_calculate_factor, 4 * data->rshunt);
return sysfs_emit(buf, "%llu\n", energy);
}
diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c
index 2bc8cccb01fd..52d4000902d5 100644
--- a/drivers/hwmon/pmbus/ucd9000.c
+++ b/drivers/hwmon/pmbus/ucd9000.c
@@ -226,15 +226,15 @@ static int ucd9000_gpio_set(struct gpio_chip *gc, unsigned int offset,
}
if (value) {
- if (ret & UCD9000_GPIO_CONFIG_STATUS)
+ if (ret & UCD9000_GPIO_CONFIG_OUT_VALUE)
return 0;
- ret |= UCD9000_GPIO_CONFIG_STATUS;
+ ret |= UCD9000_GPIO_CONFIG_OUT_VALUE;
} else {
- if (!(ret & UCD9000_GPIO_CONFIG_STATUS))
+ if (!(ret & UCD9000_GPIO_CONFIG_OUT_VALUE))
return 0;
- ret &= ~UCD9000_GPIO_CONFIG_STATUS;
+ ret &= ~UCD9000_GPIO_CONFIG_OUT_VALUE;
}
ret |= UCD9000_GPIO_CONFIG_ENABLE;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 8b01df3cc8e9..5fcc9f6c33e5 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1472,7 +1472,9 @@ omap_i2c_probe(struct platform_device *pdev)
}
/* reset ASAP, clearing any IRQs */
- omap_i2c_init(omap);
+ r = omap_i2c_init(omap);
+ if (r)
+ goto err_mux_state_deselect;
if (omap->rev < OMAP_I2C_OMAP1_REV_2)
r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr,
@@ -1515,12 +1517,13 @@ omap_i2c_probe(struct platform_device *pdev)
err_unuse_clocks:
omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+err_mux_state_deselect:
if (omap->mux_state)
mux_state_deselect(omap->mux_state);
err_put_pm:
- pm_runtime_dont_use_autosuspend(omap->dev);
pm_runtime_put_sync(omap->dev);
err_disable_pm:
+ pm_runtime_dont_use_autosuspend(omap->dev);
pm_runtime_disable(&pdev->dev);
return r;
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index 6059f585843e..fc348924d522 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -452,8 +452,10 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len)
if (!(status & I2C_STATUS_BUS_ACTIVE))
break;
- if (time_after(jiffies, timeout))
+ if (time_after(jiffies, timeout)) {
ret = -ETIMEDOUT;
+ break;
+ }
usleep_range(len, len * 2);
}
diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
index 157c64e27d0b..f84ec056e36d 100644
--- a/drivers/i2c/busses/i2c-stm32.c
+++ b/drivers/i2c/busses/i2c-stm32.c
@@ -102,7 +102,6 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
void *dma_async_param)
{
struct dma_async_tx_descriptor *txdesc;
- struct device *chan_dev;
int ret;
if (rd_wr) {
@@ -116,11 +115,10 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
}
dma->dma_len = len;
- chan_dev = dma->chan_using->device->dev;
- dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len,
+ dma->dma_buf = dma_map_single(dev, buf, dma->dma_len,
dma->dma_data_dir);
- if (dma_mapping_error(chan_dev, dma->dma_buf)) {
+ if (dma_mapping_error(dev, dma->dma_buf)) {
dev_err(dev, "DMA mapping failed\n");
return -EINVAL;
}
@@ -150,7 +148,7 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma,
return 0;
err:
- dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len,
+ dma_unmap_single(dev, dma->dma_buf, dma->dma_len,
dma->dma_data_dir);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index e4aaeb2262d0..73a7b8894c0d 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -739,12 +739,13 @@ static void stm32f7_i2c_disable_dma_req(struct stm32f7_i2c_dev *i2c_dev)
static void stm32f7_i2c_dma_callback(void *arg)
{
- struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg;
+ struct stm32f7_i2c_dev *i2c_dev = arg;
struct stm32_i2c_dma *dma = i2c_dev->dma;
- struct device *dev = dma->chan_using->device->dev;
stm32f7_i2c_disable_dma_req(i2c_dev);
- dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir);
+ dmaengine_terminate_async(dma->chan_using);
+ dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len,
+ dma->dma_data_dir);
complete(&dma->dma_complete);
}
@@ -1510,7 +1511,6 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev,
u16 addr = f7_msg->addr;
void __iomem *base = i2c_dev->base;
struct device *dev = i2c_dev->dev;
- struct stm32_i2c_dma *dma = i2c_dev->dma;
/* Bus error */
if (status & STM32F7_I2C_ISR_BERR) {
@@ -1551,10 +1551,8 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev,
}
/* Disable dma */
- if (i2c_dev->use_dma) {
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
- }
+ if (i2c_dev->use_dma)
+ stm32f7_i2c_dma_callback(i2c_dev);
i2c_dev->master_mode = false;
complete(&i2c_dev->complete);
@@ -1600,7 +1598,6 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
{
struct stm32f7_i2c_dev *i2c_dev = data;
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
- struct stm32_i2c_dma *dma = i2c_dev->dma;
void __iomem *base = i2c_dev->base;
u32 status, mask;
int ret;
@@ -1619,10 +1616,8 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
__func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
- if (i2c_dev->use_dma) {
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
- }
+ if (i2c_dev->use_dma)
+ stm32f7_i2c_dma_callback(i2c_dev);
f7_msg->result = -ENXIO;
}
@@ -1640,8 +1635,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ);
if (!ret) {
dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
- stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_async(dma->chan_using);
+ stm32f7_i2c_dma_callback(i2c_dev);
f7_msg->result = -ETIMEDOUT;
}
}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 0862b98007f5..6316360475e7 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -607,7 +607,6 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode;
- acpi_handle handle = ACPI_HANDLE(i2c_dev->dev);
struct i2c_timings *t = &i2c_dev->timings;
int err;
@@ -619,11 +618,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
* emit a noisy warning on error, which won't stay unnoticed and
* won't hose machine entirely.
*/
- if (handle)
- err = acpi_evaluate_object(handle, "_RST", NULL, NULL);
- else
- err = reset_control_reset(i2c_dev->rst);
-
+ err = device_reset(i2c_dev->dev);
WARN_ON_ONCE(err);
if (IS_DVC(i2c_dev))
@@ -1666,19 +1661,6 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
i2c_dev->is_vi = true;
}
-static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev)
-{
- if (ACPI_HANDLE(i2c_dev->dev))
- return 0;
-
- i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c");
- if (IS_ERR(i2c_dev->rst))
- return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst),
- "failed to get reset control\n");
-
- return 0;
-}
-
static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev)
{
int err;
@@ -1788,10 +1770,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
tegra_i2c_parse_dt(i2c_dev);
- err = tegra_i2c_init_reset(i2c_dev);
- if (err)
- return err;
-
err = tegra_i2c_init_clocks(i2c_dev);
if (err)
return err;
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 9b05ff53d3d7..af1381949f50 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -116,15 +116,16 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
for (i = 0; i < num; i++) {
struct virtio_i2c_req *req = &reqs[i];
- wait_for_completion(&req->completion);
-
- if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
- failed = true;
+ if (!failed) {
+ if (wait_for_completion_interruptible(&req->completion))
+ failed = true;
+ else if (req->in_hdr.status != VIRTIO_I2C_MSG_OK)
+ failed = true;
+ else
+ j++;
+ }
i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
-
- if (!failed)
- j++;
}
return j;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 2ad2b1838f0f..38eabf1173da 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -573,7 +573,7 @@ static int i2c_device_probe(struct device *dev)
goto err_clear_wakeup_irq;
do_power_on = !i2c_acpi_waive_d0_probe(dev);
- status = dev_pm_domain_attach(&client->dev, do_power_on);
+ status = dev_pm_domain_attach(&client->dev, do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0);
if (status)
goto err_clear_wakeup_irq;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 12598feaa693..b10a30960e1e 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -877,6 +877,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
+ synchronize_irq(data->irq);
+
ret = __fxls8962af_fifo_set_mode(data, false);
if (data->enable_event)
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 99cb661fabb2..a7961c610ed2 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -1353,6 +1353,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
union acpi_object *ont;
union acpi_object *elements;
acpi_status status;
+ struct device *parent = indio_dev->dev.parent;
int ret = -EINVAL;
unsigned int val;
int i, j;
@@ -1371,7 +1372,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
};
- adev = ACPI_COMPANION(indio_dev->dev.parent);
+ adev = ACPI_COMPANION(parent);
if (!adev)
return -ENXIO;
@@ -1380,8 +1381,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
if (status == AE_NOT_FOUND) {
return -ENXIO;
} else if (ACPI_FAILURE(status)) {
- dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
- status);
+ dev_warn(parent, "failed to execute _ONT: %d\n", status);
return status;
}
@@ -1457,12 +1457,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
}
ret = 0;
- dev_info(&indio_dev->dev, "computed mount matrix from ACPI\n");
+ dev_info(parent, "computed mount matrix from ACPI\n");
out:
kfree(buffer.pointer);
if (ret)
- dev_dbg(&indio_dev->dev,
+ dev_dbg(parent,
"failed to apply ACPI orientation data: %d\n", ret);
return ret;
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index d96bd12dfea6..cabf5511d116 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -1953,8 +1953,9 @@ static int ad7380_probe(struct spi_device *spi)
if (st->chip_info->has_hardware_gain) {
device_for_each_child_node_scoped(dev, node) {
- unsigned int channel, gain;
+ unsigned int channel;
int gain_idx;
+ u16 gain;
ret = fwnode_property_read_u32(node, "reg", &channel);
if (ret)
@@ -1966,7 +1967,7 @@ static int ad7380_probe(struct spi_device *spi)
"Invalid channel number %i\n",
channel);
- ret = fwnode_property_read_u32(node, "adi,gain-milli",
+ ret = fwnode_property_read_u16(node, "adi,gain-milli",
&gain);
if (ret && ret != -EINVAL)
return dev_err_probe(dev, ret,
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index edd0c3a35ab7..202561cad401 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -308,7 +308,6 @@ static void ad7949_disable_reg(void *reg)
static int ad7949_spi_probe(struct spi_device *spi)
{
- u32 spi_ctrl_mask = spi->controller->bits_per_word_mask;
struct device *dev = &spi->dev;
const struct ad7949_adc_spec *spec;
struct ad7949_adc_chip *ad7949_adc;
@@ -337,11 +336,11 @@ static int ad7949_spi_probe(struct spi_device *spi)
ad7949_adc->resolution = spec->resolution;
/* Set SPI bits per word */
- if (spi_ctrl_mask & SPI_BPW_MASK(ad7949_adc->resolution)) {
+ if (spi_is_bpw_supported(spi, ad7949_adc->resolution)) {
spi->bits_per_word = ad7949_adc->resolution;
- } else if (spi_ctrl_mask == SPI_BPW_MASK(16)) {
+ } else if (spi_is_bpw_supported(spi, 16)) {
spi->bits_per_word = 16;
- } else if (spi_ctrl_mask == SPI_BPW_MASK(8)) {
+ } else if (spi_is_bpw_supported(spi, 8)) {
spi->bits_per_word = 8;
} else {
dev_err(dev, "unable to find common BPW with spi controller\n");
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 4116c44197b8..2dbaa0b5b3d6 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -445,7 +445,7 @@ static int axi_adc_raw_read(struct iio_backend *back, u32 *val)
static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val)
{
struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- int addr;
+ u32 addr, reg_val;
guard(mutex)(&st->lock);
@@ -455,7 +455,9 @@ static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val)
*/
addr = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, reg) | ADI_AXI_REG_READ_BIT;
axi_adc_raw_write(back, addr);
- axi_adc_raw_read(back, val);
+ axi_adc_raw_read(back, &reg_val);
+
+ *val = FIELD_GET(ADI_AXI_REG_VALUE_MASK, reg_val);
/* Write 0x0 on the bus to get back to ADC mode */
axi_adc_raw_write(back, 0);
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index 71584ffd3632..1b49325ec1ce 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -187,6 +187,7 @@ static struct iio_map axp717_maps[] = {
.consumer_channel = "batt_chrg_i",
.adc_channel_label = "batt_chrg_i",
},
+ { }
};
/*
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index a7e9912fb44a..9dd547e62b6c 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -511,10 +511,10 @@ static const struct iio_event_spec max1363_events[] = {
MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec), \
MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec), \
MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec), \
- MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, ev_spec, num_ev_spec), \
+ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, ev_spec, num_ev_spec), \
IIO_CHAN_SOFT_TIMESTAMP(8) \
}
@@ -532,23 +532,23 @@ static const struct iio_chan_spec max1363_channels[] =
/* Applies to max1236, max1237 */
static const enum max1363_modes max1236_mode_list[] = {
_s0, _s1, _s2, _s3,
- s0to1, s0to2, s0to3,
+ s0to1, s0to2, s2to3, s0to3,
d0m1, d2m3, d1m0, d3m2,
d0m1to2m3, d1m0to3m2,
- s2to3,
};
/* Applies to max1238, max1239 */
static const enum max1363_modes max1238_mode_list[] = {
_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11,
s0to1, s0to2, s0to3, s0to4, s0to5, s0to6,
+ s6to7, s6to8, s6to9, s6to10, s6to11,
s0to7, s0to8, s0to9, s0to10, s0to11,
d0m1, d2m3, d4m5, d6m7, d8m9, d10m11,
d1m0, d3m2, d5m4, d7m6, d9m8, d11m10,
- d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11,
- d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10,
- s6to7, s6to8, s6to9, s6to10, s6to11,
- d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10,
+ d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9,
+ d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2,
+ d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8,
+ d7m6to11m10, d1m0to11m10,
};
#define MAX1363_12X_CHANS(bits) { \
@@ -584,16 +584,15 @@ static const struct iio_chan_spec max1238_channels[] = MAX1363_12X_CHANS(12);
static const enum max1363_modes max11607_mode_list[] = {
_s0, _s1, _s2, _s3,
- s0to1, s0to2, s0to3,
- s2to3,
+ s0to1, s0to2, s2to3,
+ s0to3,
d0m1, d2m3, d1m0, d3m2,
d0m1to2m3, d1m0to3m2,
};
static const enum max1363_modes max11608_mode_list[] = {
_s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7,
- s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7,
- s6to7,
+ s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s6to7, s0to7,
d0m1, d2m3, d4m5, d6m7,
d1m0, d3m2, d5m4, d7m6,
d0m1to2m3, d0m1to4m5, d0m1to6m7,
@@ -609,14 +608,14 @@ static const enum max1363_modes max11608_mode_list[] = {
MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0), \
MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0), \
MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0), \
- MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0), \
- MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0), \
- MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0), \
- MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0), \
- MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0), \
- MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0), \
- MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0), \
- MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0), \
+ MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0), \
+ MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0), \
+ MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0), \
+ MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0), \
+ MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0), \
+ MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0), \
+ MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0), \
+ MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0), \
IIO_CHAN_SOFT_TIMESTAMP(16) \
}
static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8);
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index bd3458965bff..21c04a98b3b6 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -430,10 +430,9 @@ static int stm32_adc_irq_probe(struct platform_device *pdev,
return -ENOMEM;
}
- for (i = 0; i < priv->cfg->num_irqs; i++) {
- irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler);
- irq_set_handler_data(priv->irq[i], priv);
- }
+ for (i = 0; i < priv->cfg->num_irqs; i++)
+ irq_set_chained_handler_and_data(priv->irq[i],
+ stm32_adc_irq_handler, priv);
return 0;
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 8ce1dccfea4f..dac593be5695 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -154,7 +154,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs)
return err;
st_accel_set_fullscale_error:
- dev_err(&indio_dev->dev, "failed to set new fullscale.\n");
+ dev_err(indio_dev->dev.parent, "failed to set new fullscale.\n");
return err;
}
@@ -231,8 +231,7 @@ int st_sensors_power_enable(struct iio_dev *indio_dev)
ARRAY_SIZE(regulator_names),
regulator_names);
if (err)
- return dev_err_probe(&indio_dev->dev, err,
- "unable to enable supplies\n");
+ return dev_err_probe(parent, err, "unable to enable supplies\n");
return 0;
}
@@ -241,13 +240,14 @@ EXPORT_SYMBOL_NS(st_sensors_power_enable, "IIO_ST_SENSORS");
static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
{
+ struct device *parent = indio_dev->dev.parent;
struct st_sensor_data *sdata = iio_priv(indio_dev);
/* Sensor does not support interrupts */
if (!sdata->sensor_settings->drdy_irq.int1.addr &&
!sdata->sensor_settings->drdy_irq.int2.addr) {
if (pdata->drdy_int_pin)
- dev_info(&indio_dev->dev,
+ dev_info(parent,
"DRDY on pin INT%d specified, but sensor does not support interrupts\n",
pdata->drdy_int_pin);
return 0;
@@ -256,29 +256,27 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
switch (pdata->drdy_int_pin) {
case 1:
if (!sdata->sensor_settings->drdy_irq.int1.mask) {
- dev_err(&indio_dev->dev,
- "DRDY on INT1 not available.\n");
+ dev_err(parent, "DRDY on INT1 not available.\n");
return -EINVAL;
}
sdata->drdy_int_pin = 1;
break;
case 2:
if (!sdata->sensor_settings->drdy_irq.int2.mask) {
- dev_err(&indio_dev->dev,
- "DRDY on INT2 not available.\n");
+ dev_err(parent, "DRDY on INT2 not available.\n");
return -EINVAL;
}
sdata->drdy_int_pin = 2;
break;
default:
- dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n");
+ dev_err(parent, "DRDY on pdata not valid.\n");
return -EINVAL;
}
if (pdata->open_drain) {
if (!sdata->sensor_settings->drdy_irq.int1.addr_od &&
!sdata->sensor_settings->drdy_irq.int2.addr_od)
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"open drain requested but unsupported.\n");
else
sdata->int_pin_open_drain = true;
@@ -336,6 +334,7 @@ EXPORT_SYMBOL_NS(st_sensors_dev_name_probe, "IIO_ST_SENSORS");
int st_sensors_init_sensor(struct iio_dev *indio_dev,
struct st_sensors_platform_data *pdata)
{
+ struct device *parent = indio_dev->dev.parent;
struct st_sensor_data *sdata = iio_priv(indio_dev);
struct st_sensors_platform_data *of_pdata;
int err = 0;
@@ -343,7 +342,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
mutex_init(&sdata->odr_lock);
/* If OF/DT pdata exists, it will take precedence of anything else */
- of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
+ of_pdata = st_sensors_dev_probe(parent, pdata);
if (IS_ERR(of_pdata))
return PTR_ERR(of_pdata);
if (of_pdata)
@@ -370,7 +369,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
if (err < 0)
return err;
} else
- dev_info(&indio_dev->dev, "Full-scale not possible\n");
+ dev_info(parent, "Full-scale not possible\n");
err = st_sensors_set_odr(indio_dev, sdata->odr);
if (err < 0)
@@ -405,7 +404,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
mask = sdata->sensor_settings->drdy_irq.int2.mask_od;
}
- dev_info(&indio_dev->dev,
+ dev_info(parent,
"set interrupt line to open drain mode on pin %d\n",
sdata->drdy_int_pin);
err = st_sensors_write_data_with_mask(indio_dev, addr,
@@ -593,21 +592,20 @@ EXPORT_SYMBOL_NS(st_sensors_get_settings_index, "IIO_ST_SENSORS");
int st_sensors_verify_id(struct iio_dev *indio_dev)
{
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ struct device *parent = indio_dev->dev.parent;
int wai, err;
if (sdata->sensor_settings->wai_addr) {
err = regmap_read(sdata->regmap,
sdata->sensor_settings->wai_addr, &wai);
if (err < 0) {
- dev_err(&indio_dev->dev,
- "failed to read Who-Am-I register.\n");
- return err;
+ return dev_err_probe(parent, err,
+ "failed to read Who-Am-I register.\n");
}
if (sdata->sensor_settings->wai != wai) {
- dev_warn(&indio_dev->dev,
- "%s: WhoAmI mismatch (0x%x).\n",
- indio_dev->name, wai);
+ dev_warn(parent, "%s: WhoAmI mismatch (0x%x).\n",
+ indio_dev->name, wai);
}
}
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 9d4bf822a15d..8a8ab688d798 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -127,7 +127,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->trig = devm_iio_trigger_alloc(parent, "%s-trigger",
indio_dev->name);
if (sdata->trig == NULL) {
- dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n");
+ dev_err(parent, "failed to allocate iio trigger.\n");
return -ENOMEM;
}
@@ -143,7 +143,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
case IRQF_TRIGGER_FALLING:
case IRQF_TRIGGER_LOW:
if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n");
if (irq_trig == IRQF_TRIGGER_FALLING)
irq_trig = IRQF_TRIGGER_RISING;
@@ -156,21 +156,19 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->sensor_settings->drdy_irq.mask_ihl, 1);
if (err < 0)
return err;
- dev_info(&indio_dev->dev,
+ dev_info(parent,
"interrupts on the falling edge or active low level\n");
}
break;
case IRQF_TRIGGER_RISING:
- dev_info(&indio_dev->dev,
- "interrupts on the rising edge\n");
+ dev_info(parent, "interrupts on the rising edge\n");
break;
case IRQF_TRIGGER_HIGH:
- dev_info(&indio_dev->dev,
- "interrupts active high level\n");
+ dev_info(parent, "interrupts active high level\n");
break;
default:
/* This is the most preferred mode, if possible */
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
@@ -179,7 +177,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
if (irq_trig == IRQF_TRIGGER_FALLING ||
irq_trig == IRQF_TRIGGER_RISING) {
if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
- dev_err(&indio_dev->dev,
+ dev_err(parent,
"edge IRQ not supported w/o stat register.\n");
return -EOPNOTSUPP;
}
@@ -214,13 +212,13 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
sdata->trig->name,
sdata->trig);
if (err) {
- dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n");
+ dev_err(parent, "failed to request trigger IRQ.\n");
return err;
}
err = devm_iio_trigger_register(parent, sdata->trig);
if (err < 0) {
- dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
+ dev_err(parent, "failed to register iio trigger.\n");
return err;
}
indio_dev->trig = iio_trigger_get(sdata->trig);
diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c
index f9752a571aa5..6134613777b8 100644
--- a/drivers/iio/dac/ad3530r.c
+++ b/drivers/iio/dac/ad3530r.c
@@ -166,7 +166,9 @@ static ssize_t ad3530r_set_dac_powerdown(struct iio_dev *indio_dev,
AD3530R_OUTPUT_OPERATING_MODE_0 :
AD3530R_OUTPUT_OPERATING_MODE_1;
pdmode = powerdown ? st->chan[chan->channel].powerdown_mode : 0;
- mask = AD3530R_OP_MODE_CHAN_MSK(chan->channel);
+ mask = chan->channel < AD3531R_MAX_CHANNELS ?
+ AD3530R_OP_MODE_CHAN_MSK(chan->channel) :
+ AD3530R_OP_MODE_CHAN_MSK(chan->channel - 4);
val = field_prep(mask, pdmode);
ret = regmap_update_bits(st->regmap, reg, mask, val);
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index c1eb9ef9db08..266e1b29bf91 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -155,11 +155,14 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file,
ssize_t rc;
int ret;
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
if (rc < 0)
return rc;
- buf[count] = '\0';
+ buf[rc] = '\0';
ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 178e99b111de..5ffda104d4b2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -411,12 +411,15 @@ static ssize_t iio_debugfs_write_reg(struct file *file,
char buf[80];
int ret;
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf,
count);
if (ret < 0)
return ret;
- buf[count] = '\0';
+ buf[ret] = '\0';
ret = sscanf(buf, "%i %i", &reg, &val);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 5d9b7007a730..1d8c579b5433 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -172,12 +172,12 @@ static const struct xpad_device {
{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
{ 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX },
{ 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 },
+ { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX360 },
{ 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 },
{ 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
{ 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
{ 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
- { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
{ 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
{ 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 721ab69e84ac..7c4f309a4cb6 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -37,6 +37,17 @@ config KEYBOARD_ADP5520
To compile this driver as a module, choose M here: the module will
be called adp5520-keys.
+config KEYBOARD_ADP5585
+ tristate "ADP558x keypad support"
+ depends on MFD_ADP5585
+ select INPUT_MATRIXKMAP
+ help
+ This option enables support for the KEYPAD function found in the Analog
+ Devices ADP5585 and similar devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adp5585-keys.
+
config KEYBOARD_ADP5588
tristate "ADP5588/87 I2C QWERTY Keypad and IO Expander"
depends on I2C
@@ -50,16 +61,6 @@ config KEYBOARD_ADP5588
To compile this driver as a module, choose M here: the
module will be called adp5588-keys.
-config KEYBOARD_ADP5589
- tristate "ADP5585/ADP5589 I2C QWERTY Keypad and IO Expander"
- depends on I2C
- help
- Say Y here if you want to use a ADP5585/ADP5589 attached to your
- system I2C bus.
-
- To compile this driver as a module, choose M here: the
- module will be called adp5589-keys.
-
config KEYBOARD_AMIGA
tristate "Amiga keyboard"
depends on AMIGA
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 1e0721c30709..8bc20ab2b103 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -7,8 +7,8 @@
obj-$(CONFIG_KEYBOARD_ADC) += adc-keys.o
obj-$(CONFIG_KEYBOARD_ADP5520) += adp5520-keys.o
+obj-$(CONFIG_KEYBOARD_ADP5585) += adp5585-keys.o
obj-$(CONFIG_KEYBOARD_ADP5588) += adp5588-keys.o
-obj-$(CONFIG_KEYBOARD_ADP5589) += adp5589-keys.o
obj-$(CONFIG_KEYBOARD_AMIGA) += amikbd.o
obj-$(CONFIG_KEYBOARD_APPLESPI) += applespi.o
obj-$(CONFIG_KEYBOARD_ATARI) += atakbd.o
diff --git a/drivers/input/keyboard/adp5585-keys.c b/drivers/input/keyboard/adp5585-keys.c
new file mode 100644
index 000000000000..4208229e1356
--- /dev/null
+++ b/drivers/input/keyboard/adp5585-keys.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices ADP5585 Keys driver
+ *
+ * Copyright (C) 2025 Analog Devices, Inc.
+ */
+
+#include <linux/bitmap.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/find.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/mfd/adp5585.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+/* As needed for the matrix parsing code */
+#define ADP5589_MAX_KEYMAPSIZE 123
+
+struct adp5585_kpad_chip {
+ u8 key_ev_min;
+ u8 key_ev_max;
+ u8 max_rows;
+ u8 max_cols;
+};
+
+struct adp5585_kpad {
+ const struct adp5585_kpad_chip *info;
+ struct notifier_block nb;
+ struct input_dev *input;
+ unsigned short keycode[ADP5589_MAX_KEYMAPSIZE];
+ struct device *dev;
+ unsigned long keypad;
+ int row_shift;
+};
+
+static int adp5585_keys_validate_events(const struct adp5585_kpad *kpad,
+ const u32 *events, u32 n_events)
+{
+ unsigned int ev;
+ u32 row, col;
+
+ for (ev = 0; ev < n_events; ev++) {
+ if (events[ev] < kpad->info->key_ev_min ||
+ events[ev] > kpad->info->key_ev_max)
+ continue;
+
+ /*
+ * if the event is to be generated by the keymap, we need to make
+ * sure that the pins are part of it!
+ */
+ row = (events[ev] - 1) / kpad->info->max_cols;
+ col = (events[ev] - 1) % kpad->info->max_cols;
+
+ if (test_bit(row, &kpad->keypad) &&
+ test_bit(col + kpad->info->max_rows, &kpad->keypad))
+ continue;
+
+ return dev_err_probe(kpad->dev, -EINVAL,
+ "Invalid unlock/reset event(%u) not used in the keypad\n",
+ events[ev]);
+ }
+
+ return 0;
+}
+
+static int adp5585_keys_check_special_events(const struct adp5585_dev *adp5585,
+ const struct adp5585_kpad *kpad)
+{
+ int error;
+
+ error = adp5585_keys_validate_events(kpad, adp5585->unlock_keys,
+ adp5585->nkeys_unlock);
+ if (error)
+ return error;
+
+ error = adp5585_keys_validate_events(kpad, adp5585->reset1_keys,
+ adp5585->nkeys_reset1);
+ if (error)
+ return error;
+
+ return adp5585_keys_validate_events(kpad, adp5585->reset2_keys,
+ adp5585->nkeys_reset2);
+}
+
+static void adp5585_keys_pins_free(void *data)
+{
+ struct adp5585_kpad *kpad = data;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(kpad->dev->parent);
+ unsigned int pin;
+
+ for_each_set_bit(pin, &kpad->keypad, adp5585->n_pins)
+ clear_bit(pin, adp5585->pin_usage);
+}
+
+static int adp5585_keys_parse_fw(const struct adp5585_dev *adp5585,
+ struct adp5585_kpad *kpad)
+{
+ struct device *dev = kpad->dev;
+ u32 cols = 0, rows = 0, pin;
+ int error, n_pins;
+
+ /*
+ * We do not check for errors (or no value) since the input device is
+ * only added if this property is present in the first place.
+ */
+ n_pins = device_property_count_u32(dev, "adi,keypad-pins");
+ if (n_pins > adp5585->n_pins)
+ return dev_err_probe(dev, -EINVAL,
+ "Too many keypad pins (%d) defined (max=%d)\n",
+ n_pins, adp5585->n_pins);
+
+ unsigned int *keypad_pins __free(kfree) = kcalloc(n_pins, sizeof(*keypad_pins),
+ GFP_KERNEL);
+ if (!keypad_pins)
+ return -ENOMEM;
+
+ error = device_property_read_u32_array(dev, "adi,keypad-pins",
+ keypad_pins, n_pins);
+ if (error)
+ return error;
+
+ /*
+ * We can add the action here since it makes the code simpler and nothing
+ * "bad" will come of it. Worst case, it will be a no-op and no
+ * bit will be set.
+ */
+ error = devm_add_action_or_reset(dev, adp5585_keys_pins_free, kpad);
+ if (error)
+ return error;
+
+ for (pin = 0; pin < n_pins; pin++) {
+ if (keypad_pins[pin] >= adp5585->n_pins)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid keypad pin(%u) defined\n",
+ keypad_pins[pin]);
+
+ if (test_and_set_bit(keypad_pins[pin], adp5585->pin_usage))
+ return dev_err_probe(dev, -EBUSY,
+ "Keypad pin(%u) already used\n",
+ keypad_pins[pin]);
+
+ __set_bit(keypad_pins[pin], &kpad->keypad);
+ }
+
+ /*
+ * Note that, since we get a mask (and the HW allows it), we
+ * can have holes in our keypad (e.g. row0, row1 and row7 enabled).
+ * However, for the matrix parsing functions we need to pass the
+ * number of rows/cols as the maximum row/col used plus 1. This
+ * pretty much means we will also have holes in our SW keypad.
+ */
+
+ rows = find_last_bit(&kpad->keypad, kpad->info->max_rows) + 1;
+ if (rows == kpad->info->max_rows + 1)
+ return dev_err_probe(dev, -EINVAL,
+ "Now rows defined in the keypad!\n");
+
+ cols = find_last_bit(&kpad->keypad, kpad->info->max_cols + kpad->info->max_rows);
+ if (cols < kpad->info->max_rows)
+ return dev_err_probe(dev, -EINVAL,
+ "No columns defined in the keypad!\n");
+
+ cols = cols + 1 - kpad->info->max_rows;
+
+ error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
+ kpad->keycode, kpad->input);
+ if (error)
+ return error;
+
+ kpad->row_shift = get_count_order(cols);
+
+ if (device_property_read_bool(kpad->dev, "autorepeat"))
+ __set_bit(EV_REP, kpad->input->evbit);
+
+ error = adp5585_keys_check_special_events(adp5585, kpad);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int adp5585_keys_setup(const struct adp5585_dev *adp5585,
+ struct adp5585_kpad *kpad)
+{
+ unsigned long keys_bits, start = 0, nbits = kpad->info->max_rows;
+ const struct adp5585_regs *regs = adp5585->regs;
+ unsigned int i = 0, max_cols = kpad->info->max_cols;
+ int error;
+
+ /*
+ * Take care, as the below assumes max_rows is always less than or equal
+ * to 8, which is true for the supported devices. If we happen to add
+ * another device we need to make sure this still holds true, although
+ * adding a new device is very unlikely.
+ */
+ do {
+ keys_bits = bitmap_read(&kpad->keypad, start, nbits);
+ if (keys_bits) {
+ error = regmap_write(adp5585->regmap, regs->pin_cfg_a + i,
+ keys_bits);
+ if (error)
+ return error;
+ }
+
+ start += nbits;
+ if (max_cols > 8) {
+ nbits = 8;
+ max_cols -= nbits;
+ } else {
+ nbits = max_cols;
+ }
+
+ i++;
+ } while (start < kpad->info->max_rows + kpad->info->max_cols);
+
+ return 0;
+}
+
+static int adp5585_keys_ev_handle(struct notifier_block *nb, unsigned long key,
+ void *data)
+{
+ struct adp5585_kpad *kpad = container_of(nb, struct adp5585_kpad, nb);
+ unsigned long key_press = (unsigned long)data;
+ unsigned int row, col, code;
+
+ /* make sure the event is for us */
+ if (key < kpad->info->key_ev_min || key > kpad->info->key_ev_max)
+ return NOTIFY_DONE;
+
+ /*
+ * Unlikely, but let's be on the safe side! We do not return any error
+ * because the event was indeed for us, just with some weird value. So
+ * we still want the caller to know that the right handler was called.
+ */
+ if (!key)
+ return NOTIFY_BAD;
+
+ row = (key - 1) / (kpad->info->max_cols);
+ col = (key - 1) % (kpad->info->max_cols);
+ code = MATRIX_SCAN_CODE(row, col, kpad->row_shift);
+
+ dev_dbg_ratelimited(kpad->dev, "report key(%lu) r(%d) c(%d) code(%d)\n",
+ key, row, col, kpad->keycode[code]);
+
+ input_report_key(kpad->input, kpad->keycode[code], key_press);
+ input_sync(kpad->input);
+
+ return NOTIFY_STOP;
+}
+
+static void adp5585_keys_unreg_notifier(void *data)
+{
+ struct adp5585_kpad *kpad = data;
+ struct adp5585_dev *adp5585 = dev_get_drvdata(kpad->dev->parent);
+
+ blocking_notifier_chain_unregister(&adp5585->event_notifier,
+ &kpad->nb);
+}
+
+static int adp5585_keys_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct adp5585_dev *adp5585 = dev_get_drvdata(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct adp5585_kpad *kpad;
+ unsigned int revid;
+ const char *phys;
+ int error;
+
+ kpad = devm_kzalloc(dev, sizeof(*kpad), GFP_KERNEL);
+ if (!kpad)
+ return -ENOMEM;
+
+ if (!adp5585->irq)
+ return dev_err_probe(dev, -EINVAL,
+ "IRQ is mandatory for the keypad\n");
+
+ kpad->dev = dev;
+
+ kpad->input = devm_input_allocate_device(dev);
+ if (!kpad->input)
+ return -ENOMEM;
+
+ kpad->info = (const struct adp5585_kpad_chip *)id->driver_data;
+ if (!kpad->info)
+ return -ENODEV;
+
+ error = regmap_read(adp5585->regmap, ADP5585_ID, &revid);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to read device ID\n");
+
+ phys = devm_kasprintf(dev, GFP_KERNEL, "%s/input0", pdev->name);
+ if (!phys)
+ return -ENOMEM;
+
+ kpad->input->name = pdev->name;
+ kpad->input->phys = phys;
+
+ kpad->input->id.bustype = BUS_I2C;
+ kpad->input->id.vendor = 0x0001;
+ kpad->input->id.product = 0x0001;
+ kpad->input->id.version = revid & ADP5585_REV_ID_MASK;
+
+ device_set_of_node_from_dev(dev, dev->parent);
+
+ error = adp5585_keys_parse_fw(adp5585, kpad);
+ if (error)
+ return error;
+
+ error = adp5585_keys_setup(adp5585, kpad);
+ if (error)
+ return error;
+
+ kpad->nb.notifier_call = adp5585_keys_ev_handle;
+ error = blocking_notifier_chain_register(&adp5585->event_notifier,
+ &kpad->nb);
+ if (error)
+ return error;
+
+ error = devm_add_action_or_reset(dev, adp5585_keys_unreg_notifier, kpad);
+ if (error)
+ return error;
+
+ error = input_register_device(kpad->input);
+ if (error)
+ return dev_err_probe(dev, error,
+ "Failed to register input device\n");
+
+ return 0;
+}
+
+static const struct adp5585_kpad_chip adp5585_kpad_chip_info = {
+ .max_rows = 6,
+ .max_cols = 5,
+ .key_ev_min = ADP5585_ROW5_KEY_EVENT_START,
+ .key_ev_max = ADP5585_ROW5_KEY_EVENT_END,
+};
+
+static const struct adp5585_kpad_chip adp5589_kpad_chip_info = {
+ .max_rows = 8,
+ .max_cols = 11,
+ .key_ev_min = ADP5589_KEY_EVENT_START,
+ .key_ev_max = ADP5589_KEY_EVENT_END,
+};
+
+static const struct platform_device_id adp5585_keys_id_table[] = {
+ { "adp5585-keys", (kernel_ulong_t)&adp5585_kpad_chip_info },
+ { "adp5589-keys", (kernel_ulong_t)&adp5589_kpad_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, adp5585_keys_id_table);
+
+static struct platform_driver adp5585_keys_driver = {
+ .driver = {
+ .name = "adp5585-keys",
+ },
+ .probe = adp5585_keys_probe,
+ .id_table = adp5585_keys_id_table,
+};
+module_platform_driver(adp5585_keys_driver);
+
+MODULE_AUTHOR("Nuno Sá <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("ADP5585 Keys Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
deleted file mode 100644
index 81d0876ee358..000000000000
--- a/drivers/input/keyboard/adp5589-keys.c
+++ /dev/null
@@ -1,1066 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Description: keypad driver for ADP5589, ADP5585
- * I2C QWERTY Keypad and IO Expander
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
- *
- * Copyright (C) 2010-2011 Analog Devices Inc.
- */
-
-#include <linux/bitops.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/pm.h>
-#include <linux/pm_wakeirq.h>
-#include <linux/platform_device.h>
-#include <linux/input.h>
-#include <linux/i2c.h>
-#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-
-#include <linux/input/adp5589.h>
-
-/* ADP5589/ADP5585 Common Registers */
-#define ADP5589_5_ID 0x00
-#define ADP5589_5_INT_STATUS 0x01
-#define ADP5589_5_STATUS 0x02
-#define ADP5589_5_FIFO_1 0x03
-#define ADP5589_5_FIFO_2 0x04
-#define ADP5589_5_FIFO_3 0x05
-#define ADP5589_5_FIFO_4 0x06
-#define ADP5589_5_FIFO_5 0x07
-#define ADP5589_5_FIFO_6 0x08
-#define ADP5589_5_FIFO_7 0x09
-#define ADP5589_5_FIFO_8 0x0A
-#define ADP5589_5_FIFO_9 0x0B
-#define ADP5589_5_FIFO_10 0x0C
-#define ADP5589_5_FIFO_11 0x0D
-#define ADP5589_5_FIFO_12 0x0E
-#define ADP5589_5_FIFO_13 0x0F
-#define ADP5589_5_FIFO_14 0x10
-#define ADP5589_5_FIFO_15 0x11
-#define ADP5589_5_FIFO_16 0x12
-#define ADP5589_5_GPI_INT_STAT_A 0x13
-#define ADP5589_5_GPI_INT_STAT_B 0x14
-
-/* ADP5589 Registers */
-#define ADP5589_GPI_INT_STAT_C 0x15
-#define ADP5589_GPI_STATUS_A 0x16
-#define ADP5589_GPI_STATUS_B 0x17
-#define ADP5589_GPI_STATUS_C 0x18
-#define ADP5589_RPULL_CONFIG_A 0x19
-#define ADP5589_RPULL_CONFIG_B 0x1A
-#define ADP5589_RPULL_CONFIG_C 0x1B
-#define ADP5589_RPULL_CONFIG_D 0x1C
-#define ADP5589_RPULL_CONFIG_E 0x1D
-#define ADP5589_GPI_INT_LEVEL_A 0x1E
-#define ADP5589_GPI_INT_LEVEL_B 0x1F
-#define ADP5589_GPI_INT_LEVEL_C 0x20
-#define ADP5589_GPI_EVENT_EN_A 0x21
-#define ADP5589_GPI_EVENT_EN_B 0x22
-#define ADP5589_GPI_EVENT_EN_C 0x23
-#define ADP5589_GPI_INTERRUPT_EN_A 0x24
-#define ADP5589_GPI_INTERRUPT_EN_B 0x25
-#define ADP5589_GPI_INTERRUPT_EN_C 0x26
-#define ADP5589_DEBOUNCE_DIS_A 0x27
-#define ADP5589_DEBOUNCE_DIS_B 0x28
-#define ADP5589_DEBOUNCE_DIS_C 0x29
-#define ADP5589_GPO_DATA_OUT_A 0x2A
-#define ADP5589_GPO_DATA_OUT_B 0x2B
-#define ADP5589_GPO_DATA_OUT_C 0x2C
-#define ADP5589_GPO_OUT_MODE_A 0x2D
-#define ADP5589_GPO_OUT_MODE_B 0x2E
-#define ADP5589_GPO_OUT_MODE_C 0x2F
-#define ADP5589_GPIO_DIRECTION_A 0x30
-#define ADP5589_GPIO_DIRECTION_B 0x31
-#define ADP5589_GPIO_DIRECTION_C 0x32
-#define ADP5589_UNLOCK1 0x33
-#define ADP5589_UNLOCK2 0x34
-#define ADP5589_EXT_LOCK_EVENT 0x35
-#define ADP5589_UNLOCK_TIMERS 0x36
-#define ADP5589_LOCK_CFG 0x37
-#define ADP5589_RESET1_EVENT_A 0x38
-#define ADP5589_RESET1_EVENT_B 0x39
-#define ADP5589_RESET1_EVENT_C 0x3A
-#define ADP5589_RESET2_EVENT_A 0x3B
-#define ADP5589_RESET2_EVENT_B 0x3C
-#define ADP5589_RESET_CFG 0x3D
-#define ADP5589_PWM_OFFT_LOW 0x3E
-#define ADP5589_PWM_OFFT_HIGH 0x3F
-#define ADP5589_PWM_ONT_LOW 0x40
-#define ADP5589_PWM_ONT_HIGH 0x41
-#define ADP5589_PWM_CFG 0x42
-#define ADP5589_CLOCK_DIV_CFG 0x43
-#define ADP5589_LOGIC_1_CFG 0x44
-#define ADP5589_LOGIC_2_CFG 0x45
-#define ADP5589_LOGIC_FF_CFG 0x46
-#define ADP5589_LOGIC_INT_EVENT_EN 0x47
-#define ADP5589_POLL_PTIME_CFG 0x48
-#define ADP5589_PIN_CONFIG_A 0x49
-#define ADP5589_PIN_CONFIG_B 0x4A
-#define ADP5589_PIN_CONFIG_C 0x4B
-#define ADP5589_PIN_CONFIG_D 0x4C
-#define ADP5589_GENERAL_CFG 0x4D
-#define ADP5589_INT_EN 0x4E
-
-/* ADP5585 Registers */
-#define ADP5585_GPI_STATUS_A 0x15
-#define ADP5585_GPI_STATUS_B 0x16
-#define ADP5585_RPULL_CONFIG_A 0x17
-#define ADP5585_RPULL_CONFIG_B 0x18
-#define ADP5585_RPULL_CONFIG_C 0x19
-#define ADP5585_RPULL_CONFIG_D 0x1A
-#define ADP5585_GPI_INT_LEVEL_A 0x1B
-#define ADP5585_GPI_INT_LEVEL_B 0x1C
-#define ADP5585_GPI_EVENT_EN_A 0x1D
-#define ADP5585_GPI_EVENT_EN_B 0x1E
-#define ADP5585_GPI_INTERRUPT_EN_A 0x1F
-#define ADP5585_GPI_INTERRUPT_EN_B 0x20
-#define ADP5585_DEBOUNCE_DIS_A 0x21
-#define ADP5585_DEBOUNCE_DIS_B 0x22
-#define ADP5585_GPO_DATA_OUT_A 0x23
-#define ADP5585_GPO_DATA_OUT_B 0x24
-#define ADP5585_GPO_OUT_MODE_A 0x25
-#define ADP5585_GPO_OUT_MODE_B 0x26
-#define ADP5585_GPIO_DIRECTION_A 0x27
-#define ADP5585_GPIO_DIRECTION_B 0x28
-#define ADP5585_RESET1_EVENT_A 0x29
-#define ADP5585_RESET1_EVENT_B 0x2A
-#define ADP5585_RESET1_EVENT_C 0x2B
-#define ADP5585_RESET2_EVENT_A 0x2C
-#define ADP5585_RESET2_EVENT_B 0x2D
-#define ADP5585_RESET_CFG 0x2E
-#define ADP5585_PWM_OFFT_LOW 0x2F
-#define ADP5585_PWM_OFFT_HIGH 0x30
-#define ADP5585_PWM_ONT_LOW 0x31
-#define ADP5585_PWM_ONT_HIGH 0x32
-#define ADP5585_PWM_CFG 0x33
-#define ADP5585_LOGIC_CFG 0x34
-#define ADP5585_LOGIC_FF_CFG 0x35
-#define ADP5585_LOGIC_INT_EVENT_EN 0x36
-#define ADP5585_POLL_PTIME_CFG 0x37
-#define ADP5585_PIN_CONFIG_A 0x38
-#define ADP5585_PIN_CONFIG_B 0x39
-#define ADP5585_PIN_CONFIG_D 0x3A
-#define ADP5585_GENERAL_CFG 0x3B
-#define ADP5585_INT_EN 0x3C
-
-/* ID Register */
-#define ADP5589_5_DEVICE_ID_MASK 0xF
-#define ADP5589_5_MAN_ID_MASK 0xF
-#define ADP5589_5_MAN_ID_SHIFT 4
-#define ADP5589_5_MAN_ID 0x02
-
-/* GENERAL_CFG Register */
-#define OSC_EN BIT(7)
-#define CORE_CLK(x) (((x) & 0x3) << 5)
-#define LCK_TRK_LOGIC BIT(4) /* ADP5589 only */
-#define LCK_TRK_GPI BIT(3) /* ADP5589 only */
-#define INT_CFG BIT(1)
-#define RST_CFG BIT(0)
-
-/* INT_EN Register */
-#define LOGIC2_IEN BIT(5) /* ADP5589 only */
-#define LOGIC1_IEN BIT(4)
-#define LOCK_IEN BIT(3) /* ADP5589 only */
-#define OVRFLOW_IEN BIT(2)
-#define GPI_IEN BIT(1)
-#define EVENT_IEN BIT(0)
-
-/* Interrupt Status Register */
-#define LOGIC2_INT BIT(5) /* ADP5589 only */
-#define LOGIC1_INT BIT(4)
-#define LOCK_INT BIT(3) /* ADP5589 only */
-#define OVRFLOW_INT BIT(2)
-#define GPI_INT BIT(1)
-#define EVENT_INT BIT(0)
-
-/* STATUS Register */
-#define LOGIC2_STAT BIT(7) /* ADP5589 only */
-#define LOGIC1_STAT BIT(6)
-#define LOCK_STAT BIT(5) /* ADP5589 only */
-#define KEC 0x1F
-
-/* PIN_CONFIG_D Register */
-#define C4_EXTEND_CFG BIT(6) /* RESET2 */
-#define R4_EXTEND_CFG BIT(5) /* RESET1 */
-
-/* LOCK_CFG */
-#define LOCK_EN BIT(0)
-
-#define PTIME_MASK 0x3
-#define LTIME_MASK 0x3 /* ADP5589 only */
-
-/* Key Event Register xy */
-#define KEY_EV_PRESSED BIT(7)
-#define KEY_EV_MASK 0x7F
-
-#define KEYP_MAX_EVENT 16
-#define ADP5589_MAXGPIO 19
-#define ADP5585_MAXGPIO 11 /* 10 on the ADP5585-01, 11 on ADP5585-02 */
-
-enum {
- ADP5589,
- ADP5585_01,
- ADP5585_02
-};
-
-struct adp_constants {
- u8 maxgpio;
- u8 keymapsize;
- u8 gpi_pin_row_base;
- u8 gpi_pin_row_end;
- u8 gpi_pin_col_base;
- u8 gpi_pin_base;
- u8 gpi_pin_end;
- u8 gpimapsize_max;
- u8 max_row_num;
- u8 max_col_num;
- u8 row_mask;
- u8 col_mask;
- u8 col_shift;
- u8 c4_extend_cfg;
- u8 (*bank) (u8 offset);
- u8 (*bit) (u8 offset);
- u8 (*reg) (u8 reg);
-};
-
-struct adp5589_kpad {
- struct i2c_client *client;
- struct input_dev *input;
- const struct adp_constants *var;
- unsigned short keycode[ADP5589_KEYMAPSIZE];
- const struct adp5589_gpi_map *gpimap;
- unsigned short gpimapsize;
- unsigned extend_cfg;
- bool is_adp5585;
- bool support_row5;
-#ifdef CONFIG_GPIOLIB
- unsigned char gpiomap[ADP5589_MAXGPIO];
- struct gpio_chip gc;
- struct mutex gpio_lock; /* Protect cached dir, dat_out */
- u8 dat_out[3];
- u8 dir[3];
-#endif
-};
-
-/*
- * ADP5589 / ADP5585 derivative / variant handling
- */
-
-
-/* ADP5589 */
-
-static unsigned char adp5589_bank(unsigned char offset)
-{
- return offset >> 3;
-}
-
-static unsigned char adp5589_bit(unsigned char offset)
-{
- return 1u << (offset & 0x7);
-}
-
-static unsigned char adp5589_reg(unsigned char reg)
-{
- return reg;
-}
-
-static const struct adp_constants const_adp5589 = {
- .maxgpio = ADP5589_MAXGPIO,
- .keymapsize = ADP5589_KEYMAPSIZE,
- .gpi_pin_row_base = ADP5589_GPI_PIN_ROW_BASE,
- .gpi_pin_row_end = ADP5589_GPI_PIN_ROW_END,
- .gpi_pin_col_base = ADP5589_GPI_PIN_COL_BASE,
- .gpi_pin_base = ADP5589_GPI_PIN_BASE,
- .gpi_pin_end = ADP5589_GPI_PIN_END,
- .gpimapsize_max = ADP5589_GPIMAPSIZE_MAX,
- .c4_extend_cfg = 12,
- .max_row_num = ADP5589_MAX_ROW_NUM,
- .max_col_num = ADP5589_MAX_COL_NUM,
- .row_mask = ADP5589_ROW_MASK,
- .col_mask = ADP5589_COL_MASK,
- .col_shift = ADP5589_COL_SHIFT,
- .bank = adp5589_bank,
- .bit = adp5589_bit,
- .reg = adp5589_reg,
-};
-
-/* ADP5585 */
-
-static unsigned char adp5585_bank(unsigned char offset)
-{
- return offset > ADP5585_MAX_ROW_NUM;
-}
-
-static unsigned char adp5585_bit(unsigned char offset)
-{
- return (offset > ADP5585_MAX_ROW_NUM) ?
- 1u << (offset - ADP5585_COL_SHIFT) : 1u << offset;
-}
-
-static const unsigned char adp5585_reg_lut[] = {
- [ADP5589_GPI_STATUS_A] = ADP5585_GPI_STATUS_A,
- [ADP5589_GPI_STATUS_B] = ADP5585_GPI_STATUS_B,
- [ADP5589_RPULL_CONFIG_A] = ADP5585_RPULL_CONFIG_A,
- [ADP5589_RPULL_CONFIG_B] = ADP5585_RPULL_CONFIG_B,
- [ADP5589_RPULL_CONFIG_C] = ADP5585_RPULL_CONFIG_C,
- [ADP5589_RPULL_CONFIG_D] = ADP5585_RPULL_CONFIG_D,
- [ADP5589_GPI_INT_LEVEL_A] = ADP5585_GPI_INT_LEVEL_A,
- [ADP5589_GPI_INT_LEVEL_B] = ADP5585_GPI_INT_LEVEL_B,
- [ADP5589_GPI_EVENT_EN_A] = ADP5585_GPI_EVENT_EN_A,
- [ADP5589_GPI_EVENT_EN_B] = ADP5585_GPI_EVENT_EN_B,
- [ADP5589_GPI_INTERRUPT_EN_A] = ADP5585_GPI_INTERRUPT_EN_A,
- [ADP5589_GPI_INTERRUPT_EN_B] = ADP5585_GPI_INTERRUPT_EN_B,
- [ADP5589_DEBOUNCE_DIS_A] = ADP5585_DEBOUNCE_DIS_A,
- [ADP5589_DEBOUNCE_DIS_B] = ADP5585_DEBOUNCE_DIS_B,
- [ADP5589_GPO_DATA_OUT_A] = ADP5585_GPO_DATA_OUT_A,
- [ADP5589_GPO_DATA_OUT_B] = ADP5585_GPO_DATA_OUT_B,
- [ADP5589_GPO_OUT_MODE_A] = ADP5585_GPO_OUT_MODE_A,
- [ADP5589_GPO_OUT_MODE_B] = ADP5585_GPO_OUT_MODE_B,
- [ADP5589_GPIO_DIRECTION_A] = ADP5585_GPIO_DIRECTION_A,
- [ADP5589_GPIO_DIRECTION_B] = ADP5585_GPIO_DIRECTION_B,
- [ADP5589_RESET1_EVENT_A] = ADP5585_RESET1_EVENT_A,
- [ADP5589_RESET1_EVENT_B] = ADP5585_RESET1_EVENT_B,
- [ADP5589_RESET1_EVENT_C] = ADP5585_RESET1_EVENT_C,
- [ADP5589_RESET2_EVENT_A] = ADP5585_RESET2_EVENT_A,
- [ADP5589_RESET2_EVENT_B] = ADP5585_RESET2_EVENT_B,
- [ADP5589_RESET_CFG] = ADP5585_RESET_CFG,
- [ADP5589_PWM_OFFT_LOW] = ADP5585_PWM_OFFT_LOW,
- [ADP5589_PWM_OFFT_HIGH] = ADP5585_PWM_OFFT_HIGH,
- [ADP5589_PWM_ONT_LOW] = ADP5585_PWM_ONT_LOW,
- [ADP5589_PWM_ONT_HIGH] = ADP5585_PWM_ONT_HIGH,
- [ADP5589_PWM_CFG] = ADP5585_PWM_CFG,
- [ADP5589_LOGIC_1_CFG] = ADP5585_LOGIC_CFG,
- [ADP5589_LOGIC_FF_CFG] = ADP5585_LOGIC_FF_CFG,
- [ADP5589_LOGIC_INT_EVENT_EN] = ADP5585_LOGIC_INT_EVENT_EN,
- [ADP5589_POLL_PTIME_CFG] = ADP5585_POLL_PTIME_CFG,
- [ADP5589_PIN_CONFIG_A] = ADP5585_PIN_CONFIG_A,
- [ADP5589_PIN_CONFIG_B] = ADP5585_PIN_CONFIG_B,
- [ADP5589_PIN_CONFIG_D] = ADP5585_PIN_CONFIG_D,
- [ADP5589_GENERAL_CFG] = ADP5585_GENERAL_CFG,
- [ADP5589_INT_EN] = ADP5585_INT_EN,
-};
-
-static unsigned char adp5585_reg(unsigned char reg)
-{
- return adp5585_reg_lut[reg];
-}
-
-static const struct adp_constants const_adp5585 = {
- .maxgpio = ADP5585_MAXGPIO,
- .keymapsize = ADP5585_KEYMAPSIZE,
- .gpi_pin_row_base = ADP5585_GPI_PIN_ROW_BASE,
- .gpi_pin_row_end = ADP5585_GPI_PIN_ROW_END,
- .gpi_pin_col_base = ADP5585_GPI_PIN_COL_BASE,
- .gpi_pin_base = ADP5585_GPI_PIN_BASE,
- .gpi_pin_end = ADP5585_GPI_PIN_END,
- .gpimapsize_max = ADP5585_GPIMAPSIZE_MAX,
- .c4_extend_cfg = 10,
- .max_row_num = ADP5585_MAX_ROW_NUM,
- .max_col_num = ADP5585_MAX_COL_NUM,
- .row_mask = ADP5585_ROW_MASK,
- .col_mask = ADP5585_COL_MASK,
- .col_shift = ADP5585_COL_SHIFT,
- .bank = adp5585_bank,
- .bit = adp5585_bit,
- .reg = adp5585_reg,
-};
-
-static int adp5589_read(struct i2c_client *client, u8 reg)
-{
- int ret = i2c_smbus_read_byte_data(client, reg);
-
- if (ret < 0)
- dev_err(&client->dev, "Read Error\n");
-
- return ret;
-}
-
-static int adp5589_write(struct i2c_client *client, u8 reg, u8 val)
-{
- return i2c_smbus_write_byte_data(client, reg, val);
-}
-
-#ifdef CONFIG_GPIOLIB
-static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
-{
- struct adp5589_kpad *kpad = gpiochip_get_data(chip);
- unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
- unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
- int val;
-
- mutex_lock(&kpad->gpio_lock);
- if (kpad->dir[bank] & bit)
- val = kpad->dat_out[bank];
- else
- val = adp5589_read(kpad->client,
- kpad->var->reg(ADP5589_GPI_STATUS_A) + bank);
- mutex_unlock(&kpad->gpio_lock);
-
- return !!(val & bit);
-}
-
-static void adp5589_gpio_set_value(struct gpio_chip *chip,
- unsigned off, int val)
-{
- struct adp5589_kpad *kpad = gpiochip_get_data(chip);
- unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
- unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
-
- guard(mutex)(&kpad->gpio_lock);
-
- if (val)
- kpad->dat_out[bank] |= bit;
- else
- kpad->dat_out[bank] &= ~bit;
-
- adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A) +
- bank, kpad->dat_out[bank]);
-}
-
-static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off)
-{
- struct adp5589_kpad *kpad = gpiochip_get_data(chip);
- unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
- unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
-
- guard(mutex)(&kpad->gpio_lock);
-
- kpad->dir[bank] &= ~bit;
- return adp5589_write(kpad->client,
- kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank,
- kpad->dir[bank]);
-}
-
-static int adp5589_gpio_direction_output(struct gpio_chip *chip,
- unsigned off, int val)
-{
- struct adp5589_kpad *kpad = gpiochip_get_data(chip);
- unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
- unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);
- int error;
-
- guard(mutex)(&kpad->gpio_lock);
-
- kpad->dir[bank] |= bit;
-
- if (val)
- kpad->dat_out[bank] |= bit;
- else
- kpad->dat_out[bank] &= ~bit;
-
- error = adp5589_write(kpad->client,
- kpad->var->reg(ADP5589_GPO_DATA_OUT_A) + bank,
- kpad->dat_out[bank]);
- if (error)
- return error;
-
- error = adp5589_write(kpad->client,
- kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank,
- kpad->dir[bank]);
- if (error)
- return error;
-
- return 0;
-}
-
-static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
- const struct adp5589_kpad_platform_data *pdata)
-{
- bool pin_used[ADP5589_MAXGPIO];
- int n_unused = 0;
- int i;
-
- memset(pin_used, false, sizeof(pin_used));
-
- for (i = 0; i < kpad->var->maxgpio; i++)
- if (pdata->keypad_en_mask & BIT(i))
- pin_used[i] = true;
-
- for (i = 0; i < kpad->gpimapsize; i++)
- pin_used[kpad->gpimap[i].pin - kpad->var->gpi_pin_base] = true;
-
- if (kpad->extend_cfg & R4_EXTEND_CFG)
- pin_used[4] = true;
-
- if (kpad->extend_cfg & C4_EXTEND_CFG)
- pin_used[kpad->var->c4_extend_cfg] = true;
-
- if (!kpad->support_row5)
- pin_used[5] = true;
-
- for (i = 0; i < kpad->var->maxgpio; i++)
- if (!pin_used[i])
- kpad->gpiomap[n_unused++] = i;
-
- return n_unused;
-}
-
-static int adp5589_gpio_add(struct adp5589_kpad *kpad)
-{
- struct device *dev = &kpad->client->dev;
- const struct adp5589_kpad_platform_data *pdata = dev_get_platdata(dev);
- const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data;
- int i, error;
-
- if (!gpio_data)
- return 0;
-
- kpad->gc.parent = dev;
- kpad->gc.ngpio = adp5589_build_gpiomap(kpad, pdata);
- if (kpad->gc.ngpio == 0) {
- dev_info(dev, "No unused gpios left to export\n");
- return 0;
- }
-
- kpad->gc.direction_input = adp5589_gpio_direction_input;
- kpad->gc.direction_output = adp5589_gpio_direction_output;
- kpad->gc.get = adp5589_gpio_get_value;
- kpad->gc.set = adp5589_gpio_set_value;
- kpad->gc.can_sleep = 1;
-
- kpad->gc.base = gpio_data->gpio_start;
- kpad->gc.label = kpad->client->name;
- kpad->gc.owner = THIS_MODULE;
-
- mutex_init(&kpad->gpio_lock);
-
- error = devm_gpiochip_add_data(dev, &kpad->gc, kpad);
- if (error)
- return error;
-
- for (i = 0; i <= kpad->var->bank(kpad->var->maxgpio); i++) {
- kpad->dat_out[i] = adp5589_read(kpad->client, kpad->var->reg(
- ADP5589_GPO_DATA_OUT_A) + i);
- kpad->dir[i] = adp5589_read(kpad->client, kpad->var->reg(
- ADP5589_GPIO_DIRECTION_A) + i);
- }
-
- return 0;
-}
-#else
-static inline int adp5589_gpio_add(struct adp5589_kpad *kpad)
-{
- return 0;
-}
-#endif
-
-static void adp5589_report_switches(struct adp5589_kpad *kpad,
- int key, int key_val)
-{
- int i;
-
- for (i = 0; i < kpad->gpimapsize; i++) {
- if (key_val == kpad->gpimap[i].pin) {
- input_report_switch(kpad->input,
- kpad->gpimap[i].sw_evt,
- key & KEY_EV_PRESSED);
- break;
- }
- }
-}
-
-static void adp5589_report_events(struct adp5589_kpad *kpad, int ev_cnt)
-{
- int i;
-
- for (i = 0; i < ev_cnt; i++) {
- int key = adp5589_read(kpad->client, ADP5589_5_FIFO_1 + i);
- int key_val = key & KEY_EV_MASK;
-
- if (key_val >= kpad->var->gpi_pin_base &&
- key_val <= kpad->var->gpi_pin_end) {
- adp5589_report_switches(kpad, key, key_val);
- } else {
- input_report_key(kpad->input,
- kpad->keycode[key_val - 1],
- key & KEY_EV_PRESSED);
- }
- }
-}
-
-static irqreturn_t adp5589_irq(int irq, void *handle)
-{
- struct adp5589_kpad *kpad = handle;
- struct i2c_client *client = kpad->client;
- int status, ev_cnt;
-
- status = adp5589_read(client, ADP5589_5_INT_STATUS);
-
- if (status & OVRFLOW_INT) /* Unlikely and should never happen */
- dev_err(&client->dev, "Event Overflow Error\n");
-
- if (status & EVENT_INT) {
- ev_cnt = adp5589_read(client, ADP5589_5_STATUS) & KEC;
- if (ev_cnt) {
- adp5589_report_events(kpad, ev_cnt);
- input_sync(kpad->input);
- }
- }
-
- adp5589_write(client, ADP5589_5_INT_STATUS, status); /* Status is W1C */
-
- return IRQ_HANDLED;
-}
-
-static int adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key)
-{
- int i;
-
- for (i = 0; i < kpad->var->keymapsize; i++)
- if (key == kpad->keycode[i])
- return (i + 1) | KEY_EV_PRESSED;
-
- dev_err(&kpad->client->dev, "RESET/UNLOCK key not in keycode map\n");
-
- return -EINVAL;
-}
-
-static int adp5589_setup(struct adp5589_kpad *kpad)
-{
- struct i2c_client *client = kpad->client;
- const struct adp5589_kpad_platform_data *pdata =
- dev_get_platdata(&client->dev);
- u8 (*reg) (u8) = kpad->var->reg;
- unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0;
- unsigned char pull_mask = 0;
- int i, ret;
-
- ret = adp5589_write(client, reg(ADP5589_PIN_CONFIG_A),
- pdata->keypad_en_mask & kpad->var->row_mask);
- ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_B),
- (pdata->keypad_en_mask >> kpad->var->col_shift) &
- kpad->var->col_mask);
-
- if (!kpad->is_adp5585)
- ret |= adp5589_write(client, ADP5589_PIN_CONFIG_C,
- (pdata->keypad_en_mask >> 16) & 0xFF);
-
- if (!kpad->is_adp5585 && pdata->en_keylock) {
- ret |= adp5589_write(client, ADP5589_UNLOCK1,
- pdata->unlock_key1);
- ret |= adp5589_write(client, ADP5589_UNLOCK2,
- pdata->unlock_key2);
- ret |= adp5589_write(client, ADP5589_UNLOCK_TIMERS,
- pdata->unlock_timer & LTIME_MASK);
- ret |= adp5589_write(client, ADP5589_LOCK_CFG, LOCK_EN);
- }
-
- for (i = 0; i < KEYP_MAX_EVENT; i++)
- ret |= adp5589_read(client, ADP5589_5_FIFO_1 + i);
-
- for (i = 0; i < pdata->gpimapsize; i++) {
- unsigned short pin = pdata->gpimap[i].pin;
-
- if (pin <= kpad->var->gpi_pin_row_end) {
- evt_mode1 |= BIT(pin - kpad->var->gpi_pin_row_base);
- } else {
- evt_mode2 |=
- BIT(pin - kpad->var->gpi_pin_col_base) & 0xFF;
- if (!kpad->is_adp5585)
- evt_mode3 |=
- BIT(pin - kpad->var->gpi_pin_col_base) >> 8;
- }
- }
-
- if (pdata->gpimapsize) {
- ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_A),
- evt_mode1);
- ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_B),
- evt_mode2);
- if (!kpad->is_adp5585)
- ret |= adp5589_write(client,
- reg(ADP5589_GPI_EVENT_EN_C),
- evt_mode3);
- }
-
- if (pdata->pull_dis_mask & pdata->pullup_en_100k &
- pdata->pullup_en_300k & pdata->pulldown_en_300k)
- dev_warn(&client->dev, "Conflicting pull resistor config\n");
-
- for (i = 0; i <= kpad->var->max_row_num; i++) {
- unsigned int val = 0, bit = BIT(i);
- if (pdata->pullup_en_300k & bit)
- val = 0;
- else if (pdata->pulldown_en_300k & bit)
- val = 1;
- else if (pdata->pullup_en_100k & bit)
- val = 2;
- else if (pdata->pull_dis_mask & bit)
- val = 3;
-
- pull_mask |= val << (2 * (i & 0x3));
-
- if (i % 4 == 3 || i == kpad->var->max_row_num) {
- ret |= adp5589_write(client, reg(ADP5585_RPULL_CONFIG_A)
- + (i >> 2), pull_mask);
- pull_mask = 0;
- }
- }
-
- for (i = 0; i <= kpad->var->max_col_num; i++) {
- unsigned int val = 0, bit = BIT(i + kpad->var->col_shift);
- if (pdata->pullup_en_300k & bit)
- val = 0;
- else if (pdata->pulldown_en_300k & bit)
- val = 1;
- else if (pdata->pullup_en_100k & bit)
- val = 2;
- else if (pdata->pull_dis_mask & bit)
- val = 3;
-
- pull_mask |= val << (2 * (i & 0x3));
-
- if (i % 4 == 3 || i == kpad->var->max_col_num) {
- ret |= adp5589_write(client,
- reg(ADP5585_RPULL_CONFIG_C) +
- (i >> 2), pull_mask);
- pull_mask = 0;
- }
- }
-
- if (pdata->reset1_key_1 && pdata->reset1_key_2 && pdata->reset1_key_3) {
- ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_A),
- adp5589_get_evcode(kpad,
- pdata->reset1_key_1));
- ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_B),
- adp5589_get_evcode(kpad,
- pdata->reset1_key_2));
- ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_C),
- adp5589_get_evcode(kpad,
- pdata->reset1_key_3));
- kpad->extend_cfg |= R4_EXTEND_CFG;
- }
-
- if (pdata->reset2_key_1 && pdata->reset2_key_2) {
- ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_A),
- adp5589_get_evcode(kpad,
- pdata->reset2_key_1));
- ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_B),
- adp5589_get_evcode(kpad,
- pdata->reset2_key_2));
- kpad->extend_cfg |= C4_EXTEND_CFG;
- }
-
- if (kpad->extend_cfg) {
- ret |= adp5589_write(client, reg(ADP5589_RESET_CFG),
- pdata->reset_cfg);
- ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_D),
- kpad->extend_cfg);
- }
-
- ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_A),
- pdata->debounce_dis_mask & kpad->var->row_mask);
-
- ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_B),
- (pdata->debounce_dis_mask >> kpad->var->col_shift)
- & kpad->var->col_mask);
-
- if (!kpad->is_adp5585)
- ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_C),
- (pdata->debounce_dis_mask >> 16) & 0xFF);
-
- ret |= adp5589_write(client, reg(ADP5589_POLL_PTIME_CFG),
- pdata->scan_cycle_time & PTIME_MASK);
- ret |= adp5589_write(client, ADP5589_5_INT_STATUS,
- (kpad->is_adp5585 ? 0 : LOGIC2_INT) |
- LOGIC1_INT | OVRFLOW_INT |
- (kpad->is_adp5585 ? 0 : LOCK_INT) |
- GPI_INT | EVENT_INT); /* Status is W1C */
-
- ret |= adp5589_write(client, reg(ADP5589_GENERAL_CFG),
- INT_CFG | OSC_EN | CORE_CLK(3));
- ret |= adp5589_write(client, reg(ADP5589_INT_EN),
- OVRFLOW_IEN | GPI_IEN | EVENT_IEN);
-
- if (ret < 0) {
- dev_err(&client->dev, "Write Error\n");
- return ret;
- }
-
- return 0;
-}
-
-static void adp5589_report_switch_state(struct adp5589_kpad *kpad)
-{
- int gpi_stat_tmp, pin_loc;
- int i;
- int gpi_stat1 = adp5589_read(kpad->client,
- kpad->var->reg(ADP5589_GPI_STATUS_A));
- int gpi_stat2 = adp5589_read(kpad->client,
- kpad->var->reg(ADP5589_GPI_STATUS_B));
- int gpi_stat3 = !kpad->is_adp5585 ?
- adp5589_read(kpad->client, ADP5589_GPI_STATUS_C) : 0;
-
- for (i = 0; i < kpad->gpimapsize; i++) {
- unsigned short pin = kpad->gpimap[i].pin;
-
- if (pin <= kpad->var->gpi_pin_row_end) {
- gpi_stat_tmp = gpi_stat1;
- pin_loc = pin - kpad->var->gpi_pin_row_base;
- } else if ((pin - kpad->var->gpi_pin_col_base) < 8) {
- gpi_stat_tmp = gpi_stat2;
- pin_loc = pin - kpad->var->gpi_pin_col_base;
- } else {
- gpi_stat_tmp = gpi_stat3;
- pin_loc = pin - kpad->var->gpi_pin_col_base - 8;
- }
-
- if (gpi_stat_tmp < 0) {
- dev_err(&kpad->client->dev,
- "Can't read GPIO_DAT_STAT switch %d, default to OFF\n",
- pin);
- gpi_stat_tmp = 0;
- }
-
- input_report_switch(kpad->input,
- kpad->gpimap[i].sw_evt,
- !(gpi_stat_tmp & BIT(pin_loc)));
- }
-
- input_sync(kpad->input);
-}
-
-static int adp5589_keypad_add(struct adp5589_kpad *kpad, unsigned int revid)
-{
- struct i2c_client *client = kpad->client;
- const struct adp5589_kpad_platform_data *pdata =
- dev_get_platdata(&client->dev);
- struct input_dev *input;
- unsigned int i;
- int error;
-
- if (!((pdata->keypad_en_mask & kpad->var->row_mask) &&
- (pdata->keypad_en_mask >> kpad->var->col_shift)) ||
- !pdata->keymap) {
- dev_err(&client->dev, "no rows, cols or keymap from pdata\n");
- return -EINVAL;
- }
-
- if (pdata->keymapsize != kpad->var->keymapsize) {
- dev_err(&client->dev, "invalid keymapsize\n");
- return -EINVAL;
- }
-
- if (!pdata->gpimap && pdata->gpimapsize) {
- dev_err(&client->dev, "invalid gpimap from pdata\n");
- return -EINVAL;
- }
-
- if (pdata->gpimapsize > kpad->var->gpimapsize_max) {
- dev_err(&client->dev, "invalid gpimapsize\n");
- return -EINVAL;
- }
-
- for (i = 0; i < pdata->gpimapsize; i++) {
- unsigned short pin = pdata->gpimap[i].pin;
-
- if (pin < kpad->var->gpi_pin_base ||
- pin > kpad->var->gpi_pin_end) {
- dev_err(&client->dev, "invalid gpi pin data\n");
- return -EINVAL;
- }
-
- if (BIT(pin - kpad->var->gpi_pin_row_base) &
- pdata->keypad_en_mask) {
- dev_err(&client->dev, "invalid gpi row/col data\n");
- return -EINVAL;
- }
- }
-
- if (!client->irq) {
- dev_err(&client->dev, "no IRQ?\n");
- return -EINVAL;
- }
-
- input = devm_input_allocate_device(&client->dev);
- if (!input)
- return -ENOMEM;
-
- kpad->input = input;
-
- input->name = client->name;
- input->phys = "adp5589-keys/input0";
- input->dev.parent = &client->dev;
-
- input_set_drvdata(input, kpad);
-
- input->id.bustype = BUS_I2C;
- input->id.vendor = 0x0001;
- input->id.product = 0x0001;
- input->id.version = revid;
-
- input->keycodesize = sizeof(kpad->keycode[0]);
- input->keycodemax = pdata->keymapsize;
- input->keycode = kpad->keycode;
-
- memcpy(kpad->keycode, pdata->keymap,
- pdata->keymapsize * input->keycodesize);
-
- kpad->gpimap = pdata->gpimap;
- kpad->gpimapsize = pdata->gpimapsize;
-
- /* setup input device */
- __set_bit(EV_KEY, input->evbit);
-
- if (pdata->repeat)
- __set_bit(EV_REP, input->evbit);
-
- for (i = 0; i < input->keycodemax; i++)
- if (kpad->keycode[i] <= KEY_MAX)
- __set_bit(kpad->keycode[i], input->keybit);
- __clear_bit(KEY_RESERVED, input->keybit);
-
- if (kpad->gpimapsize)
- __set_bit(EV_SW, input->evbit);
- for (i = 0; i < kpad->gpimapsize; i++)
- __set_bit(kpad->gpimap[i].sw_evt, input->swbit);
-
- error = input_register_device(input);
- if (error) {
- dev_err(&client->dev, "unable to register input device\n");
- return error;
- }
-
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, adp5589_irq,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->dev.driver->name, kpad);
- if (error) {
- dev_err(&client->dev, "unable to request irq %d\n", client->irq);
- return error;
- }
-
- return 0;
-}
-
-static void adp5589_clear_config(void *data)
-{
- struct adp5589_kpad *kpad = data;
-
- adp5589_write(kpad->client, kpad->var->reg(ADP5589_GENERAL_CFG), 0);
-}
-
-static int adp5589_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- struct adp5589_kpad *kpad;
- const struct adp5589_kpad_platform_data *pdata =
- dev_get_platdata(&client->dev);
- unsigned int revid;
- int error, ret;
-
- if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
- return -EIO;
- }
-
- if (!pdata) {
- dev_err(&client->dev, "no platform data?\n");
- return -EINVAL;
- }
-
- kpad = devm_kzalloc(&client->dev, sizeof(*kpad), GFP_KERNEL);
- if (!kpad)
- return -ENOMEM;
-
- kpad->client = client;
-
- switch (id->driver_data) {
- case ADP5585_02:
- kpad->support_row5 = true;
- fallthrough;
- case ADP5585_01:
- kpad->is_adp5585 = true;
- kpad->var = &const_adp5585;
- break;
- case ADP5589:
- kpad->support_row5 = true;
- kpad->var = &const_adp5589;
- break;
- }
-
- error = devm_add_action_or_reset(&client->dev, adp5589_clear_config,
- kpad);
- if (error)
- return error;
-
- ret = adp5589_read(client, ADP5589_5_ID);
- if (ret < 0)
- return ret;
-
- revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK;
-
- if (pdata->keymapsize) {
- error = adp5589_keypad_add(kpad, revid);
- if (error)
- return error;
- }
-
- error = adp5589_setup(kpad);
- if (error)
- return error;
-
- if (kpad->gpimapsize)
- adp5589_report_switch_state(kpad);
-
- error = adp5589_gpio_add(kpad);
- if (error)
- return error;
-
- dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq);
- return 0;
-}
-
-static int adp5589_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct adp5589_kpad *kpad = i2c_get_clientdata(client);
-
- if (kpad->input)
- disable_irq(client->irq);
-
- return 0;
-}
-
-static int adp5589_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct adp5589_kpad *kpad = i2c_get_clientdata(client);
-
- if (kpad->input)
- enable_irq(client->irq);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(adp5589_dev_pm_ops, adp5589_suspend, adp5589_resume);
-
-static const struct i2c_device_id adp5589_id[] = {
- {"adp5589-keys", ADP5589},
- {"adp5585-keys", ADP5585_01},
- {"adp5585-02-keys", ADP5585_02}, /* Adds ROW5 to ADP5585 */
- {}
-};
-
-MODULE_DEVICE_TABLE(i2c, adp5589_id);
-
-static struct i2c_driver adp5589_driver = {
- .driver = {
- .name = KBUILD_MODNAME,
- .pm = pm_sleep_ptr(&adp5589_dev_pm_ops),
- },
- .probe = adp5589_probe,
- .id_table = adp5589_id,
-};
-
-module_i2c_driver(adp5589_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("ADP5589/ADP5585 Keypad driver");
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 1a41e59c77f8..3ebf37ddfc18 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -20,7 +20,7 @@
#include "internal.h"
-#define ICC_DYN_ID_START 10000
+#define ICC_DYN_ID_START 100000
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -819,6 +819,9 @@ static struct icc_node *icc_node_create_nolock(int id)
{
struct icc_node *node;
+ if (id >= ICC_DYN_ID_START)
+ return ERR_PTR(-EINVAL);
+
/* check if node already exists */
node = node_find(id);
if (node)
@@ -906,11 +909,36 @@ void icc_node_destroy(int id)
return;
kfree(node->links);
+ if (node->id >= ICC_DYN_ID_START)
+ kfree(node->name);
kfree(node);
}
EXPORT_SYMBOL_GPL(icc_node_destroy);
/**
+ * icc_node_set_name() - set node name
+ * @node: node
+ * @provider: node provider
+ * @name: node name
+ *
+ * Return: 0 on success, or -ENOMEM on allocation failure
+ */
+int icc_node_set_name(struct icc_node *node, const struct icc_provider *provider, const char *name)
+{
+ if (node->id >= ICC_DYN_ID_START) {
+ node->name = kasprintf(GFP_KERNEL, "%s@%s", name,
+ dev_name(provider->dev));
+ if (!node->name)
+ return -ENOMEM;
+ } else {
+ node->name = name;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icc_node_set_name);
+
+/**
* icc_link_nodes() - create link between two nodes
* @src_node: source node
* @dst_node: destination node
@@ -1038,10 +1066,6 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
node->avg_bw = node->init_avg;
node->peak_bw = node->init_peak;
- if (node->id >= ICC_DYN_ID_START)
- node->name = devm_kasprintf(provider->dev, GFP_KERNEL, "%s@%s",
- node->name, dev_name(provider->dev));
-
if (node->avg_bw || node->peak_bw) {
if (provider->pre_aggregate)
provider->pre_aggregate(node);
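A minimal usage sketch of the new icc_node_set_name() helper introduced above
(hedged: the "qn" and "provider" names are illustrative, not from a specific
in-tree driver; the pattern mirrors the qcom/icc-rpmh.c and osm-l3.c hunks
further below):

	struct icc_node *node;
	int ret;

	node = icc_node_create(qn->id);
	if (IS_ERR(node))
		return PTR_ERR(node);

	/* Replaces the old "node->name = qn->name" assignment: for dynamic
	 * IDs (>= ICC_DYN_ID_START) the core now allocates "name@provider"
	 * itself and frees it again in icc_node_destroy().
	 */
	ret = icc_node_set_name(node, provider, qn->name);
	if (ret) {
		icc_node_destroy(node->id);
		return ret;
	}

	node->data = qn;
	icc_node_add(node, provider);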
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index 88f311c11020..93c030608d3e 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -117,6 +117,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_master", data[i].name);
if (!node->name) {
+ icc_node_destroy(node->id);
ret = -ENOMEM;
goto err;
}
@@ -135,6 +136,7 @@ struct icc_provider *icc_clk_register(struct device *dev,
node->name = devm_kasprintf(dev, GFP_KERNEL, "%s_slave", data[i].name);
if (!node->name) {
+ icc_node_destroy(node->id);
ret = -ENOMEM;
goto err;
}
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 41bfc6e7ee1d..001404e91041 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -293,7 +293,12 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
goto err_remove_nodes;
}
- node->name = qn->name;
+ ret = icc_node_set_name(node, provider, qn->name);
+ if (ret) {
+ icc_node_destroy(node->id);
+ goto err_remove_nodes;
+ }
+
node->data = qn;
icc_node_add(node, provider);
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index baecbf2533f7..b33f00da1880 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -236,7 +236,12 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
goto err;
}
- node->name = qnodes[i]->name;
+ ret = icc_node_set_name(node, provider, qnodes[i]->name);
+ if (ret) {
+ icc_node_destroy(node->id);
+ goto err;
+ }
+
/* Cast away const and add it back in qcom_osm_l3_set() */
node->data = (void *)qnodes[i];
icc_node_add(node, provider);
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
index 346f18d70e9e..905403a3a930 100644
--- a/drivers/interconnect/qcom/sc7280.c
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -238,6 +238,7 @@ static struct qcom_icc_node xm_pcie3_1 = {
.id = SC7280_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
+ .num_links = 1,
.links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
};
diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
index 9e041365d909..8e8f56186a36 100644
--- a/drivers/interconnect/samsung/exynos.c
+++ b/drivers/interconnect/samsung/exynos.c
@@ -134,6 +134,11 @@ static int exynos_generic_icc_probe(struct platform_device *pdev)
priv->node = icc_node;
icc_node->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
bus_dev->of_node);
+ if (!icc_node->name) {
+ icc_node_destroy(pdev->id);
+ return -ENOMEM;
+ }
+
if (of_property_read_u32(bus_dev->of_node, "samsung,data-clock-ratio",
&priv->bus_clk_ratio))
priv->bus_clk_ratio = EXYNOS_ICC_DEFAULT_BUS_CLK_RATIO;
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 761ab647f372..0961ac805944 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -193,15 +193,13 @@ struct hyperv_root_ir_data {
static void
hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
{
- u64 status;
- u32 vector;
- struct irq_cfg *cfg;
- int ioapic_id;
- const struct cpumask *affinity;
- int cpu;
- struct hv_interrupt_entry entry;
struct hyperv_root_ir_data *data = irq_data->chip_data;
+ struct hv_interrupt_entry entry;
+ const struct cpumask *affinity;
struct IO_APIC_route_entry e;
+ struct irq_cfg *cfg;
+ int cpu, ioapic_id;
+ u32 vector;
cfg = irqd_cfg(irq_data);
affinity = irq_data_get_effective_affinity_mask(irq_data);
@@ -214,23 +212,16 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
&& data->entry.ioapic_rte.as_uint64) {
entry = data->entry;
- status = hv_unmap_ioapic_interrupt(ioapic_id, &entry);
-
- if (status != HV_STATUS_SUCCESS)
- hv_status_debug(status, "failed to unmap\n");
+ (void)hv_unmap_ioapic_interrupt(ioapic_id, &entry);
data->entry.ioapic_rte.as_uint64 = 0;
data->entry.source = 0; /* Invalid source */
}
- status = hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu,
- vector, &entry);
-
- if (status != HV_STATUS_SUCCESS) {
- hv_status_err(status, "map failed\n");
+ if (hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu,
+ vector, &entry))
return;
- }
data->entry = entry;
@@ -322,10 +313,10 @@ static void hyperv_root_irq_remapping_free(struct irq_domain *domain,
data = irq_data->chip_data;
e = &data->entry;
- if (e->source == HV_DEVICE_TYPE_IOAPIC
- && e->ioapic_rte.as_uint64)
- hv_unmap_ioapic_interrupt(data->ioapic_id,
- &data->entry);
+ if (e->source == HV_DEVICE_TYPE_IOAPIC &&
+ e->ioapic_rte.as_uint64)
+ (void)hv_unmap_ioapic_interrupt(data->ioapic_id,
+ &data->entry);
kfree(data);
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2ea490b9d370..1492c8552255 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -168,14 +168,14 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
{
const char *err;
struct cache_sb_disk *s;
- struct page *page;
+ struct folio *folio;
unsigned int i;
- page = read_cache_page_gfp(bdev->bd_mapping,
- SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
- if (IS_ERR(page))
+ folio = mapping_read_folio_gfp(bdev->bd_mapping,
+ SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
+ if (IS_ERR(folio))
return "IO error";
- s = page_address(page) + offset_in_page(SB_OFFSET);
+ s = folio_address(folio) + offset_in_folio(folio, SB_OFFSET);
sb->offset = le64_to_cpu(s->offset);
sb->version = le64_to_cpu(s->version);
@@ -272,7 +272,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
*res = s;
return NULL;
err:
- put_page(page);
+ folio_put(folio);
return err;
}
@@ -1366,7 +1366,7 @@ static CLOSURE_CALLBACK(cached_dev_free)
mutex_unlock(&bch_register_lock);
if (dc->sb_disk)
- put_page(virt_to_page(dc->sb_disk));
+ folio_put(virt_to_folio(dc->sb_disk));
if (dc->bdev_file)
fput(dc->bdev_file);
@@ -2216,7 +2216,7 @@ void bch_cache_release(struct kobject *kobj)
free_fifo(&ca->free[i]);
if (ca->sb_disk)
- put_page(virt_to_page(ca->sb_disk));
+ folio_put(virt_to_folio(ca->sb_disk));
if (ca->bdev_file)
fput(ca->bdev_file);
@@ -2593,7 +2593,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (!holder) {
ret = -ENOMEM;
err = "cannot allocate memory";
- goto out_put_sb_page;
+ goto out_put_sb_folio;
}
/* Now reopen in exclusive mode with proper holder */
@@ -2667,8 +2667,8 @@ async_done:
out_free_holder:
kfree(holder);
-out_put_sb_page:
- put_page(virt_to_page(sb_disk));
+out_put_sb_folio:
+ folio_put(virt_to_folio(sb_disk));
out_blkdev_put:
if (bdev_file)
fput(bdev_file);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index ec84ba5e93e5..ff7595caf440 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -2742,7 +2742,11 @@ static unsigned long __evict_a_few(unsigned long nr_buffers)
__make_buffer_clean(b);
__free_buffer_wake(b);
- cond_resched();
+ if (need_resched()) {
+ dm_bufio_unlock(c);
+ cond_resched();
+ dm_bufio_lock(c);
+ }
}
dm_bufio_unlock(c);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 17157c4216a5..5ef43231fe77 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -253,17 +253,35 @@ MODULE_PARM_DESC(max_read_size, "Maximum size of a read request");
static unsigned int max_write_size = 0;
module_param(max_write_size, uint, 0644);
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
-static unsigned get_max_request_size(struct crypt_config *cc, bool wrt)
+
+static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
{
+ struct crypt_config *cc = ti->private;
unsigned val, sector_align;
- val = !wrt ? READ_ONCE(max_read_size) : READ_ONCE(max_write_size);
- if (likely(!val))
- val = !wrt ? DM_CRYPT_DEFAULT_MAX_READ_SIZE : DM_CRYPT_DEFAULT_MAX_WRITE_SIZE;
- if (wrt || cc->used_tag_size) {
- if (unlikely(val > BIO_MAX_VECS << PAGE_SHIFT))
- val = BIO_MAX_VECS << PAGE_SHIFT;
- }
- sector_align = max(bdev_logical_block_size(cc->dev->bdev), (unsigned)cc->sector_size);
+ bool wrt = op_is_write(bio_op(bio));
+
+ if (wrt) {
+ /*
+ * For zoned devices, splitting write operations creates the
+ * risk of deadlocking queue freeze operations with zone write
+ * plugging BIO work when the remainder of a split BIO is
+ * issued. So always allow the entire BIO to proceed.
+ */
+ if (ti->emulate_zone_append)
+ return bio_sectors(bio);
+
+ val = min_not_zero(READ_ONCE(max_write_size),
+ DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
+ } else {
+ val = min_not_zero(READ_ONCE(max_read_size),
+ DM_CRYPT_DEFAULT_MAX_READ_SIZE);
+ }
+
+ if (wrt || cc->used_tag_size)
+ val = min(val, BIO_MAX_VECS << PAGE_SHIFT);
+
+ sector_align = max(bdev_logical_block_size(cc->dev->bdev),
+ (unsigned)cc->sector_size);
val = round_down(val, sector_align);
if (unlikely(!val))
val = sector_align;
@@ -1192,11 +1210,11 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
return -EINVAL;
}
- if (bi->tuple_size < cc->used_tag_size) {
+ if (bi->metadata_size < cc->used_tag_size) {
ti->error = "Integrity profile tag size mismatch.";
return -EINVAL;
}
- cc->tuple_size = bi->tuple_size;
+ cc->tuple_size = bi->metadata_size;
if (1 << bi->interval_exp != cc->sector_size) {
ti->error = "Integrity profile sector size mismatch.";
return -EINVAL;
@@ -3496,7 +3514,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
/*
* Check if bio is too large, split as needed.
*/
- max_sectors = get_max_request_size(cc, bio_data_dir(bio) == WRITE);
+ max_sectors = get_max_request_sectors(ti, bio);
if (unlikely(bio_sectors(bio) > max_sectors))
dm_accept_partial_bio(bio, max_sectors);
@@ -3733,6 +3751,17 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
max_t(unsigned int, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
limits->dma_alignment = limits->logical_block_size - 1;
+
+ /*
+ * For zoned dm-crypt targets, there will be no internal splitting of
+ * write BIOs to avoid exceeding BIO_MAX_VECS vectors per BIO. But
+ * without respecting this limit, crypt_alloc_buffer() will trigger a
+ * BUG(). Avoid this by forcing DM core to split write BIOs to this
+ * limit.
+ */
+ if (ti->emulate_zone_append)
+ limits->max_hw_sectors = min(limits->max_hw_sectors,
+ BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
}
static struct target_type crypt_target = {
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 4395657fa583..efeee0a873c0 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3906,8 +3906,8 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
struct blk_integrity *bi = &limits->integrity;
memset(bi, 0, sizeof(*bi));
- bi->tuple_size = ic->tag_size;
- bi->tag_size = bi->tuple_size;
+ bi->metadata_size = ic->tag_size;
+ bi->tag_size = bi->metadata_size;
bi->interval_exp =
ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
}
@@ -4746,18 +4746,18 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
ti->error = "Integrity profile not supported";
goto bad;
}
- /*printk("tag_size: %u, tuple_size: %u\n", bi->tag_size, bi->tuple_size);*/
- if (bi->tuple_size < ic->tag_size) {
+ /*printk("tag_size: %u, metadata_size: %u\n", bi->tag_size, bi->metadata_size);*/
+ if (bi->metadata_size < ic->tag_size) {
r = -EINVAL;
ti->error = "The integrity profile is smaller than tag size";
goto bad;
}
- if ((unsigned long)bi->tuple_size > PAGE_SIZE / 2) {
+ if ((unsigned long)bi->metadata_size > PAGE_SIZE / 2) {
r = -EINVAL;
ti->error = "Too big tuple size";
goto bad;
}
- ic->tuple_size = bi->tuple_size;
+ ic->tuple_size = bi->metadata_size;
if (1 << bi->interval_exp != ic->sectors_per_block << SECTOR_SHIFT) {
r = -EINVAL;
ti->error = "Integrity profile sector size mismatch";
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index a7dc04bd55e5..5bbbdf8fc1bd 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -458,6 +458,7 @@ static void stripe_io_hints(struct dm_target *ti,
struct stripe_c *sc = ti->private;
unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ limits->chunk_sectors = sc->chunk_size;
limits->io_min = chunk_size;
limits->io_opt = chunk_size * sc->stripes;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 24a857ff6d0b..d9d5e6aa5707 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -2065,8 +2065,10 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
limits->discard_alignment = 0;
}
- if (!dm_table_supports_write_zeroes(t))
+ if (!dm_table_supports_write_zeroes(t)) {
limits->max_write_zeroes_sectors = 0;
+ limits->max_hw_wzeroes_unmap_sectors = 0;
+ }
if (!dm_table_supports_secure_erase(t))
limits->max_secure_erase_sectors = 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1726f0f828cc..abfe0392b5a4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1293,8 +1293,9 @@ out:
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
- * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
- * __send_duplicate_bios().
+ * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
+ * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
+ * by __send_duplicate_bios().
*
* dm_accept_partial_bio informs the dm that the target only wants to process
* additional n_sectors sectors of the bio and the rest of the data should be
@@ -1327,11 +1328,19 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
unsigned int bio_sectors = bio_sectors(bio);
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
- BUG_ON(op_is_zone_mgmt(bio_op(bio)));
- BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
BUG_ON(bio_sectors > *tio->len_ptr);
BUG_ON(n_sectors > bio_sectors);
+ if (static_branch_unlikely(&zoned_enabled) &&
+ unlikely(bdev_is_zoned(bio->bi_bdev))) {
+ enum req_op op = bio_op(bio);
+
+ BUG_ON(op_is_zone_mgmt(op));
+ BUG_ON(op == REQ_OP_WRITE);
+ BUG_ON(op == REQ_OP_WRITE_ZEROES);
+ BUG_ON(op == REQ_OP_ZONE_APPEND);
+ }
+
*tio->len_ptr -= bio_sectors - n_sectors;
bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
@@ -1776,19 +1785,35 @@ static void init_clone_info(struct clone_info *ci, struct dm_io *io,
}
#ifdef CONFIG_BLK_DEV_ZONED
-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
- struct bio *bio)
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
{
/*
- * For mapped device that need zone append emulation, we must
- * split any large BIO that straddles zone boundaries.
+ * Special case the zone operations that cannot or should not be split.
*/
- return dm_emulate_zone_append(md) && bio_straddles_zones(bio) &&
- !bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
+ switch (bio_op(bio)) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
+ return false;
+ default:
+ break;
+ }
+
+ /*
+ * When mapped devices use the block layer zone write plugging, we must
+ * split any large BIO to the mapped device limits to not submit BIOs
+ * that span zone boundaries and to avoid potential deadlocks with
+ * queue freeze operations.
+ */
+ return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
}
+
static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
{
- return dm_emulate_zone_append(md) && blk_zone_plug_bio(bio, 0);
+ if (!bio_needs_zone_write_plugging(bio))
+ return false;
+ return blk_zone_plug_bio(bio, 0);
}
static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
@@ -1904,8 +1929,7 @@ static blk_status_t __send_zone_reset_all(struct clone_info *ci)
}
#else
-static inline bool dm_zone_bio_needs_split(struct mapped_device *md,
- struct bio *bio)
+static inline bool dm_zone_bio_needs_split(struct bio *bio)
{
return false;
}
@@ -1932,9 +1956,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
is_abnormal = is_abnormal_io(bio);
if (static_branch_unlikely(&zoned_enabled)) {
- /* Special case REQ_OP_ZONE_RESET_ALL as it cannot be split. */
- need_split = (bio_op(bio) != REQ_OP_ZONE_RESET_ALL) &&
- (is_abnormal || dm_zone_bio_needs_split(md, bio));
+ need_split = is_abnormal || dm_zone_bio_needs_split(bio);
} else {
need_split = is_abnormal;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0f03b21e66e4..046fe85c76fe 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -636,9 +636,6 @@ static void __mddev_put(struct mddev *mddev)
mddev->ctime || mddev->hold_active)
return;
- /* Array is not configured at all, and not held active, so destroy it */
- set_bit(MD_DELETED, &mddev->flags);
-
/*
* Call queue_work inside the spinlock so that flush_workqueue() after
* mddev_find will succeed in waiting for the work to be done.
@@ -873,6 +870,16 @@ void mddev_unlock(struct mddev *mddev)
kobject_del(&rdev->kobj);
export_rdev(rdev, mddev);
}
+
+ /* Call del_gendisk after releasing reconfig_mutex to avoid a
+ * deadlock (e.g. calling del_gendisk under the lock while an
+ * access to a sysfs file waits for the lock).
+ * MD_DELETED is only used for md raid and is set in do_md_stop;
+ * dm raid only uses md_stop to stop, so dm raid does not need
+ * to check MD_DELETED when taking the reconfig lock.
+ */
+ if (test_bit(MD_DELETED, &mddev->flags))
+ del_gendisk(mddev->gendisk);
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -5774,19 +5781,30 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
struct mddev *mddev = container_of(kobj, struct mddev, kobj);
ssize_t rv;
+ struct kernfs_node *kn = NULL;
if (!entry->store)
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+
+ if (entry->store == array_state_store && cmd_match(page, "clear"))
+ kn = sysfs_break_active_protection(kobj, attr);
+
spin_lock(&all_mddevs_lock);
if (!mddev_get(mddev)) {
spin_unlock(&all_mddevs_lock);
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
return -EBUSY;
}
spin_unlock(&all_mddevs_lock);
rv = entry->store(mddev, page, length);
mddev_put(mddev);
+
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
+
return rv;
}
@@ -5794,12 +5812,6 @@ static void md_kobj_release(struct kobject *ko)
{
struct mddev *mddev = container_of(ko, struct mddev, kobj);
- if (mddev->sysfs_state)
- sysfs_put(mddev->sysfs_state);
- if (mddev->sysfs_level)
- sysfs_put(mddev->sysfs_level);
-
- del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
@@ -6413,15 +6425,10 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
- /*
- * Don't clear MD_CLOSING, or mddev can be opened again.
- * 'hold_active != 0' means mddev is still in the creation
- * process and will be used later.
- */
- if (mddev->hold_active)
- mddev->flags = 0;
- else
- mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
+ /* if UNTIL_STOP is set, it's cleared here */
+ mddev->hold_active = 0;
+ /* Don't clear MD_CLOSING, or mddev can be opened again. */
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@@ -6516,8 +6523,6 @@ static void __md_stop(struct mddev *mddev)
if (mddev->private)
pers->free(mddev, mddev->private);
mddev->private = NULL;
- if (pers->sync_request && mddev->to_remove == NULL)
- mddev->to_remove = &md_redundancy_group;
put_pers(pers);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -6646,10 +6651,8 @@ static int do_md_stop(struct mddev *mddev, int mode)
mddev->bitmap_info.offset = 0;
export_array(mddev);
-
md_clean(mddev);
- if (mddev->hold_active == UNTIL_STOP)
- mddev->hold_active = 0;
+ set_bit(MD_DELETED, &mddev->flags);
}
md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
@@ -9456,17 +9459,11 @@ static bool md_spares_need_change(struct mddev *mddev)
return false;
}
-static int remove_and_add_spares(struct mddev *mddev,
- struct md_rdev *this)
+static int remove_spares(struct mddev *mddev, struct md_rdev *this)
{
struct md_rdev *rdev;
- int spares = 0;
int removed = 0;
- if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
- /* Mustn't remove devices when resync thread is running */
- return 0;
-
rdev_for_each(rdev, mddev) {
if ((this == NULL || rdev == this) && rdev_removeable(rdev) &&
!mddev->pers->hot_remove_disk(mddev, rdev)) {
@@ -9480,6 +9477,21 @@ static int remove_and_add_spares(struct mddev *mddev,
if (removed && mddev->kobj.sd)
sysfs_notify_dirent_safe(mddev->sysfs_degraded);
+ return removed;
+}
+
+static int remove_and_add_spares(struct mddev *mddev,
+ struct md_rdev *this)
+{
+ struct md_rdev *rdev;
+ int spares = 0;
+ int removed = 0;
+
+ if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ /* Mustn't remove devices when resync thread is running */
+ return 0;
+
+ removed = remove_spares(mddev, this);
if (this && removed)
goto no_add;
@@ -9522,6 +9534,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
/* Check if resync is in progress. */
if (mddev->recovery_cp < MaxSector) {
+ remove_spares(mddev, NULL);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
return true;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d45a9e6ead80..67b365621507 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -700,11 +700,26 @@ static inline bool reshape_interrupted(struct mddev *mddev)
static inline int __must_check mddev_lock(struct mddev *mddev)
{
- return mutex_lock_interruptible(&mddev->reconfig_mutex);
+ int ret;
+
+ ret = mutex_lock_interruptible(&mddev->reconfig_mutex);
+
+ /* MD_DELETED is set in do_md_stop with reconfig_mutex held,
+ * so check it here.
+ */
+ if (!ret && test_bit(MD_DELETED, &mddev->flags)) {
+ ret = -ENODEV;
+ mutex_unlock(&mddev->reconfig_mutex);
+ }
+
+ return ret;
}
/* Sometimes we need to take the lock in a situation where
* failure due to interrupts is not acceptable.
+ * There is no need to check MD_DELETED here: the caller that
+ * holds the lock cannot be stopped, and no path calls this
+ * function after do_md_stop.
*/
static inline void mddev_lock_nointr(struct mddev *mddev)
{
@@ -713,7 +728,14 @@ static inline void mddev_lock_nointr(struct mddev *mddev)
static inline int mddev_trylock(struct mddev *mddev)
{
- return mutex_trylock(&mddev->reconfig_mutex);
+ int ret;
+
+ ret = mutex_trylock(&mddev->reconfig_mutex);
+ if (!ret && test_bit(MD_DELETED, &mddev->flags)) {
+ ret = -ENODEV;
+ mutex_unlock(&mddev->reconfig_mutex);
+ }
+ return ret;
}
extern void mddev_unlock(struct mddev *mddev);
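A minimal sketch of the caller pattern affected by this mddev_lock() change
(hedged: the surrounding reconfig path is illustrative, not a specific
in-tree caller):

	err = mddev_lock(mddev);
	if (err)
		return err;	/* -EINTR, or now -ENODEV once MD_DELETED is set */

	/* ... reconfigure the array under reconfig_mutex ... */

	mddev_unlock(mddev);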
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d8f639f4ae12..cbe2a9054cb9 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -384,6 +384,7 @@ static int raid0_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
+ lim.chunk_sectors = mddev->chunk_sectors;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c9bd2005bfd0..95dc354a86a0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2446,15 +2446,12 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* that are active
*/
for (i = 0; i < conf->copies; i++) {
- int d;
-
tbio = r10_bio->devs[i].repl_bio;
if (!tbio || !tbio->bi_end_io)
continue;
if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
&& r10_bio->devs[i].bio != fbio)
bio_copy_data(tbio, fbio);
- d = r10_bio->devs[i].devnum;
atomic_inc(&r10_bio->remaining);
submit_bio_noacct(tbio);
}
@@ -4012,6 +4009,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
+ lim.chunk_sectors = mddev->chunk_sectors;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ca5b0e8ba707..7ec61ee7b218 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -9040,7 +9040,7 @@ static int __init raid5_init(void)
int ret;
raid5_wq = alloc_workqueue("raid5wq",
- WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
+ WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_SYSFS, 0);
if (!raid5_wq)
return -ENOMEM;
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 043b9ec756ff..7f3f47db4c98 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -324,7 +324,7 @@ EXPORT_SYMBOL(memstick_init_req);
static int h_memstick_read_dev_id(struct memstick_dev *card,
struct memstick_request **mrq)
{
- struct ms_id_register id_reg;
+ struct ms_id_register id_reg = {};
if (!(*mrq)) {
memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 6fb3768e3d71..c6cc42360887 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -285,6 +285,24 @@ config MFD_CS42L43_SDW
Select this to support the Cirrus Logic CS42L43 PC CODEC with
headphone and class D speaker drivers over SoundWire.
+config MFD_MACSMC
+ tristate "Apple Silicon System Management Controller (SMC)"
+ depends on ARCH_APPLE || COMPILE_TEST
+ depends on OF
+ depends on APPLE_RTKIT
+ select MFD_CORE
+ help
+ The System Management Controller (SMC) on Apple Silicon machines is a
+ piece of hardware that exposes various functionalities such as
+ temperature sensors, voltage/power meters, shutdown/reboot handling,
+ GPIOs and more.
+
+ Communication happens via a shared mailbox using the RTKit protocol
+ which is also used for other co-processors. The SMC protocol then
+ allows reading and writing many different keys which implement the
+ various features. The MFD core device handles this protocol and
+ exposes it to the sub-devices.
+
config MFD_MADERA
tristate "Cirrus Logic Madera codecs"
select MFD_CORE
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 79495f9f3457..f7bdedd5a66d 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_MFD_CS42L43_SDW) += cs42l43-sdw.o
obj-$(CONFIG_MFD_ENE_KB3930) += ene-kb3930.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
obj-$(CONFIG_MFD_GATEWORKS_GSC) += gateworks-gsc.o
+obj-$(CONFIG_MFD_MACSMC) += macsmc.o
obj-$(CONFIG_MFD_TI_LP873X) += lp873x.o
obj-$(CONFIG_MFD_TI_LP87565) += lp87565.o
diff --git a/drivers/mfd/adp5585.c b/drivers/mfd/adp5585.c
index 160e0b38106a..58f7cebe2ea4 100644
--- a/drivers/mfd/adp5585.c
+++ b/drivers/mfd/adp5585.c
@@ -4,22 +4,40 @@
*
* Copyright 2022 NXP
* Copyright 2024 Ideas on Board Oy
+ * Copyright 2025 Analog Devices Inc.
*/
#include <linux/array_size.h>
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
+#include <linux/gpio/consumer.h>
#include <linux/mfd/adp5585.h>
#include <linux/mfd/core.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/types.h>
-static const struct mfd_cell adp5585_devs[] = {
- { .name = "adp5585-gpio", },
- { .name = "adp5585-pwm", },
+enum {
+ ADP5585_DEV_GPIO,
+ ADP5585_DEV_PWM,
+ ADP5585_DEV_INPUT,
+ ADP5585_DEV_MAX
+};
+
+static const struct mfd_cell adp5585_devs[ADP5585_DEV_MAX] = {
+ MFD_CELL_NAME("adp5585-gpio"),
+ MFD_CELL_NAME("adp5585-pwm"),
+ MFD_CELL_NAME("adp5585-keys"),
+};
+
+static const struct mfd_cell adp5589_devs[] = {
+ MFD_CELL_NAME("adp5589-gpio"),
+ MFD_CELL_NAME("adp5589-pwm"),
+ MFD_CELL_NAME("adp5589-keys"),
};
static const struct regmap_range adp5585_volatile_ranges[] = {
@@ -31,6 +49,15 @@ static const struct regmap_access_table adp5585_volatile_regs = {
.n_yes_ranges = ARRAY_SIZE(adp5585_volatile_ranges),
};
+static const struct regmap_range adp5589_volatile_ranges[] = {
+ regmap_reg_range(ADP5585_ID, ADP5589_GPI_STATUS_C),
+};
+
+static const struct regmap_access_table adp5589_volatile_regs = {
+ .yes_ranges = adp5589_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(adp5589_volatile_ranges),
+};
+
/*
* Chip variants differ in the default configuration of pull-up and pull-down
* resistors, and therefore have different default register values:
@@ -74,46 +101,597 @@ static const u8 adp5585_regmap_defaults_04[ADP5585_MAX_REG + 1] = {
/* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00,
};
-enum adp5585_regmap_type {
- ADP5585_REGMAP_00,
- ADP5585_REGMAP_02,
- ADP5585_REGMAP_04,
+static const u8 adp5589_regmap_defaults_00[ADP5589_MAX_REG + 1] = {
+ /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x48 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
-static const struct regmap_config adp5585_regmap_configs[] = {
- [ADP5585_REGMAP_00] = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = ADP5585_MAX_REG,
- .volatile_table = &adp5585_volatile_regs,
- .cache_type = REGCACHE_MAPLE,
- .reg_defaults_raw = adp5585_regmap_defaults_00,
- .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_00),
- },
- [ADP5585_REGMAP_02] = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = ADP5585_MAX_REG,
- .volatile_table = &adp5585_volatile_regs,
- .cache_type = REGCACHE_MAPLE,
- .reg_defaults_raw = adp5585_regmap_defaults_02,
- .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_02),
- },
- [ADP5585_REGMAP_04] = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = ADP5585_MAX_REG,
- .volatile_table = &adp5585_volatile_regs,
- .cache_type = REGCACHE_MAPLE,
- .reg_defaults_raw = adp5585_regmap_defaults_04,
- .num_reg_defaults_raw = sizeof(adp5585_regmap_defaults_04),
- },
+static const u8 adp5589_regmap_defaults_01[ADP5589_MAX_REG + 1] = {
+ /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x48 */ 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
};
+static const u8 adp5589_regmap_defaults_02[ADP5589_MAX_REG + 1] = {
+ /* 0x00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x08 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x18 */ 0x00, 0x41, 0x01, 0x00, 0x11, 0x04, 0x00, 0x00,
+ /* 0x20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x38 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x48 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 *adp5585_regmap_defaults[ADP5585_MAX] = {
+ [ADP5585_00] = adp5585_regmap_defaults_00,
+ [ADP5585_01] = adp5585_regmap_defaults_00,
+ [ADP5585_02] = adp5585_regmap_defaults_02,
+ [ADP5585_03] = adp5585_regmap_defaults_00,
+ [ADP5585_04] = adp5585_regmap_defaults_04,
+ [ADP5589_00] = adp5589_regmap_defaults_00,
+ [ADP5589_01] = adp5589_regmap_defaults_01,
+ [ADP5589_02] = adp5589_regmap_defaults_02,
+};
+
+static const struct regmap_config adp5585_regmap_config_template = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ADP5585_MAX_REG,
+ .volatile_table = &adp5585_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+ .num_reg_defaults_raw = ADP5585_MAX_REG + 1,
+};
+
+static const struct regmap_config adp5589_regmap_config_template = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ADP5589_MAX_REG,
+ .volatile_table = &adp5589_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
+ .num_reg_defaults_raw = ADP5589_MAX_REG + 1,
+};
+
+static const struct adp5585_regs adp5585_regs = {
+ .ext_cfg = ADP5585_PIN_CONFIG_C,
+ .int_en = ADP5585_INT_EN,
+ .gen_cfg = ADP5585_GENERAL_CFG,
+ .poll_ptime_cfg = ADP5585_POLL_PTIME_CFG,
+ .reset_cfg = ADP5585_RESET_CFG,
+ .reset1_event_a = ADP5585_RESET1_EVENT_A,
+ .reset2_event_a = ADP5585_RESET2_EVENT_A,
+ .pin_cfg_a = ADP5585_PIN_CONFIG_A,
+};
+
+static const struct adp5585_regs adp5589_regs = {
+ .ext_cfg = ADP5589_PIN_CONFIG_D,
+ .int_en = ADP5589_INT_EN,
+ .gen_cfg = ADP5589_GENERAL_CFG,
+ .poll_ptime_cfg = ADP5589_POLL_PTIME_CFG,
+ .reset_cfg = ADP5589_RESET_CFG,
+ .reset1_event_a = ADP5589_RESET1_EVENT_A,
+ .reset2_event_a = ADP5589_RESET2_EVENT_A,
+ .pin_cfg_a = ADP5589_PIN_CONFIG_A,
+};
+
+static int adp5585_validate_event(const struct adp5585_dev *adp5585, unsigned int ev)
+{
+ if (adp5585->has_pin6) {
+ if (ev >= ADP5585_ROW5_KEY_EVENT_START && ev <= ADP5585_ROW5_KEY_EVENT_END)
+ return 0;
+ if (ev >= ADP5585_GPI_EVENT_START && ev <= ADP5585_GPI_EVENT_END)
+ return 0;
+
+ return dev_err_probe(adp5585->dev, -EINVAL,
+ "Invalid unlock/reset event(%u) for this device\n", ev);
+ }
+
+ if (ev >= ADP5585_KEY_EVENT_START && ev <= ADP5585_KEY_EVENT_END)
+ return 0;
+ if (ev >= ADP5585_GPI_EVENT_START && ev <= ADP5585_GPI_EVENT_END) {
+ /*
+ * Some variants of the adp5585 do not have Row 5
+ * (meaning pin 6 or GPIO 6) available. Instead, that pin serves
+ * as a reset pin, so we need to make sure no event is
+ * configured for it.
+ */
+ if (ev == (ADP5585_GPI_EVENT_START + 5))
+ return dev_err_probe(adp5585->dev, -EINVAL,
+ "Invalid unlock/reset event(%u). R5 not available\n",
+ ev);
+ return 0;
+ }
+
+ return dev_err_probe(adp5585->dev, -EINVAL,
+ "Invalid unlock/reset event(%u) for this device\n", ev);
+}
+
+static int adp5589_validate_event(const struct adp5585_dev *adp5585, unsigned int ev)
+{
+ if (ev >= ADP5589_KEY_EVENT_START && ev <= ADP5589_KEY_EVENT_END)
+ return 0;
+ if (ev >= ADP5589_GPI_EVENT_START && ev <= ADP5589_GPI_EVENT_END)
+ return 0;
+
+ return dev_err_probe(adp5585->dev, -EINVAL,
+ "Invalid unlock/reset event(%u) for this device\n", ev);
+}
+
+static struct regmap_config *adp5585_fill_variant_config(struct adp5585_dev *adp5585)
+{
+ struct regmap_config *regmap_config;
+
+ switch (adp5585->variant) {
+ case ADP5585_00:
+ case ADP5585_01:
+ case ADP5585_02:
+ case ADP5585_03:
+ case ADP5585_04:
+ adp5585->id = ADP5585_MAN_ID_VALUE;
+ adp5585->regs = &adp5585_regs;
+ adp5585->n_pins = ADP5585_PIN_MAX;
+ adp5585->reset2_out = ADP5585_RESET2_OUT;
+ if (adp5585->variant == ADP5585_01)
+ adp5585->has_pin6 = true;
+ regmap_config = devm_kmemdup(adp5585->dev, &adp5585_regmap_config_template,
+ sizeof(*regmap_config), GFP_KERNEL);
+ break;
+ case ADP5589_00:
+ case ADP5589_01:
+ case ADP5589_02:
+ adp5585->id = ADP5589_MAN_ID_VALUE;
+ adp5585->regs = &adp5589_regs;
+ adp5585->has_unlock = true;
+ adp5585->has_pin6 = true;
+ adp5585->n_pins = ADP5589_PIN_MAX;
+ adp5585->reset2_out = ADP5589_RESET2_OUT;
+ regmap_config = devm_kmemdup(adp5585->dev, &adp5589_regmap_config_template,
+ sizeof(*regmap_config), GFP_KERNEL);
+ break;
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (!regmap_config)
+ return ERR_PTR(-ENOMEM);
+
+ regmap_config->reg_defaults_raw = adp5585_regmap_defaults[adp5585->variant];
+
+ return regmap_config;
+}
+
+static int adp5585_parse_ev_array(const struct adp5585_dev *adp5585, const char *prop, u32 *events,
+ u32 *n_events, u32 max_evs, bool reset_ev)
+{
+ struct device *dev = adp5585->dev;
+ unsigned int ev;
+ int ret;
+
+ /*
+ * The device can handle special events through GPIs or the keypad:
+ * unlock events: The keypad stays locked until one of the configured events is detected.
+ * reset events: Generate a reset pulse when one of the configured events is detected.
+ */
+ ret = device_property_count_u32(dev, prop);
+ if (ret < 0)
+ return 0;
+
+ *n_events = ret;
+
+ if (!adp5585->has_unlock && !reset_ev)
+ return dev_err_probe(dev, -EOPNOTSUPP, "Unlock keys not supported\n");
+
+ if (*n_events > max_evs)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid number of keys(%u > %u) for %s\n",
+ *n_events, max_evs, prop);
+
+ ret = device_property_read_u32_array(dev, prop, events, *n_events);
+ if (ret)
+ return ret;
+
+ for (ev = 0; ev < *n_events; ev++) {
+ if (!reset_ev && events[ev] == ADP5589_UNLOCK_WILDCARD)
+ continue;
+
+ if (adp5585->id == ADP5585_MAN_ID_VALUE)
+ ret = adp5585_validate_event(adp5585, events[ev]);
+ else
+ ret = adp5589_validate_event(adp5585, events[ev]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp5585_unlock_ev_parse(struct adp5585_dev *adp5585)
+{
+ struct device *dev = adp5585->dev;
+ int ret;
+
+ ret = adp5585_parse_ev_array(adp5585, "adi,unlock-events", adp5585->unlock_keys,
+ &adp5585->nkeys_unlock, ARRAY_SIZE(adp5585->unlock_keys),
+ false);
+ if (ret)
+ return ret;
+ if (!adp5585->nkeys_unlock)
+ return 0;
+
+ ret = device_property_read_u32(dev, "adi,unlock-trigger-sec", &adp5585->unlock_time);
+ if (!ret) {
+ if (adp5585->unlock_time > ADP5585_MAX_UNLOCK_TIME_SEC)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid unlock time(%u > %d)\n",
+ adp5585->unlock_time,
+ ADP5585_MAX_UNLOCK_TIME_SEC);
+ }
+
+ return 0;
+}
+
+static int adp5585_reset_ev_parse(struct adp5585_dev *adp5585)
+{
+ struct device *dev = adp5585->dev;
+ u32 prop_val;
+ int ret;
+
+ ret = adp5585_parse_ev_array(adp5585, "adi,reset1-events", adp5585->reset1_keys,
+ &adp5585->nkeys_reset1,
+ ARRAY_SIZE(adp5585->reset1_keys), true);
+ if (ret)
+ return ret;
+
+ ret = adp5585_parse_ev_array(adp5585, "adi,reset2-events",
+ adp5585->reset2_keys,
+ &adp5585->nkeys_reset2,
+ ARRAY_SIZE(adp5585->reset2_keys), true);
+ if (ret)
+ return ret;
+
+ if (!adp5585->nkeys_reset1 && !adp5585->nkeys_reset2)
+ return 0;
+
+ if (adp5585->nkeys_reset1 && device_property_read_bool(dev, "adi,reset1-active-high"))
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET1_POL, 1);
+
+ if (adp5585->nkeys_reset2 && device_property_read_bool(dev, "adi,reset2-active-high"))
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET2_POL, 1);
+
+ if (device_property_read_bool(dev, "adi,rst-passthrough-enable"))
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RST_PASSTHRU_EN, 1);
+
+ ret = device_property_read_u32(dev, "adi,reset-trigger-ms", &prop_val);
+ if (!ret) {
+ switch (prop_val) {
+ case 0:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 0);
+ break;
+ case 1000:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 1);
+ break;
+ case 1500:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 2);
+ break;
+ case 2000:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 3);
+ break;
+ case 2500:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 4);
+ break;
+ case 3000:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 5);
+ break;
+ case 3500:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 6);
+ break;
+ case 4000:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_RESET_TRIG_TIME, 7);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value(%u) for adi,reset-trigger-ms\n",
+ prop_val);
+ }
+ }
+
+ ret = device_property_read_u32(dev, "adi,reset-pulse-width-us", &prop_val);
+ if (!ret) {
+ switch (prop_val) {
+ case 500:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 0);
+ break;
+ case 1000:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 1);
+ break;
+ case 2000:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 2);
+ break;
+ case 10000:
+ adp5585->reset_cfg |= FIELD_PREP(ADP5585_PULSE_WIDTH, 3);
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value(%u) for adi,reset-pulse-width-us\n",
+ prop_val);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adp5585_add_devices(const struct adp5585_dev *adp5585)
+{
+ struct device *dev = adp5585->dev;
+ const struct mfd_cell *cells;
+ int ret;
+
+ if (adp5585->id == ADP5585_MAN_ID_VALUE)
+ cells = adp5585_devs;
+ else
+ cells = adp5589_devs;
+
+ if (device_property_present(dev, "#pwm-cells")) {
+ /* Make sure the PWM output pin is not used by the GPIO or INPUT devices */
+ __set_bit(ADP5585_PWM_OUT, adp5585->pin_usage);
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ &cells[ADP5585_DEV_PWM], 1, NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWM device\n");
+ }
+
+ if (device_property_present(dev, "#gpio-cells")) {
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO,
+ &cells[ADP5585_DEV_GPIO], 1, NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add GPIO device\n");
+ }
+
+ if (device_property_present(adp5585->dev, "adi,keypad-pins")) {
+ ret = devm_mfd_add_devices(adp5585->dev, PLATFORM_DEVID_AUTO,
+ &cells[ADP5585_DEV_INPUT], 1, NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add input device\n");
+ }
+
+ return 0;
+}
+
+static void adp5585_osc_disable(void *data)
+{
+ const struct adp5585_dev *adp5585 = data;
+
+ regmap_write(adp5585->regmap, ADP5585_GENERAL_CFG, 0);
+}
+
+static void adp5585_report_events(struct adp5585_dev *adp5585, int ev_cnt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ev_cnt; i++) {
+ unsigned long key_val, key_press;
+ unsigned int key;
+ int ret;
+
+ ret = regmap_read(adp5585->regmap, ADP5585_FIFO_1 + i, &key);
+ if (ret)
+ return;
+
+ key_val = FIELD_GET(ADP5585_KEY_EVENT_MASK, key);
+ key_press = FIELD_GET(ADP5585_KEV_EV_PRESS_MASK, key);
+
+ blocking_notifier_call_chain(&adp5585->event_notifier, key_val, (void *)key_press);
+ }
+}
+
+static irqreturn_t adp5585_irq(int irq, void *data)
+{
+ struct adp5585_dev *adp5585 = data;
+ unsigned int status, ev_cnt;
+ int ret;
+
+ ret = regmap_read(adp5585->regmap, ADP5585_INT_STATUS, &status);
+ if (ret)
+ return IRQ_HANDLED;
+
+ if (status & ADP5585_OVRFLOW_INT)
+ dev_err_ratelimited(adp5585->dev, "Event overflow error\n");
+
+ if (!(status & ADP5585_EVENT_INT))
+ goto out_irq;
+
+ ret = regmap_read(adp5585->regmap, ADP5585_STATUS, &ev_cnt);
+ if (ret)
+ goto out_irq;
+
+ ev_cnt = FIELD_GET(ADP5585_EC_MASK, ev_cnt);
+ if (!ev_cnt)
+ goto out_irq;
+
+ adp5585_report_events(adp5585, ev_cnt);
+out_irq:
+ regmap_write(adp5585->regmap, ADP5585_INT_STATUS, status);
+ return IRQ_HANDLED;
+}
+
+static int adp5585_setup(struct adp5585_dev *adp5585)
+{
+ const struct adp5585_regs *regs = adp5585->regs;
+ unsigned int reg_val = 0, i;
+ int ret;
+
+ /* If pin_6 (ROW5/GPI6) is not available, make sure to mark it as "busy" */
+ if (!adp5585->has_pin6)
+ __set_bit(ADP5585_ROW5, adp5585->pin_usage);
+
+ /* Configure the device with reset and unlock events */
+ for (i = 0; i < adp5585->nkeys_unlock; i++) {
+ ret = regmap_write(adp5585->regmap, ADP5589_UNLOCK1 + i,
+ adp5585->unlock_keys[i] | ADP5589_UNLOCK_EV_PRESS);
+ if (ret)
+ return ret;
+ }
+
+ if (adp5585->nkeys_unlock) {
+ ret = regmap_update_bits(adp5585->regmap, ADP5589_UNLOCK_TIMERS,
+ ADP5589_UNLOCK_TIMER, adp5585->unlock_time);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(adp5585->regmap, ADP5589_LOCK_CFG, ADP5589_LOCK_EN);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < adp5585->nkeys_reset1; i++) {
+ ret = regmap_write(adp5585->regmap, regs->reset1_event_a + i,
+ adp5585->reset1_keys[i] | ADP5585_RESET_EV_PRESS);
+ if (ret)
+ return ret;
+
+ /* Mark that pin as not usable for the INPUT and GPIO devices. */
+ __set_bit(ADP5585_RESET1_OUT, adp5585->pin_usage);
+ }
+
+ for (i = 0; i < adp5585->nkeys_reset2; i++) {
+ ret = regmap_write(adp5585->regmap, regs->reset2_event_a + i,
+ adp5585->reset2_keys[i] | ADP5585_RESET_EV_PRESS);
+ if (ret)
+ return ret;
+
+ __set_bit(adp5585->reset2_out, adp5585->pin_usage);
+ }
+
+ if (adp5585->nkeys_reset1 || adp5585->nkeys_reset2) {
+ ret = regmap_write(adp5585->regmap, regs->reset_cfg, adp5585->reset_cfg);
+ if (ret)
+ return ret;
+
+ /* If there's a reset1 event, then R4 is used as an output for the reset signal */
+ if (adp5585->nkeys_reset1)
+ reg_val = ADP5585_R4_EXTEND_CFG_RESET1;
+ /* If there's a reset2 event, then C4 is used as an output for the reset signal */
+ if (adp5585->nkeys_reset2)
+ reg_val |= ADP5585_C4_EXTEND_CFG_RESET2;
+
+ ret = regmap_update_bits(adp5585->regmap, regs->ext_cfg,
+ ADP5585_C4_EXTEND_CFG_MASK | ADP5585_R4_EXTEND_CFG_MASK,
+ reg_val);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear any possible event by reading all the FIFO entries */
+ for (i = 0; i < ADP5585_EV_MAX; i++) {
+ ret = regmap_read(adp5585->regmap, ADP5585_FIFO_1 + i, &reg_val);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_write(adp5585->regmap, regs->poll_ptime_cfg, adp5585->ev_poll_time);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable the internal oscillator, as it's shared between multiple
+ * functions.
+ */
+ ret = regmap_write(adp5585->regmap, regs->gen_cfg,
+ ADP5585_OSC_FREQ_500KHZ | ADP5585_INT_CFG | ADP5585_OSC_EN);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(adp5585->dev, adp5585_osc_disable, adp5585);
+}
+
+static int adp5585_parse_fw(struct adp5585_dev *adp5585)
+{
+ unsigned int prop_val;
+ int ret;
+
+ ret = device_property_read_u32(adp5585->dev, "poll-interval", &prop_val);
+ if (!ret) {
+ adp5585->ev_poll_time = prop_val / 10 - 1;
+ /*
+ * ev_poll_time is the raw value to be written to the register and 0 to 3 are the
+ * valid values.
+ */
+ if (adp5585->ev_poll_time > 3)
+ return dev_err_probe(adp5585->dev, -EINVAL,
+ "Invalid value(%u) for poll-interval\n", prop_val);
+ }
+
+ ret = adp5585_unlock_ev_parse(adp5585);
+ if (ret)
+ return ret;
+
+ return adp5585_reset_ev_parse(adp5585);
+}
+
+static void adp5585_irq_disable(void *data)
+{
+ struct adp5585_dev *adp5585 = data;
+
+ regmap_write(adp5585->regmap, adp5585->regs->int_en, 0);
+}
+
+static int adp5585_irq_enable(struct i2c_client *i2c,
+ struct adp5585_dev *adp5585)
+{
+ const struct adp5585_regs *regs = adp5585->regs;
+ unsigned int stat;
+ int ret;
+
+ if (i2c->irq <= 0)
+ return 0;
+
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL, adp5585_irq,
+ IRQF_ONESHOT, i2c->name, adp5585);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear any possible outstanding interrupts before enabling them. We do that by reading
+ * the status register and writing back the same value.
+ */
+ ret = regmap_read(adp5585->regmap, ADP5585_INT_STATUS, &stat);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(adp5585->regmap, ADP5585_INT_STATUS, stat);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(adp5585->regmap, regs->int_en, ADP5585_OVRFLOW_IEN | ADP5585_EVENT_IEN);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(&i2c->dev, adp5585_irq_disable, adp5585);
+}
+
static int adp5585_i2c_probe(struct i2c_client *i2c)
{
- const struct regmap_config *regmap_config;
+ struct regmap_config *regmap_config;
struct adp5585_dev *adp5585;
+ struct gpio_desc *gpio;
unsigned int id;
int ret;
@@ -122,8 +700,36 @@ static int adp5585_i2c_probe(struct i2c_client *i2c)
return -ENOMEM;
i2c_set_clientdata(i2c, adp5585);
+ adp5585->dev = &i2c->dev;
+ adp5585->irq = i2c->irq;
+ BLOCKING_INIT_NOTIFIER_HEAD(&adp5585->event_notifier);
+
+ adp5585->variant = (enum adp5585_variant)(uintptr_t)i2c_get_match_data(i2c);
+ if (!adp5585->variant)
+ return -ENODEV;
+
+ regmap_config = adp5585_fill_variant_config(adp5585);
+ if (IS_ERR(regmap_config))
+ return PTR_ERR(regmap_config);
+
+ ret = devm_regulator_get_enable(&i2c->dev, "vdd");
+ if (ret)
+ return ret;
+
+ gpio = devm_gpiod_get_optional(&i2c->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ /*
+ * Note the timings are not documented anywhere in the datasheet. They are just
+ * reasonable values that work.
+ */
+ if (gpio) {
+ fsleep(30);
+ gpiod_set_value_cansleep(gpio, 0);
+ fsleep(60);
+ }
- regmap_config = i2c_get_match_data(i2c);
adp5585->regmap = devm_regmap_init_i2c(i2c, regmap_config);
if (IS_ERR(adp5585->regmap))
return dev_err_probe(&i2c->dev, PTR_ERR(adp5585->regmap),
@@ -134,24 +740,37 @@ static int adp5585_i2c_probe(struct i2c_client *i2c)
return dev_err_probe(&i2c->dev, ret,
"Failed to read device ID\n");
- if ((id & ADP5585_MAN_ID_MASK) != ADP5585_MAN_ID_VALUE)
+ id &= ADP5585_MAN_ID_MASK;
+ if (id != adp5585->id)
return dev_err_probe(&i2c->dev, -ENODEV,
"Invalid device ID 0x%02x\n", id);
- ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_AUTO,
- adp5585_devs, ARRAY_SIZE(adp5585_devs),
- NULL, 0, NULL);
+ adp5585->pin_usage = devm_bitmap_zalloc(&i2c->dev, adp5585->n_pins, GFP_KERNEL);
+ if (!adp5585->pin_usage)
+ return -ENOMEM;
+
+ ret = adp5585_parse_fw(adp5585);
if (ret)
- return dev_err_probe(&i2c->dev, ret,
- "Failed to add child devices\n");
+ return ret;
- return 0;
+ ret = adp5585_setup(adp5585);
+ if (ret)
+ return ret;
+
+ ret = adp5585_add_devices(adp5585);
+ if (ret)
+ return ret;
+
+ return adp5585_irq_enable(i2c, adp5585);
}
static int adp5585_suspend(struct device *dev)
{
struct adp5585_dev *adp5585 = dev_get_drvdata(dev);
+ if (adp5585->irq)
+ disable_irq(adp5585->irq);
+
regcache_cache_only(adp5585->regmap, true);
return 0;
@@ -160,11 +779,19 @@ static int adp5585_suspend(struct device *dev)
static int adp5585_resume(struct device *dev)
{
struct adp5585_dev *adp5585 = dev_get_drvdata(dev);
+ int ret;
regcache_cache_only(adp5585->regmap, false);
regcache_mark_dirty(adp5585->regmap);
- return regcache_sync(adp5585->regmap);
+ ret = regcache_sync(adp5585->regmap);
+ if (ret)
+ return ret;
+
+ if (adp5585->irq)
+ enable_irq(adp5585->irq);
+
+ return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(adp5585_pm, adp5585_suspend, adp5585_resume);
@@ -172,19 +799,31 @@ static DEFINE_SIMPLE_DEV_PM_OPS(adp5585_pm, adp5585_suspend, adp5585_resume);
static const struct of_device_id adp5585_of_match[] = {
{
.compatible = "adi,adp5585-00",
- .data = &adp5585_regmap_configs[ADP5585_REGMAP_00],
+ .data = (void *)ADP5585_00,
}, {
.compatible = "adi,adp5585-01",
- .data = &adp5585_regmap_configs[ADP5585_REGMAP_00],
+ .data = (void *)ADP5585_01,
}, {
.compatible = "adi,adp5585-02",
- .data = &adp5585_regmap_configs[ADP5585_REGMAP_02],
+ .data = (void *)ADP5585_02,
}, {
.compatible = "adi,adp5585-03",
- .data = &adp5585_regmap_configs[ADP5585_REGMAP_00],
+ .data = (void *)ADP5585_03,
}, {
.compatible = "adi,adp5585-04",
- .data = &adp5585_regmap_configs[ADP5585_REGMAP_04],
+ .data = (void *)ADP5585_04,
+ }, {
+ .compatible = "adi,adp5589-00",
+ .data = (void *)ADP5589_00,
+ }, {
+ .compatible = "adi,adp5589-01",
+ .data = (void *)ADP5589_01,
+ }, {
+ .compatible = "adi,adp5589-02",
+ .data = (void *)ADP5589_02,
+ }, {
+ .compatible = "adi,adp5589",
+ .data = (void *)ADP5589_00,
},
{ /* sentinel */ }
};
diff --git a/drivers/mfd/macsmc.c b/drivers/mfd/macsmc.c
new file mode 100644
index 000000000000..870c8b2028a8
--- /dev/null
+++ b/drivers/mfd/macsmc.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC (System Management Controller) MFD driver
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/math.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/unaligned.h>
+
+#define SMC_ENDPOINT 0x20
+
+/* We don't actually know the true size here but this seems reasonable */
+#define SMC_SHMEM_SIZE 0x1000
+#define SMC_MAX_SIZE 255
+
+#define SMC_MSG_READ_KEY 0x10
+#define SMC_MSG_WRITE_KEY 0x11
+#define SMC_MSG_GET_KEY_BY_INDEX 0x12
+#define SMC_MSG_GET_KEY_INFO 0x13
+#define SMC_MSG_INITIALIZE 0x17
+#define SMC_MSG_NOTIFICATION 0x18
+#define SMC_MSG_RW_KEY 0x20
+
+#define SMC_DATA GENMASK_ULL(63, 32)
+#define SMC_WSIZE GENMASK_ULL(31, 24)
+#define SMC_SIZE GENMASK_ULL(23, 16)
+#define SMC_ID GENMASK_ULL(15, 12)
+#define SMC_MSG GENMASK_ULL(7, 0)
+#define SMC_RESULT SMC_MSG
+
+#define SMC_TIMEOUT_MS 500
+
+static const struct mfd_cell apple_smc_devs[] = {
+ MFD_CELL_OF("macsmc-gpio", NULL, NULL, 0, 0, "apple,smc-gpio"),
+ MFD_CELL_OF("macsmc-reboot", NULL, NULL, 0, 0, "apple,smc-reboot"),
+};
+
+static int apple_smc_cmd_locked(struct apple_smc *smc, u64 cmd, u64 arg,
+ u64 size, u64 wsize, u32 *ret_data)
+{
+ u8 result;
+ int ret;
+ u64 msg;
+
+ lockdep_assert_held(&smc->mutex);
+
+ if (smc->boot_stage != APPLE_SMC_INITIALIZED)
+ return -EIO;
+ if (smc->atomic_mode)
+ return -EIO;
+
+ reinit_completion(&smc->cmd_done);
+
+ smc->msg_id = (smc->msg_id + 1) & 0xf;
+ msg = (FIELD_PREP(SMC_MSG, cmd) |
+ FIELD_PREP(SMC_SIZE, size) |
+ FIELD_PREP(SMC_WSIZE, wsize) |
+ FIELD_PREP(SMC_ID, smc->msg_id) |
+ FIELD_PREP(SMC_DATA, arg));
+
+ ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, false);
+ if (ret) {
+ dev_err(smc->dev, "Failed to send command\n");
+ return ret;
+ }
+
+ if (wait_for_completion_timeout(&smc->cmd_done, msecs_to_jiffies(SMC_TIMEOUT_MS)) <= 0) {
+ dev_err(smc->dev, "Command timed out (%llx)", msg);
+ return -ETIMEDOUT;
+ }
+
+ if (FIELD_GET(SMC_ID, smc->cmd_ret) != smc->msg_id) {
+ dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n",
+ smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret));
+ return -EIO;
+ }
+
+ result = FIELD_GET(SMC_RESULT, smc->cmd_ret);
+ if (result)
+ return -EIO;
+
+ if (ret_data)
+ *ret_data = FIELD_GET(SMC_DATA, smc->cmd_ret);
+
+ return FIELD_GET(SMC_SIZE, smc->cmd_ret);
+}
+
+static int apple_smc_cmd(struct apple_smc *smc, u64 cmd, u64 arg,
+ u64 size, u64 wsize, u32 *ret_data)
+{
+ guard(mutex)(&smc->mutex);
+
+ return apple_smc_cmd_locked(smc, cmd, arg, size, wsize, ret_data);
+}
+
+static int apple_smc_rw_locked(struct apple_smc *smc, smc_key key,
+ const void *wbuf, size_t wsize,
+ void *rbuf, size_t rsize)
+{
+ u64 smc_size, smc_wsize;
+ u32 rdata;
+ int ret;
+ u64 cmd;
+
+ lockdep_assert_held(&smc->mutex);
+
+ if (rsize > SMC_MAX_SIZE)
+ return -EINVAL;
+ if (wsize > SMC_MAX_SIZE)
+ return -EINVAL;
+
+ if (rsize && wsize) {
+ cmd = SMC_MSG_RW_KEY;
+ memcpy_toio(smc->shmem.iomem, wbuf, wsize);
+ smc_size = rsize;
+ smc_wsize = wsize;
+ } else if (wsize && !rsize) {
+ cmd = SMC_MSG_WRITE_KEY;
+ memcpy_toio(smc->shmem.iomem, wbuf, wsize);
+ /*
+ * Setting size to the length we want to write and wsize to 0
+ * looks silly but that's how the SMC protocol works ¯\_(ツ)_/¯
+ */
+ smc_size = wsize;
+ smc_wsize = 0;
+ } else if (!wsize && rsize) {
+ cmd = SMC_MSG_READ_KEY;
+ smc_size = rsize;
+ smc_wsize = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = apple_smc_cmd_locked(smc, cmd, key, smc_size, smc_wsize, &rdata);
+ if (ret < 0)
+ return ret;
+
+ if (rsize) {
+ /*
+ * Small data <= 4 bytes is returned as part of the reply
+ * message which is sent over the mailbox FIFO. Everything
+ * bigger has to be copied from SRAM which is mapped as
+ * Device memory.
+ */
+ if (rsize <= 4)
+ memcpy(rbuf, &rdata, rsize);
+ else
+ memcpy_fromio(rbuf, smc->shmem.iomem, rsize);
+ }
+
+ return ret;
+}
+
+int apple_smc_read(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+ guard(mutex)(&smc->mutex);
+
+ return apple_smc_rw_locked(smc, key, NULL, 0, buf, size);
+}
+EXPORT_SYMBOL(apple_smc_read);
+
+int apple_smc_write(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+ guard(mutex)(&smc->mutex);
+
+ return apple_smc_rw_locked(smc, key, buf, size, NULL, 0);
+}
+EXPORT_SYMBOL(apple_smc_write);
+
+int apple_smc_rw(struct apple_smc *smc, smc_key key, void *wbuf, size_t wsize,
+ void *rbuf, size_t rsize)
+{
+ guard(mutex)(&smc->mutex);
+
+ return apple_smc_rw_locked(smc, key, wbuf, wsize, rbuf, rsize);
+}
+EXPORT_SYMBOL(apple_smc_rw);
+
+int apple_smc_get_key_by_index(struct apple_smc *smc, int index, smc_key *key)
+{
+ int ret;
+
+ ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_BY_INDEX, index, 0, 0, key);
+
+ *key = swab32(*key);
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_get_key_by_index);
+
+int apple_smc_get_key_info(struct apple_smc *smc, smc_key key, struct apple_smc_key_info *info)
+{
+ u8 key_info[6];
+ int ret;
+
+ ret = apple_smc_cmd(smc, SMC_MSG_GET_KEY_INFO, key, 0, 0, NULL);
+ if (ret >= 0 && info) {
+ memcpy_fromio(key_info, smc->shmem.iomem, sizeof(key_info));
+ info->size = key_info[0];
+ info->type_code = get_unaligned_be32(&key_info[1]);
+ info->flags = key_info[5];
+ }
+ return ret;
+}
+EXPORT_SYMBOL(apple_smc_get_key_info);
+
+int apple_smc_enter_atomic(struct apple_smc *smc)
+{
+ guard(mutex)(&smc->mutex);
+
+ /*
+ * Disable notifications since this is called before shutdown and no
+ * notification handler will be able to handle the notification
+ * using atomic operations only. Also ignore any failure here
+ * because we're about to shut down or reboot anyway.
+ * We can't use apple_smc_write_flag here since that would try to lock
+ * smc->mutex again.
+ */
+ const u8 flag = 0;
+
+ apple_smc_rw_locked(smc, SMC_KEY(NTAP), &flag, sizeof(flag), NULL, 0);
+
+ smc->atomic_mode = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(apple_smc_enter_atomic);
+
+int apple_smc_write_atomic(struct apple_smc *smc, smc_key key, void *buf, size_t size)
+{
+ guard(spinlock_irqsave)(&smc->lock);
+ u8 result;
+ int ret;
+ u64 msg;
+
+ if (size > SMC_MAX_SIZE || size == 0)
+ return -EINVAL;
+
+ if (smc->boot_stage != APPLE_SMC_INITIALIZED)
+ return -EIO;
+ if (!smc->atomic_mode)
+ return -EIO;
+
+ memcpy_toio(smc->shmem.iomem, buf, size);
+ smc->msg_id = (smc->msg_id + 1) & 0xf;
+ msg = (FIELD_PREP(SMC_MSG, SMC_MSG_WRITE_KEY) |
+ FIELD_PREP(SMC_SIZE, size) |
+ FIELD_PREP(SMC_ID, smc->msg_id) |
+ FIELD_PREP(SMC_DATA, key));
+ smc->atomic_pending = true;
+
+ ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT, msg, NULL, true);
+ if (ret < 0) {
+ dev_err(smc->dev, "Failed to send command (%d)\n", ret);
+ return ret;
+ }
+
+ while (smc->atomic_pending) {
+ ret = apple_rtkit_poll(smc->rtk);
+ if (ret < 0) {
+ dev_err(smc->dev, "RTKit poll failed (%llx)", msg);
+ return ret;
+ }
+ udelay(100);
+ }
+
+ if (FIELD_GET(SMC_ID, smc->cmd_ret) != smc->msg_id) {
+ dev_err(smc->dev, "Command sequence mismatch (expected %d, got %d)\n",
+ smc->msg_id, (unsigned int)FIELD_GET(SMC_ID, smc->cmd_ret));
+ return -EIO;
+ }
+
+ result = FIELD_GET(SMC_RESULT, smc->cmd_ret);
+ if (result)
+ return -EIO;
+
+ return FIELD_GET(SMC_SIZE, smc->cmd_ret);
+}
+EXPORT_SYMBOL(apple_smc_write_atomic);
+
+static void apple_smc_rtkit_crashed(void *cookie, const void *bfr, size_t bfr_len)
+{
+ struct apple_smc *smc = cookie;
+
+ smc->boot_stage = APPLE_SMC_ERROR_CRASHED;
+ dev_err(smc->dev, "SMC crashed! Your system will reboot in a few seconds...\n");
+}
+
+static int apple_smc_rtkit_shmem_setup(void *cookie, struct apple_rtkit_shmem *bfr)
+{
+ struct apple_smc *smc = cookie;
+ size_t bfr_end;
+
+ if (!bfr->iova) {
+ dev_err(smc->dev, "RTKit wants a RAM buffer\n");
+ return -EIO;
+ }
+
+ if (check_add_overflow(bfr->iova, bfr->size - 1, &bfr_end))
+ return -EFAULT;
+
+ if (bfr->iova < smc->sram->start || bfr->iova > smc->sram->end ||
+ bfr_end > smc->sram->end) {
+ dev_err(smc->dev, "RTKit buffer request outside SRAM region: [0x%llx, 0x%llx]\n",
+ (unsigned long long)bfr->iova,
+ (unsigned long long)bfr_end);
+ return -EFAULT;
+ }
+
+ bfr->iomem = smc->sram_base + (bfr->iova - smc->sram->start);
+ bfr->is_mapped = true;
+
+ return 0;
+}
+
+static bool apple_smc_rtkit_recv_early(void *cookie, u8 endpoint, u64 message)
+{
+ struct apple_smc *smc = cookie;
+
+ if (endpoint != SMC_ENDPOINT) {
+ dev_warn(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint);
+ return false;
+ }
+
+ if (smc->boot_stage == APPLE_SMC_BOOTING) {
+ int ret;
+
+ smc->shmem.iova = message;
+ smc->shmem.size = SMC_SHMEM_SIZE;
+ ret = apple_smc_rtkit_shmem_setup(smc, &smc->shmem);
+ if (ret < 0) {
+ smc->boot_stage = APPLE_SMC_ERROR_NO_SHMEM;
+ dev_err(smc->dev, "Failed to initialize shared memory (%d)\n", ret);
+ } else {
+ smc->boot_stage = APPLE_SMC_INITIALIZED;
+ }
+ complete(&smc->init_done);
+ } else if (FIELD_GET(SMC_MSG, message) == SMC_MSG_NOTIFICATION) {
+ /* Handle these in the RTKit worker thread */
+ return false;
+ } else {
+ smc->cmd_ret = message;
+ if (smc->atomic_pending)
+ smc->atomic_pending = false;
+ else
+ complete(&smc->cmd_done);
+ }
+
+ return true;
+}
+
+static void apple_smc_rtkit_recv(void *cookie, u8 endpoint, u64 message)
+{
+ struct apple_smc *smc = cookie;
+
+ if (endpoint != SMC_ENDPOINT) {
+ dev_warn(smc->dev, "Received message for unknown endpoint 0x%x\n", endpoint);
+ return;
+ }
+
+ if (FIELD_GET(SMC_MSG, message) != SMC_MSG_NOTIFICATION) {
+ dev_warn(smc->dev, "Received unknown message from worker: 0x%llx\n", message);
+ return;
+ }
+
+ blocking_notifier_call_chain(&smc->event_handlers, FIELD_GET(SMC_DATA, message), NULL);
+}
+
+static const struct apple_rtkit_ops apple_smc_rtkit_ops = {
+ .crashed = apple_smc_rtkit_crashed,
+ .recv_message = apple_smc_rtkit_recv,
+ .recv_message_early = apple_smc_rtkit_recv_early,
+ .shmem_setup = apple_smc_rtkit_shmem_setup,
+};
+
+static void apple_smc_rtkit_shutdown(void *data)
+{
+ struct apple_smc *smc = data;
+
+ /* Shut down SMC firmware, if it's not completely wedged */
+ if (apple_rtkit_is_running(smc->rtk))
+ apple_rtkit_quiesce(smc->rtk);
+}
+
+static void apple_smc_disable_notifications(void *data)
+{
+ struct apple_smc *smc = data;
+
+ apple_smc_write_flag(smc, SMC_KEY(NTAP), false);
+}
+
+static int apple_smc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_smc *smc;
+ u32 count;
+ int ret;
+
+ smc = devm_kzalloc(dev, sizeof(*smc), GFP_KERNEL);
+ if (!smc)
+ return -ENOMEM;
+
+ smc->dev = &pdev->dev;
+ smc->sram_base = devm_platform_get_and_ioremap_resource(pdev, 1, &smc->sram);
+ if (IS_ERR(smc->sram_base))
+ return dev_err_probe(dev, PTR_ERR(smc->sram_base), "Failed to map SRAM region");
+
+ smc->rtk = devm_apple_rtkit_init(dev, smc, NULL, 0, &apple_smc_rtkit_ops);
+ if (IS_ERR(smc->rtk))
+ return dev_err_probe(dev, PTR_ERR(smc->rtk), "Failed to initialize RTKit");
+
+ smc->boot_stage = APPLE_SMC_BOOTING;
+ ret = apple_rtkit_wake(smc->rtk);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to wake up SMC");
+
+ ret = devm_add_action_or_reset(dev, apple_smc_rtkit_shutdown, smc);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register rtkit shutdown action");
+
+ ret = apple_rtkit_start_ep(smc->rtk, SMC_ENDPOINT);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to start SMC endpoint");
+
+ init_completion(&smc->init_done);
+ init_completion(&smc->cmd_done);
+
+ ret = apple_rtkit_send_message(smc->rtk, SMC_ENDPOINT,
+ FIELD_PREP(SMC_MSG, SMC_MSG_INITIALIZE), NULL, false);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to send init message");
+
+ if (wait_for_completion_timeout(&smc->init_done, msecs_to_jiffies(SMC_TIMEOUT_MS)) == 0) {
+ dev_err(dev, "Timed out initializing SMC");
+ return -ETIMEDOUT;
+ }
+
+ if (smc->boot_stage != APPLE_SMC_INITIALIZED) {
+ dev_err(dev, "SMC failed to boot successfully, boot stage=%d\n", smc->boot_stage);
+ return -EIO;
+ }
+
+ dev_set_drvdata(&pdev->dev, smc);
+ BLOCKING_INIT_NOTIFIER_HEAD(&smc->event_handlers);
+
+ ret = apple_smc_read_u32(smc, SMC_KEY(#KEY), &count);
+ if (ret)
+ return dev_err_probe(smc->dev, ret, "Failed to get key count");
+ smc->key_count = be32_to_cpu(count);
+
+ /* Enable notifications */
+ apple_smc_write_flag(smc, SMC_KEY(NTAP), true);
+ ret = devm_add_action_or_reset(dev, apple_smc_disable_notifications, smc);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register notification disable action");
+
+ ret = devm_mfd_add_devices(smc->dev, PLATFORM_DEVID_NONE,
+ apple_smc_devs, ARRAY_SIZE(apple_smc_devs),
+ NULL, 0, NULL);
+ if (ret)
+ return dev_err_probe(smc->dev, ret, "Failed to register sub-devices");
+
+ return 0;
+}
+
+static const struct of_device_id apple_smc_of_match[] = {
+ { .compatible = "apple,smc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_smc_of_match);
+
+static struct platform_driver apple_smc_driver = {
+ .driver = {
+ .name = "macsmc",
+ .of_match_table = apple_smc_of_match,
+ },
+ .probe = apple_smc_probe,
+};
+module_platform_driver(apple_smc_driver);
+
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
+MODULE_AUTHOR("Sven Peter <sven@kernel.org>");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC driver");
diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c
index ef03d6cec9ff..fc2daffc4352 100644
--- a/drivers/mfd/vexpress-sysreg.c
+++ b/drivers/mfd/vexpress-sysreg.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/stat.h>
@@ -37,22 +38,34 @@
/* The sysreg block is just a random collection of various functions... */
-static struct bgpio_pdata vexpress_sysreg_sys_led_pdata = {
- .label = "sys_led",
- .base = -1,
- .ngpio = 8,
+static const struct property_entry vexpress_sysreg_sys_led_props[] = {
+ PROPERTY_ENTRY_STRING("label", "sys_led"),
+ PROPERTY_ENTRY_U32("ngpios", 8),
+ { }
};
-static struct bgpio_pdata vexpress_sysreg_sys_mci_pdata = {
- .label = "sys_mci",
- .base = -1,
- .ngpio = 2,
+static const struct software_node vexpress_sysreg_sys_led_swnode = {
+ .properties = vexpress_sysreg_sys_led_props,
};
-static struct bgpio_pdata vexpress_sysreg_sys_flash_pdata = {
- .label = "sys_flash",
- .base = -1,
- .ngpio = 1,
+static const struct property_entry vexpress_sysreg_sys_mci_props[] = {
+ PROPERTY_ENTRY_STRING("label", "sys_mci"),
+ PROPERTY_ENTRY_U32("ngpios", 2),
+ { }
+};
+
+static const struct software_node vexpress_sysreg_sys_mci_swnode = {
+ .properties = vexpress_sysreg_sys_mci_props,
+};
+
+static const struct property_entry vexpress_sysreg_sys_flash_props[] = {
+ PROPERTY_ENTRY_STRING("label", "sys_flash"),
+ PROPERTY_ENTRY_U32("ngpios", 1),
+ { }
+};
+
+static const struct software_node vexpress_sysreg_sys_flash_swnode = {
+ .properties = vexpress_sysreg_sys_flash_props,
};
static struct mfd_cell vexpress_sysreg_cells[] = {
@@ -61,22 +74,19 @@ static struct mfd_cell vexpress_sysreg_cells[] = {
.of_compatible = "arm,vexpress-sysreg,sys_led",
.num_resources = 1,
.resources = &DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"),
- .platform_data = &vexpress_sysreg_sys_led_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_led_pdata),
+ .swnode = &vexpress_sysreg_sys_led_swnode,
}, {
.name = "basic-mmio-gpio",
.of_compatible = "arm,vexpress-sysreg,sys_mci",
.num_resources = 1,
.resources = &DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"),
- .platform_data = &vexpress_sysreg_sys_mci_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_mci_pdata),
+ .swnode = &vexpress_sysreg_sys_mci_swnode,
}, {
.name = "basic-mmio-gpio",
.of_compatible = "arm,vexpress-sysreg,sys_flash",
.num_resources = 1,
.resources = &DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"),
- .platform_data = &vexpress_sysreg_sys_flash_pdata,
- .pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata),
+ .swnode = &vexpress_sysreg_sys_flash_swnode,
}, {
.name = "vexpress-syscfg",
.num_resources = 1,
diff --git a/drivers/misc/amd-sbi/rmi-core.c b/drivers/misc/amd-sbi/rmi-core.c
index b653a21a909e..3dec2fc00124 100644
--- a/drivers/misc/amd-sbi/rmi-core.c
+++ b/drivers/misc/amd-sbi/rmi-core.c
@@ -42,7 +42,6 @@
#define RD_MCA_CMD 0x86
/* CPUID MCAMSR mask & index */
-#define CPUID_MCA_THRD_MASK GENMASK(15, 0)
#define CPUID_MCA_THRD_INDEX 32
#define CPUID_MCA_FUNC_MASK GENMASK(31, 0)
#define CPUID_EXT_FUNC_INDEX 56
@@ -129,7 +128,7 @@ static int rmi_cpuid_read(struct sbrmi_data *data,
goto exit_unlock;
}
- thread = msg->cpu_in_out << CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK;
+ thread = msg->cpu_in_out >> CPUID_MCA_THRD_INDEX;
/* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */
if (thread > 127) {
@@ -210,7 +209,7 @@ static int rmi_mca_msr_read(struct sbrmi_data *data,
goto exit_unlock;
}
- thread = msg->mcamsr_in_out << CPUID_MCA_THRD_INDEX & CPUID_MCA_THRD_MASK;
+ thread = msg->mcamsr_in_out >> CPUID_MCA_THRD_INDEX;
/* Thread > 127, Thread128 CS register, 1'b1 needs to be set to 1 */
if (thread > 127) {
@@ -321,6 +320,10 @@ int rmi_mailbox_xfer(struct sbrmi_data *data,
ret = regmap_read(data->regmap, SBRMI_OUTBNDMSG7, &ec);
if (ret || ec)
goto exit_clear_alert;
+
+ /* Clear the input value before updating the output data */
+ msg->mb_in_out = 0;
+
/*
* For a read operation, the initiator (BMC) reads the firmware
* response Command Data Out[31:0] from SBRMI::OutBndMsg_inst[4:1]
@@ -373,7 +376,8 @@ static int apml_rmi_reg_xfer(struct sbrmi_data *data,
mutex_unlock(&data->lock);
if (msg.rflag && !ret)
- return copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_reg_xfer_msg)))
+ return -EFAULT;
return ret;
}
@@ -391,7 +395,9 @@ static int apml_mailbox_xfer(struct sbrmi_data *data, struct apml_mbox_msg __use
if (ret && ret != -EPROTOTYPE)
return ret;
- return copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_mbox_msg)))
+ return -EFAULT;
+ return ret;
}
static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user *arg)
@@ -408,7 +414,9 @@ static int apml_cpuid_xfer(struct sbrmi_data *data, struct apml_cpuid_msg __user
if (ret && ret != -EPROTOTYPE)
return ret;
- return copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_cpuid_msg)))
+ return -EFAULT;
+ return ret;
}
static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __user *arg)
@@ -425,7 +433,9 @@ static int apml_mcamsr_xfer(struct sbrmi_data *data, struct apml_mcamsr_msg __us
if (ret && ret != -EPROTOTYPE)
return ret;
- return copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg));
+ if (copy_to_user(arg, &msg, sizeof(struct apml_mcamsr_msg)))
+ return -EFAULT;
+ return ret;
}
static long sbrmi_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 39468bd27b85..03ebe33185f9 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -8,7 +8,7 @@ lkdtm-$(CONFIG_LKDTM) += perms.o
lkdtm-$(CONFIG_LKDTM) += refcount.o
lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += usercopy.o
-lkdtm-$(CONFIG_LKDTM) += stackleak.o
+lkdtm-$(CONFIG_LKDTM) += kstack_erase.o
lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/kstack_erase.c
index f1d022160913..4fd9b0bfb874 100644
--- a/drivers/misc/lkdtm/stackleak.c
+++ b/drivers/misc/lkdtm/kstack_erase.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* This code tests that the current task stack is properly erased (filled
- * with STACKLEAK_POISON).
+ * with KSTACK_ERASE_POISON).
*
* Authors:
* Alexander Popov <alex.popov@linux.com>
@@ -9,9 +9,9 @@
*/
#include "lkdtm.h"
-#include <linux/stackleak.h>
+#include <linux/kstack_erase.h>
-#if defined(CONFIG_GCC_PLUGIN_STACKLEAK)
+#if defined(CONFIG_KSTACK_ERASE)
/*
* Check that stackleak tracks the lowest stack pointer and erases the stack
* below this as expected.
@@ -85,7 +85,7 @@ static void noinstr check_stackleak_irqoff(void)
while (poison_low > task_stack_low) {
poison_low -= sizeof(unsigned long);
- if (*(unsigned long *)poison_low == STACKLEAK_POISON)
+ if (*(unsigned long *)poison_low == KSTACK_ERASE_POISON)
continue;
instrumentation_begin();
@@ -96,7 +96,7 @@ static void noinstr check_stackleak_irqoff(void)
}
instrumentation_begin();
- pr_info("stackleak stack usage:\n"
+ pr_info("kstack erase stack usage:\n"
" high offset: %lu bytes\n"
" current: %lu bytes\n"
" lowest: %lu bytes\n"
@@ -121,7 +121,7 @@ out:
instrumentation_end();
}
-static void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_KSTACK_ERASE(void)
{
unsigned long flags;
@@ -129,19 +129,19 @@ static void lkdtm_STACKLEAK_ERASING(void)
check_stackleak_irqoff();
local_irq_restore(flags);
}
-#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
-static void lkdtm_STACKLEAK_ERASING(void)
+#else /* defined(CONFIG_KSTACK_ERASE) */
+static void lkdtm_KSTACK_ERASE(void)
{
- if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
- pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_KSTACK_ERASE)) {
+ pr_err("XFAIL: stackleak is not enabled (CONFIG_KSTACK_ERASE=n)\n");
} else {
- pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_STACKLEAK=n)\n");
+ pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_KSTACK_ERASE=n)\n");
}
}
-#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+#endif /* defined(CONFIG_KSTACK_ERASE) */
static struct crashtype crashtypes[] = {
- CRASHTYPE(STACKLEAK_ERASING),
+ CRASHTYPE(KSTACK_ERASE),
};
struct crashtype_category stackleak_crashtypes = {
diff --git a/drivers/misc/ti_fpc202.c b/drivers/misc/ti_fpc202.c
index f7cde245ac95..f84cae50e2c9 100644
--- a/drivers/misc/ti_fpc202.c
+++ b/drivers/misc/ti_fpc202.c
@@ -118,20 +118,17 @@ static void fpc202_set_enable(struct fpc202_priv *priv, int enable)
gpiod_set_value(priv->en_gpio, enable);
}
-static void fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static int fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
{
struct fpc202_priv *priv = gpiochip_get_data(chip);
int ret;
u8 val;
- if (fpc202_gpio_get_dir(offset) == GPIO_LINE_DIRECTION_IN)
- return;
-
ret = fpc202_read(priv, FPC202_REG_OUT_A_OUT_B_VAL);
if (ret < 0) {
dev_err(&priv->client->dev, "Failed to set GPIO %d value! err %d\n", offset, ret);
- return;
+ return ret;
}
val = (u8)ret;
@@ -141,7 +138,7 @@ static void fpc202_gpio_set(struct gpio_chip *chip, unsigned int offset,
else
val &= ~BIT(offset - FPC202_GPIO_P0_S0_OUT_A);
- fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val);
+ return fpc202_write(priv, FPC202_REG_OUT_A_OUT_B_VAL, val);
}
static int fpc202_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -336,7 +333,7 @@ static int fpc202_probe(struct i2c_client *client)
priv->gpio.base = -1;
priv->gpio.direction_input = fpc202_gpio_direction_input;
priv->gpio.direction_output = fpc202_gpio_direction_output;
- priv->gpio.set = fpc202_gpio_set;
+ priv->gpio.set_rv = fpc202_gpio_set;
priv->gpio.get = fpc202_gpio_get;
priv->gpio.ngpio = FPC202_GPIO_COUNT;
priv->gpio.parent = dev;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index b66b637e2d57..656601754966 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -161,7 +161,7 @@ static int sdio_bus_probe(struct device *dev)
if (!id)
return -ENODEV;
- ret = dev_pm_domain_attach(dev, false);
+ ret = dev_pm_domain_attach(dev, 0);
if (ret)
return ret;
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index def054ddd256..4fced9b36c80 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -503,7 +503,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
- dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data);
+ dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len,
+ dir_data);
return;
}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 13a84b9309e0..e3877a1c72a9 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -913,7 +913,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot)
{
return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
(dmi_match(DMI_BIOS_VENDOR, "LENOVO") ||
- dmi_match(DMI_SYS_VENDOR, "IRBIS"));
+ dmi_match(DMI_SYS_VENDOR, "IRBIS") ||
+ dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA"));
}
static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot)
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index 73385ff4c0f3..9e94998e8df7 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -613,7 +613,8 @@ static const struct sdhci_ops sdhci_am654_ops = {
static const struct sdhci_pltfm_data sdhci_am654_pdata = {
.ops = &sdhci_am654_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = {
@@ -643,7 +644,8 @@ static const struct sdhci_ops sdhci_j721e_8bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = {
.ops = &sdhci_j721e_8bit_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = {
@@ -667,7 +669,8 @@ static const struct sdhci_ops sdhci_j721e_4bit_ops = {
static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = {
.ops = &sdhci_j721e_4bit_ops,
.quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
- .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_DISABLE_HW_TIMEOUT,
};
static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = {
diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig
index 80f015cf6e54..c68132e38138 100644
--- a/drivers/mux/Kconfig
+++ b/drivers/mux/Kconfig
@@ -48,6 +48,7 @@ config MUX_GPIO
config MUX_MMIO
tristate "MMIO/Regmap register bitfield-controlled Multiplexer"
depends on OF
+ select REGMAP_MMIO
help
MMIO/Regmap register bitfield-controlled Multiplexer controller.
diff --git a/drivers/mux/core.c b/drivers/mux/core.c
index 02be4ba37257..a3840fe0995f 100644
--- a/drivers/mux/core.c
+++ b/drivers/mux/core.c
@@ -98,13 +98,12 @@ struct mux_chip *mux_chip_alloc(struct device *dev,
if (WARN_ON(!dev || !controllers))
return ERR_PTR(-EINVAL);
- mux_chip = kzalloc(sizeof(*mux_chip) +
- controllers * sizeof(*mux_chip->mux) +
- sizeof_priv, GFP_KERNEL);
+ mux_chip = kzalloc(size_add(struct_size(mux_chip, mux, controllers),
+ sizeof_priv),
+ GFP_KERNEL);
if (!mux_chip)
return ERR_PTR(-ENOMEM);
- mux_chip->mux = (struct mux_control *)(mux_chip + 1);
mux_chip->dev.class = &mux_class;
mux_chip->dev.type = &mux_type;
mux_chip->dev.parent = dev;
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index ea8c807af4d8..3913971125de 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -145,13 +145,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
EXPORT_SYMBOL_GPL(can_change_state);
/* CAN device restart for bus-off recovery */
-static void can_restart(struct net_device *dev)
+static int can_restart(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
struct can_frame *cf;
int err;
+ if (!priv->do_set_mode)
+ return -EOPNOTSUPP;
+
if (netif_carrier_ok(dev))
netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n");
@@ -173,10 +176,14 @@ static void can_restart(struct net_device *dev)
if (err) {
netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err));
netif_carrier_off(dev);
+
+ return err;
} else {
netdev_dbg(dev, "Restarted\n");
priv->can_stats.restarts++;
}
+
+ return 0;
}
static void can_restart_work(struct work_struct *work)
@@ -201,9 +208,8 @@ int can_restart_now(struct net_device *dev)
return -EBUSY;
cancel_delayed_work_sync(&priv->restart_work);
- can_restart(dev);
- return 0;
+ return can_restart(dev);
}
/* CAN bus-off
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index a36842ace084..f0e3f0d538fb 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -285,6 +285,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART_MS]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow changing restart delay while running */
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -292,6 +298,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_CAN_RESTART]) {
+ if (!priv->do_set_mode) {
+ NL_SET_ERR_MSG(extack,
+ "Device doesn't support restart from Bus Off");
+ return -EOPNOTSUPP;
+ }
+
/* Do not allow a restart while not running */
if (!(dev->flags & IFF_UP))
return -EINVAL;
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 8edaa339d590..39b0b5277b11 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -343,21 +343,19 @@ static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
}
-static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
- const struct tcan4x5x_version_info *version_info)
+static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
{
struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
int ret;
- if (version_info->has_wake_pin) {
- tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
- GPIOD_OUT_HIGH);
- if (IS_ERR(tcan4x5x->device_wake_gpio)) {
- if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-wake",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(tcan4x5x->device_wake_gpio)) {
+ if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
- tcan4x5x_disable_wake(cdev);
- }
+ tcan4x5x->device_wake_gpio = NULL;
}
tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -369,14 +367,31 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
if (ret)
return ret;
- if (version_info->has_state_pin) {
- tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
- "device-state",
- GPIOD_IN);
- if (IS_ERR(tcan4x5x->device_state_gpio)) {
- tcan4x5x->device_state_gpio = NULL;
- tcan4x5x_disable_state(cdev);
- }
+ tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+ "device-state",
+ GPIOD_IN);
+ if (IS_ERR(tcan4x5x->device_state_gpio))
+ tcan4x5x->device_state_gpio = NULL;
+
+ return 0;
+}
+
+static int tcan4x5x_check_gpios(struct m_can_classdev *cdev,
+ const struct tcan4x5x_version_info *version_info)
+{
+ struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+ int ret;
+
+ if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) {
+ ret = tcan4x5x_disable_wake(cdev);
+ if (ret)
+ return ret;
+ }
+
+ if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) {
+ ret = tcan4x5x_disable_state(cdev);
+ if (ret)
+ return ret;
}
return 0;
@@ -468,15 +483,21 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_m_can_class_free_dev;
}
+ ret = tcan4x5x_get_gpios(mcan_class);
+ if (ret) {
+ dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
version_info = tcan4x5x_find_version(priv);
if (IS_ERR(version_info)) {
ret = PTR_ERR(version_info);
goto out_power;
}
- ret = tcan4x5x_get_gpios(mcan_class, version_info);
+ ret = tcan4x5x_check_gpios(mcan_class, version_info);
if (ret) {
- dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+ dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret));
goto out_power;
}
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 0e5b8c21b9aa..1e58a4aeb9a0 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -401,12 +401,13 @@ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
return ERR_PTR(-ENODEV);
pdev = of_find_device_by_node(np);
- of_node_put(np);
if (!pdev) {
dev_err(dev, "cannot find device node %s\n", np->name);
+ of_node_put(np);
return ERR_PTR(-ENODEV);
}
+ of_node_put(np);
if (!try_module_get(THIS_MODULE)) {
dev_err(dev, "failed to get the device driver module\n");
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
index 0d61b8580d72..f832562bd7a3 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
@@ -818,6 +818,9 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
/* Tx SPB */
tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT),
TX_SPB_CTRL_XF_CTRL2);
+
+ if (intf->parent->tx_chan_offset)
+ tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR);
tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT);
tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index b82f121cadad..0f4efd505332 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4666,12 +4666,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
return PTR_ERR(dpmac_dev);
}
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = priv->mc_io;
@@ -4705,6 +4712,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 147a93bf9fa9..4643a3380618 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1448,12 +1448,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
return PTR_ERR(dpmac_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR(dpmac_dev))
return 0;
+ if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) {
+ err = 0;
+ goto out_put_device;
+ }
+
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
- if (!mac)
- return -ENOMEM;
+ if (!mac) {
+ err = -ENOMEM;
+ goto out_put_device;
+ }
mac->mc_dev = dpmac_dev;
mac->mc_io = port_priv->ethsw_data->mc_io;
@@ -1483,6 +1490,8 @@ err_close_mac:
dpaa2_mac_close(mac);
err_free_mac:
kfree(mac);
+out_put_device:
+ put_device(&dpmac_dev->dev);
return err;
}
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index dc35a23ec47f..d1aeb722d48f 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1917,49 +1917,56 @@ static void gve_turnup_and_check_status(struct gve_priv *priv)
gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
-static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv,
+ unsigned int txqueue)
{
- struct gve_notify_block *block;
- struct gve_tx_ring *tx = NULL;
- struct gve_priv *priv;
- u32 last_nic_done;
- u32 current_time;
u32 ntfy_idx;
- netdev_info(dev, "Timeout on tx queue, %d", txqueue);
- priv = netdev_priv(dev);
if (txqueue > priv->tx_cfg.num_queues)
- goto reset;
+ return NULL;
ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
if (ntfy_idx >= priv->num_ntfy_blks)
- goto reset;
+ return NULL;
+
+ return &priv->ntfy_blocks[ntfy_idx];
+}
+
+static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv,
+ unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ u32 current_time;
- block = &priv->ntfy_blocks[ntfy_idx];
- tx = block->tx;
+ block = gve_get_tx_notify_block(priv, txqueue);
+
+ if (!block)
+ return false;
current_time = jiffies_to_msecs(jiffies);
- if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
- goto reset;
+ if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ return false;
- /* Check to see if there are missed completions, which will allow us to
- * kick the queue.
- */
- last_nic_done = gve_tx_load_event_counter(priv, tx);
- if (last_nic_done - tx->done) {
- netdev_info(dev, "Kicking queue %d", txqueue);
- iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
- napi_schedule(&block->napi);
- tx->last_kick_msec = current_time;
- goto out;
- } // Else reset.
+ netdev_info(priv->dev, "Kicking queue %d", txqueue);
+ napi_schedule(&block->napi);
+ block->tx->last_kick_msec = current_time;
+ return true;
+}
-reset:
- gve_schedule_reset(priv);
+static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct gve_notify_block *block;
+ struct gve_priv *priv;
-out:
- if (tx)
- tx->queue_timeout++;
+ netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+ priv = netdev_priv(dev);
+
+ if (!gve_tx_timeout_try_q_kick(priv, txqueue))
+ gve_schedule_reset(priv);
+
+ block = gve_get_tx_notify_block(priv, txqueue);
+ if (block)
+ block->tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
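
Note: the gve rework splits the timeout handler into a lookup helper and a "try to kick the queue" helper, falling back to a full reset only when the kick is not possible; the per-queue timeout counter is bumped either way. A minimal sketch of that control flow, with made-up types standing in for the gve structures:

#include <stdbool.h>
#include <stdio.h>

struct queue { unsigned long last_kick; int timeouts; };

#define MIN_KICK_GAP 16  /* arbitrary stand-in for MIN_TX_TIMEOUT_GAP */

static struct queue *get_queue(struct queue *qs, int nqueues, int idx)
{
	return (idx < nqueues) ? &qs[idx] : NULL;   /* NULL == invalid index */
}

/* Returns true if the queue could be kicked, false if a reset is needed. */
static bool try_queue_kick(struct queue *qs, int nqueues, int idx,
			   unsigned long now)
{
	struct queue *q = get_queue(qs, nqueues, idx);

	if (!q)
		return false;
	if (q->last_kick + MIN_KICK_GAP > now)
		return false;            /* kicked too recently, escalate */

	q->last_kick = now;              /* the real driver also schedules NAPI */
	return true;
}

static void tx_timeout(struct queue *qs, int nqueues, int idx,
		       unsigned long now)
{
	struct queue *q;

	if (!try_queue_kick(qs, nqueues, idx, now))
		printf("queue %d: scheduling reset\n", idx);

	q = get_queue(qs, nqueues, idx);
	if (q)
		q->timeouts++;           /* per-queue stat, kick or reset */
}

int main(void)
{
	struct queue qs[2] = { {0, 0}, {0, 0} };

	tx_timeout(qs, 2, 0, 100);       /* kick */
	tx_timeout(qs, 2, 0, 105);       /* too soon -> reset */
	tx_timeout(qs, 2, 7, 200);       /* bad index -> reset */
	printf("queue 0 timeouts: %d\n", qs[0].timeouts);  /* 2 */
	return 0;
}
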
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b03b8758c777..aaa803563bd2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
@@ -1039,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
{
u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size;
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_tx_spare *tx_spare;
struct page *page;
dma_addr_t dma;
@@ -1080,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
tx_spare->buf = page_address(page);
tx_spare->len = PAGE_SIZE << order;
ring->tx_spare = tx_spare;
+ ring->tx_copybreak = priv->tx_copybreak;
return;
dma_mapping_error:
@@ -4874,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
devm_kfree(&pdev->dev, priv->tqp_vector);
}
+static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv)
+{
+#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)
+#define HNS3_MAX_PACKET_SIZE (64 * 1024)
+
+ struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev);
+ struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle);
+ struct hnae3_handle *handle = priv->ae_handle;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3)
+ return;
+
+ if (!(domain && iommu_is_dma_domain(domain)))
+ return;
+
+ priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE;
+ priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE;
+
+ if (priv->tx_copybreak < priv->min_tx_copybreak)
+ priv->tx_copybreak = priv->min_tx_copybreak;
+ if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size)
+ handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size;
+}
+
static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
unsigned int ring_type)
{
@@ -5107,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
int i, j;
int ret;
+ hns3_update_tx_spare_buf_config(priv);
for (i = 0; i < ring_num; i++) {
ret = hns3_alloc_ring_memory(&priv->ring[i]);
if (ret) {
@@ -5311,6 +5340,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ priv->min_tx_copybreak = 0;
+ priv->min_tx_spare_buf_size = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
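
Note: the hns3 hunks above raise tx_copybreak and the TX spare-buffer size to driver-chosen minimums when the device sits behind an IOMMU DMA domain on V3+ hardware, presumably so that small packets are copied into the pre-mapped spare buffer rather than mapped per packet. The clamping itself is a simple lower-bound adjustment; a hedged sketch using the constants from the hunk:

#include <stdio.h>

#define MIN_SPARE_BUF_SIZE (2 * 1024 * 1024)   /* 2 MB, as in the hunk above */
#define MAX_PACKET_SIZE    (64 * 1024)         /* 64 KB */

struct cfg { unsigned int tx_copybreak; unsigned int tx_spare_buf_size; };

/* Raise both values to the minimums wanted behind an IOMMU DMA domain;
 * leave them alone when they are already large enough.
 */
static void apply_iommu_minimums(struct cfg *c, int behind_iommu)
{
	if (!behind_iommu)
		return;

	if (c->tx_copybreak < MAX_PACKET_SIZE)
		c->tx_copybreak = MAX_PACKET_SIZE;
	if (c->tx_spare_buf_size < MIN_SPARE_BUF_SIZE)
		c->tx_spare_buf_size = MIN_SPARE_BUF_SIZE;
}

int main(void)
{
	struct cfg c = { .tx_copybreak = 2048, .tx_spare_buf_size = 1024 * 1024 };

	apply_iommu_minimums(&c, 1);
	printf("copybreak=%u spare=%u\n", c.tx_copybreak, c.tx_spare_buf_size);
	return 0;
}
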
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d36c4ed16d8d..caf7a4df8585 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -596,6 +596,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce rx_coal;
u32 tx_copybreak;
u32 rx_copybreak;
+ u32 min_tx_copybreak;
+ u32 min_tx_spare_buf_size;
};
union l3_hdr_info {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index a7de67699a01..30bdec1acb57 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9576,33 +9576,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
return false;
}
-int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport,
+ bool request_en)
{
- struct hclge_dev *hdev = vport->back;
bool need_en;
int ret;
- mutex_lock(&hdev->vport_lock);
-
- vport->req_vlan_fltr_en = request_en;
-
need_en = hclge_need_enable_vport_vlan_filter(vport);
- if (need_en == vport->cur_vlan_fltr_en) {
- mutex_unlock(&hdev->vport_lock);
+ if (need_en == vport->cur_vlan_fltr_en)
return 0;
- }
ret = hclge_set_vport_vlan_filter(vport, need_en);
- if (ret) {
- mutex_unlock(&hdev->vport_lock);
+ if (ret)
return ret;
- }
vport->cur_vlan_fltr_en = need_en;
+ return 0;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+ vport->req_vlan_fltr_en = request_en;
+ ret = __hclge_enable_vport_vlan_filter(vport, request_en);
mutex_unlock(&hdev->vport_lock);
- return 0;
+ return ret;
}
static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
@@ -10623,16 +10626,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
&vport->state))
continue;
- ret = hclge_enable_vport_vlan_filter(vport,
- vport->req_vlan_fltr_en);
+ mutex_lock(&hdev->vport_lock);
+ ret = __hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to sync vlan filter state for vport%u, ret = %d\n",
vport->vport_id, ret);
set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
&vport->state);
+ mutex_unlock(&hdev->vport_lock);
return;
}
+ mutex_unlock(&hdev->vport_lock);
}
}
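
Note: the hclge refactor moves the body of hclge_enable_vport_vlan_filter() into a __ helper that assumes vport_lock is already held, so the periodic sync path can hold the lock across both the request flag and the hardware update. A standalone sketch of the "locked wrapper around an unlocked __ helper" pattern, with a pthread mutex standing in for the driver mutex:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct port {
	pthread_mutex_t lock;
	bool requested;   /* what the caller asked for */
	bool current;     /* what the hardware is doing */
};

/* Caller must hold port->lock. */
static int __set_filter(struct port *p, bool enable)
{
	if (p->current == enable)
		return 0;            /* nothing to do */
	p->current = enable;         /* real code would program the hardware */
	return 0;
}

/* Public entry point: takes the lock, records the request, applies it. */
static int set_filter(struct port *p, bool enable)
{
	int ret;

	pthread_mutex_lock(&p->lock);
	p->requested = enable;
	ret = __set_filter(p, enable);
	pthread_mutex_unlock(&p->lock);
	return ret;
}

/* Background sync path: also takes the lock, then reuses the helper. */
static void sync_filter(struct port *p)
{
	pthread_mutex_lock(&p->lock);
	__set_filter(p, p->requested);
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct port p = { .lock = PTHREAD_MUTEX_INITIALIZER };

	set_filter(&p, true);
	sync_filter(&p);
	printf("current=%d requested=%d\n", p.current, p.requested);
	return 0;
}
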
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index ec581d4b696f..4bd52eab3914 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -497,14 +497,14 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init freq, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts mode, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
ktime_get_real_ts64(&ts);
@@ -512,7 +512,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to init ts time, ret = %d\n", ret);
- goto out;
+ goto out_clear_int;
}
set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
@@ -520,6 +520,9 @@ int hclge_ptp_init(struct hclge_dev *hdev)
return 0;
+out_clear_int:
+ clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ hclge_ptp_int_en(hdev, false);
out:
hclge_ptp_destroy_clock(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index c4f35e8e2177..4cf6ac04662a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -3094,11 +3094,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
{
- struct hnae3_handle *nic = &hdev->nic;
- struct hnae3_knic_private_info *kinfo = &nic->kinfo;
-
- return min_t(u32, hdev->rss_size_max,
- hdev->num_tqps / kinfo->tc_info.num_tc);
+ return min(hdev->rss_size_max, hdev->num_tqps);
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 8294a7c4f122..ba331899d186 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -638,6 +638,9 @@
/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
#define NVM_SUM 0xBABA
+/* Uninitialized ("empty") checksum word value */
+#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF
+
/* PBA (printed board assembly) number words */
#define NVM_PBA_OFFSET_0 8
#define NVM_PBA_OFFSET_1 9
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 364378133526..df4e7d781cb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4274,6 +4274,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_update_nvm_checksum(hw);
if (ret_val)
return ret_val;
+ } else if (hw->mac.type == e1000_pch_tgp) {
+ return 0;
}
}
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index e609f4df86f4..16369e6d245a 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -558,6 +558,12 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
checksum += nvm_data;
}
+ if (hw->mac.type == e1000_pch_tgp &&
+ nvm_data == NVM_CHECKSUM_UNINITIALIZED) {
+ e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n");
+ return 0;
+ }
+
if (checksum != (u16)NVM_SUM) {
e_dbg("NVM Checksum Invalid\n");
return -E1000_ERR_NVM;
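
Note: the e1000e changes teach checksum validation that an all-ones (0xFFFF) checksum word on TGP parts means the NVM was never initialized and should not be flagged as corrupt; otherwise all words must still sum to NVM_SUM (0xBABA). A small host-side sketch of that validation arithmetic, using a fabricated word array instead of real EEPROM reads:

#include <stdint.h>
#include <stdio.h>

#define NVM_SUM                    0xBABA
#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF

/* The 16-bit words, including the checksum word stored last, must sum
 * (mod 2^16) to NVM_SUM.  An uninitialized checksum word can optionally
 * be tolerated, as the TGP quirk above does.
 */
static int validate_nvm(const uint16_t *words, int nwords,
			int allow_uninitialized)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += words[i];

	if (allow_uninitialized &&
	    words[nwords - 1] == NVM_CHECKSUM_UNINITIALIZED)
		return 0;                /* blank part, do not flag an error */

	return (sum == NVM_SUM) ? 0 : -1;
}

int main(void)
{
	/* Last word chosen so the whole array sums to 0xBABA. */
	uint16_t good[4]  = { 0x1111, 0x2222, 0x3333, 0xBABA - 0x6666 };
	uint16_t blank[4] = { 0x1111, 0x2222, 0x3333, 0xFFFF };

	printf("good:  %d\n", validate_nvm(good, 4, 0));   /* 0 */
	printf("blank: %d\n", validate_nvm(blank, 4, 0));  /* -1 */
	printf("blank (TGP-style tolerance): %d\n",
	       validate_nvm(blank, 4, 1));                 /* 0 */
	return 0;
}
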
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 6119a4108838..65a2816142d9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -189,13 +189,14 @@ struct fm10k_q_vector {
struct fm10k_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
char name[IFNAMSIZ + 9];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_q_vector;
#endif /* CONFIG_DEBUG_FS */
- struct rcu_head rcu; /* to avoid race with update stats on free */
/* for dynamic allocation of rings associated with this q_vector */
struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c67963bfe14e..7c600d6e66ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -945,6 +945,7 @@ struct i40e_q_vector {
u16 reg_idx; /* register index of the interrupt */
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
struct i40e_ring_container rx;
struct i40e_ring_container tx;
@@ -955,7 +956,6 @@ struct i40e_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
bool in_busy_poll;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 88e6bef69342..7ccfc1191ae5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -3137,10 +3137,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
const u8 *addr = al->list[i].addr;
/* Allow to delete VF primary MAC only if it was not set
- * administratively by PF or if VF is trusted.
+ * administratively by PF.
*/
if (ether_addr_equal(addr, vf->default_lan_addr.addr)) {
- if (i40e_can_vf_change_mac(vf))
+ if (!vf->pf_set_mac)
was_unimac_deleted = true;
else
continue;
@@ -5006,7 +5006,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
vf_stats->broadcast = stats->rx_broadcast;
vf_stats->multicast = stats->rx_multicast;
vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
- vf_stats->tx_dropped = stats->tx_discards;
+ vf_stats->tx_dropped = stats->tx_errors;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 59323c019544..351824dc3c62 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -2301,6 +2301,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+ if (!buf_copy)
+ return ICE_DDP_PKG_ERR;
state = ice_init_pkg(hw, buf_copy, len);
if (!ice_is_init_pkg_successful(state)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index 9fc0fd95a13d..cb71eca6a85b 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -606,7 +606,7 @@ void ice_debugfs_fwlog_init(struct ice_pf *pf)
pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
pf->ice_debugfs_pf);
- if (IS_ERR(pf->ice_debugfs_pf))
+ if (IS_ERR(pf->ice_debugfs_pf_fwlog))
goto err_create_module_files;
fw_modules_dir = debugfs_create_dir("modules",
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 2410aee59fb2..d132eb477551 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -2226,7 +2226,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
struct ice_lag *lag = pf->lag;
struct net_device *tmp_nd;
- if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
+ !lag || !lag->upper_netdev)
return false;
rcu_read_lock();
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 47311b134a7a..e6acd791bf64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -507,9 +507,10 @@ struct ixgbe_q_vector {
struct ixgbe_ring_container rx, tx;
struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
cpumask_t affinity_mask;
int numa_node;
- struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
/* for dynamic allocation of rings associated with this q_vector */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index b1aeea7c4a91..e395ef5f356e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1947,8 +1947,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
pages_queue, token, force_polling);
- if (callback)
- return err;
+ if (callback && !err)
+ return 0;
if (err > 0) /* Failed in FW, command didn't execute */
err = deliv_status_to_err(err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 84b1ab8233b8..7462514c7f3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1154,8 +1154,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
}
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
- u32 cqe_bcnt)
+static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
@@ -1205,6 +1206,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
&ipv6->daddr, check);
}
+
+ return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
}
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
@@ -1561,8 +1564,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
- skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+ unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
/* Subtract one since we already counted this as one
* "regular" packet in mlx5e_complete_rx_cqe()
*/
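
Note: the mlx5 LRO fix makes mlx5e_lro_update_hdr() return the header length so gso_size is derived from payload bytes only, instead of counting the single copy of the headers as payload and slightly inflating gso_size. A quick worked example with illustrative numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cqe_bcnt = 45066;  /* total bytes in the LRO aggregate */
	unsigned int hdrlen   = 66;     /* Ethernet + IPv4 + TCP w/ timestamps */
	unsigned int segs     = 30;     /* lro_num_seg */

	/* 30 segments of 1500 payload bytes plus one 66-byte header copy */
	printf("old gso_size: %u\n", DIV_ROUND_UP(cqe_bcnt, segs));          /* 1503 */
	printf("new gso_size: %u\n", DIV_ROUND_UP(cqe_bcnt - hdrlen, segs)); /* 1500 */
	return 0;
}
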
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0e3a977d5332..bee906661282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1182,19 +1182,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_handle **flows;
- /* total vports is the same for both e-switches */
- int nvports = esw->total_vports;
struct mlx5_flow_handle *flow;
+ struct mlx5_vport *peer_vport;
struct mlx5_flow_spec *spec;
- struct mlx5_vport *vport;
int err, pfindex;
unsigned long i;
void *misc;
- if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ if (!MLX5_VPORT_MANAGER(peer_dev) &&
+ !mlx5_core_is_ecpf_esw_manager(peer_dev))
return 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
@@ -1203,7 +1203,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
@@ -1213,10 +1213,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, MLX5_VPORT_PF);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ esw_set_peer_miss_rule_source_port(esw, peer_esw, spec,
+ MLX5_VPORT_PF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1224,11 +1224,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_pf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1236,13 +1236,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_ecpf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
esw_set_peer_miss_rule_source_port(esw,
- peer_dev->priv.eswitch,
- spec, vport->vport);
+ peer_esw,
+ spec, peer_vport->vport);
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
@@ -1250,22 +1251,22 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
err = PTR_ERR(flow);
goto add_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (i >= mlx5_core_max_ec_vfs(peer_dev))
- break;
- esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
- spec, vport->vport);
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ esw_set_peer_miss_rule_source_port(esw, peer_esw,
+ spec,
+ peer_vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
goto add_ec_vf_flow_err;
}
- flows[vport->index] = flow;
+ flows[peer_vport->index] = flow;
}
}
@@ -1282,25 +1283,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
return 0;
add_ec_vf_flow_err:
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_vf_flow_err:
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
- if (!flows[vport->index])
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev)) {
+ if (!flows[peer_vport->index])
continue;
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_ecpf_flow_err:
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
add_pf_flow_err:
esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
@@ -1313,37 +1316,34 @@ alloc_flows_err:
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
struct mlx5_core_dev *peer_dev)
{
+ struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch;
u16 peer_index = mlx5_get_dev_index(peer_dev);
struct mlx5_flow_handle **flows;
- struct mlx5_vport *vport;
+ struct mlx5_vport *peer_vport;
unsigned long i;
flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
if (!flows)
return;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
- /* The flow for a particular vport could be NULL if the other ECPF
- * has fewer or no VFs enabled
- */
- if (!flows[vport->index])
- continue;
- mlx5_del_flow_rules(flows[vport->index]);
- }
+ if (mlx5_core_ec_sriov_enabled(peer_dev)) {
+ mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_ec_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
- mlx5_del_flow_rules(flows[vport->index]);
+ mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport,
+ mlx5_core_max_vfs(peer_dev))
+ mlx5_del_flow_rules(flows[peer_vport->index]);
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_ecpf_vport_exists(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
- mlx5_del_flow_rules(flows[vport->index]);
+ if (mlx5_core_is_ecpf_esw_manager(peer_dev)) {
+ peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF);
+ mlx5_del_flow_rules(flows[peer_vport->index]);
}
kvfree(flows);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 41e8660c819c..9c1504d29d34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -2257,6 +2257,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */
{ PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */
{ PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */
+ { PCI_VDEVICE(MELLANOX, 0x1027) }, /* ConnectX-10 */
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 52cf7112762c..58f8ee710912 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <linux/export.h>
#include <net/mana/mana.h>
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index ccd2885c939e..faad1cb880f8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -10,6 +10,7 @@
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 9a47015254bb..ea33ae39be6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -433,6 +433,12 @@ static int intel_crosststamp(ktime_t *device,
return -ETIMEDOUT;
}
+ *system = (struct system_counterval_t) {
+ .cycles = 0,
+ .cs_id = CSID_X86_ART,
+ .use_nsecs = false,
+ };
+
num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
@@ -448,7 +454,7 @@ static int intel_crosststamp(ktime_t *device,
}
system->cycles *= intel_priv->crossts_adj;
- system->cs_id = CSID_X86_ART;
+
priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
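
Note: the dwmac-intel hunk initializes the whole system_counterval_t with a compound literal before the snapshot loop, so cs_id and use_nsecs are always defined and only the cycle count is filled in afterwards. A sketch of that "reset the struct first, then fill in the measurement" idiom, with a simplified stand-in struct:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct system_counterval_t. */
struct counterval {
	uint64_t cycles;
	int      cs_id;
	bool     use_nsecs;
};

#define CSID_X86_ART 1   /* illustrative value only */

static void fill_crosststamp(struct counterval *out, uint64_t raw, int adj)
{
	/* Reset every field to a defined value first, mirroring the
	 * ordering used in the intel_crosststamp() hunk above.
	 */
	*out = (struct counterval) {
		.cycles    = 0,
		.cs_id     = CSID_X86_ART,
		.use_nsecs = false,
	};

	out->cycles = raw * adj;
}

int main(void)
{
	struct counterval cv = { .cycles = 123, .cs_id = 99, .use_nsecs = true };

	fill_crosststamp(&cv, 1000, 3);
	printf("cycles=%llu cs_id=%d use_nsecs=%d\n",
	       (unsigned long long)cv.cycles, cv.cs_id, cv.use_nsecs);
	return 0;
}
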
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index ddfd1c02a885..da53eb04b0a4 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -288,8 +288,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
int i;
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -297,43 +301,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initialized */
- for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
+
+ /* Configure buffer pools for forwarding buffers
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_SW_FWD_BUF_POOL_SIZE;
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
-
- for (i = PRUETH_NUM_BUF_POOLS;
- i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
- i++) {
- /* The driver only uses first 4 queues per PRU so only initialize them */
- if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
- addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* The driver only uses first 4 queues per PRU,
+ * so only initialize buffer for them
+ */
+ if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE)
+ < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_SW_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_SW_LI_BUF_POOL_SIZE;
} else {
- writel(0, &bpool_cfg[i].addr);
- writel(0, &bpool_cfg[i].len);
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
}
}
- if (!slice)
- addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to the host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to the host core
+ */
rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
- writel(addr - SZ_2K, &rxq_ctx->end);
+ addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE;
+ writel(addr, &rxq_ctx->end);
+
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
return 0;
}
@@ -347,13 +374,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
u32 addr;
int i;
- /* Layout to have 64KB aligned buffer pool
- * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1|
- */
-
addr = lower_32_bits(prueth->msmcram.pa);
- if (slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ if (slice) {
+ if (prueth->pdata.banked_ms_ram)
+ addr += MSMC_RAM_BANK_SIZE;
+ else
+ addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE;
+ }
if (addr % SZ_64K) {
dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
@@ -361,39 +388,66 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac)
}
bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
- /* workaround for f/w bug. bpool 0 needs to be initilalized */
- writel(addr, &bpool_cfg[0].addr);
- writel(0, &bpool_cfg[0].len);
- for (i = PRUETH_EMAC_BUF_POOL_START;
- i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS;
- i++) {
- writel(addr, &bpool_cfg[i].addr);
- writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
- addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ /* Configure buffer pools for forwarding buffers
+ * - in mac mode - no forwarding so initialize all pools to 0
+ * - 8 total pools per slice
+ */
+ for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) {
+ writel(0, &bpool_cfg[i].addr);
+ writel(0, &bpool_cfg[i].len);
}
- if (!slice)
- addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
- else
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2;
+ /* Configure buffer pools for Local Injection buffers
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ */
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+ for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) {
+ int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE;
+
+ /* In EMAC mode, only first 4 buffers are used,
+ * as 1 slice needs to handle only 1 port
+ */
+ if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) {
+ writel(addr, &bpool_cfg[cfg_idx].addr);
+ writel(PRUETH_EMAC_LI_BUF_POOL_SIZE,
+ &bpool_cfg[cfg_idx].len);
+ addr += PRUETH_EMAC_LI_BUF_POOL_SIZE;
+ } else {
+ writel(0, &bpool_cfg[cfg_idx].addr);
+ writel(0, &bpool_cfg[cfg_idx].len);
+ }
+ }
- /* Pre-emptible RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ /* Express RX buffer queue
+ * - used by firmware to store express packets to be transmitted
+ * to host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
- /* Express RX buffer queue */
- rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET;
+ /* Pre-emptible RX buffer queue
+ * - used by firmware to store preemptible packets to be transmitted
+ * to host core
+ */
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
for (i = 0; i < 3; i++)
writel(addr, &rxq_ctx->start[i]);
- addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE;
writel(addr, &rxq_ctx->end);
+ /* Set pointer for default dropped packet write
+ * - used by firmware to temporarily store packet to be dropped
+ */
+ rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET;
+ writel(addr, &rxq_ctx->start[0]);
+
return 0;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index c884e9fa099e..60d69744ffae 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -26,21 +26,71 @@ struct icssg_flow_cfg {
#define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */
#define PRUETH_RX_FLOW_DATA 0
-#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K
-#define PRUETH_EMAC_POOLS_PER_SLICE 24
-#define PRUETH_EMAC_BUF_POOL_START 8
-#define PRUETH_NUM_BUF_POOLS 8
-#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */
-#define MSMC_RAM_SIZE \
- (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
- PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
-
-#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K
-#define PRUETH_SW_NUM_BUF_POOLS_HOST 8
-#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4
-#define MSMC_RAM_SIZE_SWITCH_MODE \
- (MSMC_RAM_SIZE + \
- (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
+/* Defines for forwarding path buffer pools:
+ * - used by firmware to store packets to be forwarded to other port
+ * - 8 total pools per slice
+ * - only used in switch mode (as no forwarding in mac mode)
+ */
+#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K)
+
+/* Defines for local injection path buffer pools:
+ * - used by firmware to store packets received from host core
+ * - 16 total pools per slice
+ * - 8 pools per port per slice and each slice handles both ports
+ * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG)
+ * - switch mode: 8 total pools used
+ * - mac mode: 4 total pools used
+ */
+#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16
+#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8
+#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8
+#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4
+#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4
+
+/* Defines for host egress path - express and preemptible buffers
+ * - used by firmware to store express and preemptible packets
+ * to be transmitted to host core
+ * - used by both mac/switch modes
+ */
+#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K
+#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K)
+#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE
+#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE
+
+/* Buffer used by firmware to temporarily store packet to be dropped */
+#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K
+#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE
+
+/* Total switch mode memory usage for buffers per slice */
+#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_SW_DROP_PKT_BUF_SIZE)
+
+/* Total switch mode memory usage for all buffers */
+#define PRUETH_SW_TOTAL_BUF_SIZE \
+ (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Total mac mode memory usage for buffers per slice */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \
+ (PRUETH_EMAC_LI_BUF_POOL_SIZE * \
+ PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \
+ PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \
+ PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \
+ PRUETH_EMAC_DROP_PKT_BUF_SIZE)
+
+/* Total mac mode memory usage for all buffers */
+#define PRUETH_EMAC_TOTAL_BUF_SIZE \
+ (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE)
+
+/* Size of 1 bank of MSMC/OC_SRAM memory */
+#define MSMC_RAM_BANK_SIZE SZ_256K
#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
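
Note: with the pool counts and sizes defined above, the per-slice totals work out to 128 KB in switch mode and 64 KB in MAC mode, i.e. 256 KB and 128 KB for both slices, so the non-banked switch-mode reservation matches one MSMC_RAM_BANK_SIZE. A quick check of that arithmetic in plain C, mirroring the macros rather than including the header:

#include <stdio.h>

#define SZ_2K  (2 * 1024)
#define SZ_4K  (4 * 1024)
#define SZ_8K  (8 * 1024)
#define SZ_16K (16 * 1024)

int main(void)
{
	unsigned int sw_slice, emac_slice;

	/* Switch mode, per slice: 8 forwarding pools + 8 used LI pools +
	 * express queue + preemptible queue + drop buffer.
	 */
	sw_slice = 8 * SZ_8K + 8 * SZ_4K + SZ_16K + (SZ_16K - SZ_2K) + SZ_2K;

	/* MAC mode, per slice: no forwarding pools, 4 used LI pools of 8K. */
	emac_slice = 4 * SZ_8K + SZ_16K + (SZ_16K - SZ_2K) + SZ_2K;

	printf("switch: %u KB per slice, %u KB total\n",
	       sw_slice / 1024, 2 * sw_slice / 1024);      /* 128, 256 */
	printf("mac:    %u KB per slice, %u KB total\n",
	       emac_slice / 1024, 2 * emac_slice / 1024);  /* 64, 128 */
	return 0;
}
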
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 86fc1278127c..2f5c4335dec3 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -1764,10 +1764,15 @@ static int prueth_probe(struct platform_device *pdev)
goto put_mem;
}
- msmc_ram_size = MSMC_RAM_SIZE;
prueth->is_switchmode_supported = prueth->pdata.switch_mode;
- if (prueth->is_switchmode_supported)
- msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
+ if (prueth->pdata.banked_ms_ram) {
+ /* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */
+ msmc_ram_size = (2 * MSMC_RAM_BANK_SIZE);
+ } else {
+ msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE;
+ if (prueth->is_switchmode_supported)
+ msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE;
+ }
/* NOTE: FW bug needs buffer base to be 64KB aligned */
prueth->msmcram.va =
@@ -1924,7 +1929,8 @@ put_iep0:
free_pool:
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va, msmc_ram_size);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
put_mem:
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -1976,8 +1982,8 @@ static void prueth_remove(struct platform_device *pdev)
icss_iep_put(prueth->iep0);
gen_pool_free(prueth->sram_pool,
- (unsigned long)prueth->msmcram.va,
- MSMC_RAM_SIZE);
+ (unsigned long)prueth->msmcram.va,
+ prueth->msmcram.size);
pruss_release_mem_region(prueth->pruss, &prueth->shram);
@@ -1994,12 +2000,14 @@ static const struct prueth_pdata am654_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 0,
};
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
.quirk_10m_link_issue = 1,
.switch_mode = 1,
+ .banked_ms_ram = 1,
};
static const struct of_device_id prueth_dt_match[] = {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 23c465f1ce7f..3bb9fd8c3804 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -251,11 +251,13 @@ struct prueth_emac {
* @fdqring_mode: Free desc queue mode
* @quirk_10m_link_issue: 10M link detect errata
* @switch_mode: switch firmware support
+ * @banked_ms_ram: banked memory support
*/
struct prueth_pdata {
enum k3_ring_mode fdqring_mode;
u32 quirk_10m_link_issue:1;
u32 switch_mode:1;
+ u32 banked_ms_ram:1;
};
struct icssg_firmwares {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
index 490a9cc06fb0..7e053b8af3ec 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h
@@ -180,6 +180,9 @@
/* Used to notify the FW of the current link speed */
#define PORT_LINK_SPEED_OFFSET 0x00A8
+/* 2k memory pointer reserved for default writes by PRU0 */
+#define DEFAULT_MSMC_Q_OFFSET 0x00AC
+
/* TAS gate mask for windows list0 */
#define TAS_GATE_MASK_LIST0 0x0100
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 0f4be72116b8..f0823aa1ede6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1912,7 +1912,6 @@ static void wx_configure_rx_ring(struct wx *wx,
struct wx_ring *ring)
{
u16 reg_idx = ring->reg_idx;
- union wx_rx_desc *rx_desc;
u64 rdba = ring->dma;
u32 rxdctl;
@@ -1942,9 +1941,9 @@ static void wx_configure_rx_ring(struct wx *wx,
memset(ring->rx_buffer_info, 0,
sizeof(struct wx_rx_buffer) * ring->count);
- /* initialize Rx descriptor 0 */
- rx_desc = WX_RX_DESC(ring, 0);
- rx_desc->wb.upper.length = 0;
+ /* reset ntu and ntc to place SW in sync with hardware */
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
/* enable receive descriptor ring */
wr32m(wx, WX_PX_RR_CFG(reg_idx),
@@ -2778,6 +2777,8 @@ void wx_update_stats(struct wx *wx)
hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
}
+ /* qmprc is not cleared on read, manual reset it */
+ hwstats->qmprc = 0;
for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
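
Note: wx_update_stats() re-sums WX_PX_MPRC over all queues on every call, and because that register is not clear-on-read the accumulator has to be zeroed first or the total grows on every update. The same issue in miniature:

#include <stdio.h>

#define NQUEUES 4

/* Stand-in for rd32(WX_PX_MPRC(i)): a counter the hardware does not
 * clear on read, so each call returns the same running total again.
 */
static unsigned int read_mprc(const unsigned int *hw_counts, int i)
{
	return hw_counts[i];
}

static void update_stats(unsigned int *qmprc, const unsigned int *hw_counts)
{
	int i;

	*qmprc = 0;                      /* the fix: reset before re-summing */
	for (i = 0; i < NQUEUES; i++)
		*qmprc += read_mprc(hw_counts, i);
}

int main(void)
{
	unsigned int hw_counts[NQUEUES] = { 10, 20, 30, 40 };
	unsigned int qmprc = 0;

	update_stats(&qmprc, hw_counts);
	update_stats(&qmprc, hw_counts); /* without the reset this would be 200 */
	printf("qmprc = %u\n", qmprc);   /* 100 */
	return 0;
}
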
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 55e252789db3..0213ad5a6ceb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -174,10 +174,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
skb_frag_off(frag),
skb_frag_size(frag),
DMA_FROM_DEVICE);
-
- /* If the page was released, just unmap it. */
- if (unlikely(WX_CB(skb)->page_released))
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
}
static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
@@ -227,10 +223,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
struct sk_buff *skb,
int rx_buffer_pgcnt)
{
- if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
- /* the page has been released from the ring */
- WX_CB(skb)->page_released = true;
-
/* clear contents of rx_buffer */
rx_buffer->page = NULL;
rx_buffer->skb = NULL;
@@ -315,7 +307,7 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
return false;
dma = page_pool_get_dma_addr(page);
- bi->page_dma = dma;
+ bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
@@ -352,7 +344,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
DMA_FROM_DEVICE);
rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
+ cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -365,6 +357,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
/* clear the status bits for the next_to_use descriptor */
rx_desc->wb.upper.status_error = 0;
+ /* clear the length for the next_to_use descriptor */
+ rx_desc->wb.upper.length = 0;
cleaned_count--;
} while (cleaned_count);
@@ -2423,9 +2417,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
if (rx_buffer->skb) {
struct sk_buff *skb = rx_buffer->skb;
- if (WX_CB(skb)->page_released)
- page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-
dev_kfree_skb(skb);
}
@@ -2449,6 +2440,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
}
}
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index c363379126c0..1a90fcede86b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -909,7 +909,6 @@ enum wx_reset_type {
struct wx_cb {
dma_addr_t dma;
u16 append_cnt; /* number of skb's appended */
- bool page_released;
bool dma_released;
};
@@ -998,7 +997,6 @@ struct wx_tx_buffer {
struct wx_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- dma_addr_t page_dma;
struct page *page;
unsigned int page_offset;
};
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index ecf47107146d..4719d40a63ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -286,7 +286,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
/* Read the remaining data */
for (; length > 0; length--)
- *to_u8_ptr = *from_u8_ptr;
+ *to_u8_ptr++ = *from_u8_ptr++;
}
}
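
Note: the emaclite one-liner fixes the tail-byte loop: without the post-increments every remaining byte was read from and written to the same address. A minimal reproduction of the difference:

#include <stdio.h>
#include <string.h>

/* Copy 'length' trailing bytes one at a time, as the aligned-read path
 * in xemaclite_aligned_read() does after the 32-bit bulk copy.
 */
static void copy_tail(const unsigned char *src, unsigned char *dst, int length)
{
	for (; length > 0; length--)
		*dst++ = *src++;   /* buggy form: *dst = *src; pointers never move */
}

int main(void)
{
	const unsigned char src[3] = { 'a', 'b', 'c' };
	unsigned char dst[4];

	memset(dst, 0, sizeof(dst));
	copy_tail(src, dst, 3);
	printf("%s\n", dst);       /* "abc"; the broken loop would yield "a" */
	return 0;
}
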
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c41a025c66f0..8be9bce66a4e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2317,8 +2317,11 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev)
if (!ndev)
return NOTIFY_DONE;
- /* set slave flag before open to prevent IPv6 addrconf */
+ /* Set slave flag and no addrconf flag before open
+ * to prevent IPv6 addrconf.
+ */
vf_netdev->flags |= IFF_SLAVE;
+ vf_netdev->priv_flags |= IFF_NO_ADDRCONF;
return NOTIFY_DONE;
}
diff --git a/drivers/net/ovpn/io.c b/drivers/net/ovpn/io.c
index ebf1e849506b..3e9e7f8444b3 100644
--- a/drivers/net/ovpn/io.c
+++ b/drivers/net/ovpn/io.c
@@ -62,6 +62,13 @@ static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
unsigned int pkt_len;
int ret;
+ /*
+ * GSO state from the transport layer is not valid for the tunnel/data
+ * path. Reset all GSO fields to prevent any further GSO processing
+ * from entering an inconsistent state.
+ */
+ skb_gso_reset(skb);
+
/* we can't guarantee the packet wasn't corrupted before entering the
* VPN, therefore we give other layers a chance to check that
*/
diff --git a/drivers/net/ovpn/netlink-gen.c b/drivers/net/ovpn/netlink-gen.c
index 58e1a4342378..14298188c5f1 100644
--- a/drivers/net/ovpn/netlink-gen.c
+++ b/drivers/net/ovpn/netlink-gen.c
@@ -29,6 +29,22 @@ const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1] =
[OVPN_A_KEYCONF_DECRYPT_DIR] = NLA_POLICY_NESTED(ovpn_keydir_nl_policy),
};
+const struct nla_policy ovpn_keyconf_del_input_nl_policy[OVPN_A_KEYCONF_SLOT + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+};
+
+const struct nla_policy ovpn_keyconf_get_nl_policy[OVPN_A_KEYCONF_CIPHER_ALG + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+ [OVPN_A_KEYCONF_SLOT] = NLA_POLICY_MAX(NLA_U32, 1),
+ [OVPN_A_KEYCONF_KEY_ID] = NLA_POLICY_MAX(NLA_U32, 7),
+ [OVPN_A_KEYCONF_CIPHER_ALG] = NLA_POLICY_MAX(NLA_U32, 2),
+};
+
+const struct nla_policy ovpn_keyconf_swap_input_nl_policy[OVPN_A_KEYCONF_PEER_ID + 1] = {
+ [OVPN_A_KEYCONF_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_keyconf_peer_id_range),
+};
+
const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1] = {
[OVPN_A_KEYDIR_CIPHER_KEY] = NLA_POLICY_MAX_LEN(256),
[OVPN_A_KEYDIR_NONCE_TAIL] = NLA_POLICY_EXACT_LEN(OVPN_NONCE_TAIL_SIZE),
@@ -60,16 +76,49 @@ const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1] = {
[OVPN_A_PEER_LINK_TX_PACKETS] = { .type = NLA_UINT, },
};
+const struct nla_policy ovpn_peer_del_input_nl_policy[OVPN_A_PEER_ID + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+};
+
+const struct nla_policy ovpn_peer_new_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_SOCKET] = { .type = NLA_U32, },
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+};
+
+const struct nla_policy ovpn_peer_set_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1] = {
+ [OVPN_A_PEER_ID] = NLA_POLICY_FULL_RANGE(NLA_U32, &ovpn_a_peer_id_range),
+ [OVPN_A_PEER_REMOTE_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_REMOTE_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_REMOTE_IPV6_SCOPE_ID] = { .type = NLA_U32, },
+ [OVPN_A_PEER_REMOTE_PORT] = NLA_POLICY_MIN(NLA_BE16, 1),
+ [OVPN_A_PEER_VPN_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_VPN_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_LOCAL_IPV4] = { .type = NLA_BE32, },
+ [OVPN_A_PEER_LOCAL_IPV6] = NLA_POLICY_EXACT_LEN(16),
+ [OVPN_A_PEER_KEEPALIVE_INTERVAL] = { .type = NLA_U32, },
+ [OVPN_A_PEER_KEEPALIVE_TIMEOUT] = { .type = NLA_U32, },
+};
+
/* OVPN_CMD_PEER_NEW - do */
static const struct nla_policy ovpn_peer_new_nl_policy[OVPN_A_PEER + 1] = {
[OVPN_A_IFINDEX] = { .type = NLA_U32, },
- [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_new_input_nl_policy),
};
/* OVPN_CMD_PEER_SET - do */
static const struct nla_policy ovpn_peer_set_nl_policy[OVPN_A_PEER + 1] = {
[OVPN_A_IFINDEX] = { .type = NLA_U32, },
- [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_set_input_nl_policy),
};
/* OVPN_CMD_PEER_GET - do */
@@ -86,7 +135,7 @@ static const struct nla_policy ovpn_peer_get_dump_nl_policy[OVPN_A_IFINDEX + 1]
/* OVPN_CMD_PEER_DEL - do */
static const struct nla_policy ovpn_peer_del_nl_policy[OVPN_A_PEER + 1] = {
[OVPN_A_IFINDEX] = { .type = NLA_U32, },
- [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_nl_policy),
+ [OVPN_A_PEER] = NLA_POLICY_NESTED(ovpn_peer_del_input_nl_policy),
};
/* OVPN_CMD_KEY_NEW - do */
@@ -98,19 +147,19 @@ static const struct nla_policy ovpn_key_new_nl_policy[OVPN_A_KEYCONF + 1] = {
/* OVPN_CMD_KEY_GET - do */
static const struct nla_policy ovpn_key_get_nl_policy[OVPN_A_KEYCONF + 1] = {
[OVPN_A_IFINDEX] = { .type = NLA_U32, },
- [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_get_nl_policy),
};
/* OVPN_CMD_KEY_SWAP - do */
static const struct nla_policy ovpn_key_swap_nl_policy[OVPN_A_KEYCONF + 1] = {
[OVPN_A_IFINDEX] = { .type = NLA_U32, },
- [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_swap_input_nl_policy),
};
/* OVPN_CMD_KEY_DEL - do */
static const struct nla_policy ovpn_key_del_nl_policy[OVPN_A_KEYCONF + 1] = {
[OVPN_A_IFINDEX] = { .type = NLA_U32, },
- [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_nl_policy),
+ [OVPN_A_KEYCONF] = NLA_POLICY_NESTED(ovpn_keyconf_del_input_nl_policy),
};
/* Ops table for ovpn */
diff --git a/drivers/net/ovpn/netlink-gen.h b/drivers/net/ovpn/netlink-gen.h
index 66a4e4a0a055..220b5b2fdd4f 100644
--- a/drivers/net/ovpn/netlink-gen.h
+++ b/drivers/net/ovpn/netlink-gen.h
@@ -13,8 +13,14 @@
/* Common nested types */
extern const struct nla_policy ovpn_keyconf_nl_policy[OVPN_A_KEYCONF_DECRYPT_DIR + 1];
+extern const struct nla_policy ovpn_keyconf_del_input_nl_policy[OVPN_A_KEYCONF_SLOT + 1];
+extern const struct nla_policy ovpn_keyconf_get_nl_policy[OVPN_A_KEYCONF_CIPHER_ALG + 1];
+extern const struct nla_policy ovpn_keyconf_swap_input_nl_policy[OVPN_A_KEYCONF_PEER_ID + 1];
extern const struct nla_policy ovpn_keydir_nl_policy[OVPN_A_KEYDIR_NONCE_TAIL + 1];
extern const struct nla_policy ovpn_peer_nl_policy[OVPN_A_PEER_LINK_TX_PACKETS + 1];
+extern const struct nla_policy ovpn_peer_del_input_nl_policy[OVPN_A_PEER_ID + 1];
+extern const struct nla_policy ovpn_peer_new_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1];
+extern const struct nla_policy ovpn_peer_set_input_nl_policy[OVPN_A_PEER_KEEPALIVE_TIMEOUT + 1];
int ovpn_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
struct genl_info *info);
diff --git a/drivers/net/ovpn/netlink.c b/drivers/net/ovpn/netlink.c
index a4ec53def46e..c7f382437630 100644
--- a/drivers/net/ovpn/netlink.c
+++ b/drivers/net/ovpn/netlink.c
@@ -352,7 +352,7 @@ int ovpn_nl_peer_new_doit(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
- ovpn_peer_nl_policy, info->extack);
+ ovpn_peer_new_input_nl_policy, info->extack);
if (ret)
return ret;
@@ -476,7 +476,7 @@ int ovpn_nl_peer_set_doit(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
- ovpn_peer_nl_policy, info->extack);
+ ovpn_peer_set_input_nl_policy, info->extack);
if (ret)
return ret;
@@ -654,7 +654,7 @@ int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info)
struct ovpn_peer *peer;
struct sk_buff *msg;
u32 peer_id;
- int ret;
+ int ret, i;
if (GENL_REQ_ATTR_CHECK(info, OVPN_A_PEER))
return -EINVAL;
@@ -668,6 +668,23 @@ int ovpn_nl_peer_get_doit(struct sk_buff *skb, struct genl_info *info)
OVPN_A_PEER_ID))
return -EINVAL;
+ /* OVPN_CMD_PEER_GET expects only the PEER_ID, therefore
+ * ensure that the user hasn't specified any other attribute.
+ *
+ * Unfortunately this check cannot be performed via netlink
+ * spec/policy and must be open-coded.
+ */
+ for (i = 0; i < OVPN_A_PEER_MAX + 1; i++) {
+ if (i == OVPN_A_PEER_ID)
+ continue;
+
+ if (attrs[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected attribute %u", i);
+ return -EINVAL;
+ }
+ }
+
peer_id = nla_get_u32(attrs[OVPN_A_PEER_ID]);
peer = ovpn_peer_get_by_id(ovpn, peer_id);
if (!peer) {
@@ -768,7 +785,7 @@ int ovpn_nl_peer_del_doit(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
ret = nla_parse_nested(attrs, OVPN_A_PEER_MAX, info->attrs[OVPN_A_PEER],
- ovpn_peer_nl_policy, info->extack);
+ ovpn_peer_del_input_nl_policy, info->extack);
if (ret)
return ret;
@@ -969,14 +986,14 @@ int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
struct ovpn_peer *peer;
struct sk_buff *msg;
u32 peer_id;
- int ret;
+ int ret, i;
if (GENL_REQ_ATTR_CHECK(info, OVPN_A_KEYCONF))
return -EINVAL;
ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
info->attrs[OVPN_A_KEYCONF],
- ovpn_keyconf_nl_policy, info->extack);
+ ovpn_keyconf_get_nl_policy, info->extack);
if (ret)
return ret;
@@ -988,6 +1005,24 @@ int ovpn_nl_key_get_doit(struct sk_buff *skb, struct genl_info *info)
OVPN_A_KEYCONF_SLOT))
return -EINVAL;
+ /* OVPN_CMD_KEY_GET expects only the PEER_ID and the SLOT, therefore
+ * ensure that the user hasn't specified any other attribute.
+ *
+ * Unfortunately this check cannot be performed via netlink
+ * spec/policy and must be open-coded.
+ */
+ for (i = 0; i < OVPN_A_KEYCONF_MAX + 1; i++) {
+ if (i == OVPN_A_KEYCONF_PEER_ID ||
+ i == OVPN_A_KEYCONF_SLOT)
+ continue;
+
+ if (attrs[i]) {
+ NL_SET_ERR_MSG_FMT_MOD(info->extack,
+ "unexpected attribute %u", i);
+ return -EINVAL;
+ }
+ }
+
peer_id = nla_get_u32(attrs[OVPN_A_KEYCONF_PEER_ID]);
peer = ovpn_peer_get_by_id(ovpn, peer_id);
if (!peer) {
@@ -1037,7 +1072,7 @@ int ovpn_nl_key_swap_doit(struct sk_buff *skb, struct genl_info *info)
ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
info->attrs[OVPN_A_KEYCONF],
- ovpn_keyconf_nl_policy, info->extack);
+ ovpn_keyconf_swap_input_nl_policy, info->extack);
if (ret)
return ret;
@@ -1074,7 +1109,7 @@ int ovpn_nl_key_del_doit(struct sk_buff *skb, struct genl_info *info)
ret = nla_parse_nested(attrs, OVPN_A_KEYCONF_MAX,
info->attrs[OVPN_A_KEYCONF],
- ovpn_keyconf_nl_policy, info->extack);
+ ovpn_keyconf_del_input_nl_policy, info->extack);
if (ret)
return ret;
diff --git a/drivers/net/ovpn/udp.c b/drivers/net/ovpn/udp.c
index bff00946eae2..60435a21f29c 100644
--- a/drivers/net/ovpn/udp.c
+++ b/drivers/net/ovpn/udp.c
@@ -344,6 +344,7 @@ void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
int ret;
skb->dev = peer->ovpn->dev;
+ skb->mark = READ_ONCE(sk->sk_mark);
/* no checksum performed at this layer */
skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 73f9cb2e2844..f76ee8489504 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3416,7 +3416,8 @@ static int phy_probe(struct device *dev)
/* Get the LEDs from the device tree, and instantiate standard
* LEDs for them.
*/
- if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+ if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
+ !phy_driver_is_genphy_10g(phydev))
err = of_phy_leds(phydev);
out:
@@ -3433,7 +3434,8 @@ static int phy_remove(struct device *dev)
cancel_delayed_work_sync(&phydev->state_queue);
- if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+ if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
+ !phy_driver_is_genphy_10g(phydev))
phy_leds_unregister(phydev);
phydev->state = PHY_DOWN;
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index c30ca415d1d3..36c73db44f77 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -689,6 +689,10 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
status);
return -ENODEV;
}
+ if (!dev->status) {
+ dev_err(&dev->udev->dev, "No status endpoint found");
+ return -ENODEV;
+ }
/* Initialize sierra private data */
priv = kzalloc(sizeof *priv, GFP_KERNEL);
if (!priv)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5d674eb9a0f2..82b4a2a2b8c4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -7059,7 +7059,7 @@ static int virtnet_probe(struct virtio_device *vdev)
otherwise get link status from config. */
netif_carrier_off(dev);
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
- virtnet_config_changed_work(&vi->config_work);
+ virtio_config_changed(vi->vdev);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
virtnet_update_settings(vi);
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index 57648febc4a4..bd95dc88f9b2 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -1060,7 +1060,6 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
}
rx_tid = &peer->rx_tid[tid];
- paddr_aligned = rx_tid->qbuf.paddr_aligned;
/* Update the tid queue if it is already setup */
if (rx_tid->active) {
ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
@@ -1072,6 +1071,7 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
}
if (!ab->hw_params->reoq_lut_support) {
+ paddr_aligned = rx_tid->qbuf.paddr_aligned;
ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
peer_mac,
paddr_aligned, tid,
@@ -1098,6 +1098,7 @@ int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_
return ret;
}
+ paddr_aligned = rx_tid->qbuf.paddr_aligned;
if (ab->hw_params->reoq_lut_support) {
/* Update the REO queue LUT at the corresponding peer id
* and tid with qaddr.
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 5cdc09d465d4..e90f3187e55c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -754,7 +754,7 @@ struct iwl_lari_config_change_cmd_v10 {
* according to the BIOS definitions.
* For LARI cmd version 11 - bits 0:4 are supported.
* For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are
- * reserved. No need to mask out the reserved bits.
+ * reserved.
* @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
* Each bit represents a set of channels in a specific band that should be
* disabled
@@ -787,6 +787,7 @@ struct iwl_lari_config_change_cmd {
/* Activate UNII-1 (5.2GHz) for World Wide */
#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F
+#define CHAN_STATE_ACTIVE_BITMAP_CMD_V12 0x7F
/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 74b90bd92c48..ebfba981cf89 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -614,6 +614,7 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
if (!ret) {
+ value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V12;
if (cmd_ver < 8)
value &= ~ACTIVATE_5G2_IN_WW_MASK;
diff --git a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
index 326c300470ea..436219d1ec5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/mld/regulatory.c
@@ -251,8 +251,10 @@ void iwl_mld_configure_lari(struct iwl_mld *mld)
cpu_to_le32(value &= DSM_UNII4_ALLOW_BITMAP);
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
- if (!ret)
+ if (!ret) {
+ value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V12;
cmd.chan_state_active_bitmap = cpu_to_le32(value);
+ }
ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
if (!ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index c8f4f3a1d2eb..5a9c3b7976a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -546,8 +546,10 @@ again:
}
if (WARN_ON(trans->do_top_reset &&
- trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC))
- return -EINVAL;
+ trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC)) {
+ ret = -EINVAL;
+ goto out;
+ }
/* we need to wait later - set state */
if (trans->do_top_reset)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index bb467e2b1779..eee55428749c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -2101,10 +2101,10 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
bc_ent = cpu_to_le16(len | (sta_id << 12));
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + write_ptr].tfd_offset = bc_ent;
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + write_ptr].tfd_offset = bc_ent;
if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + write_ptr].tfd_offset =
bc_ent;
}
@@ -2328,10 +2328,10 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
bc_ent = cpu_to_le16(1 | (sta_id << 12));
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + read_ptr].tfd_offset = bc_ent;
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + read_ptr].tfd_offset = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id * BC_TABLE_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
+ scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX + read_ptr].tfd_offset =
bc_ent;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 423dcd190906..2a1aa32e6693 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1506,7 +1506,7 @@ static int btt_blk_init(struct btt *btt)
int rc;
if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
- lim.integrity.tuple_size = btt_meta_size(btt);
+ lim.integrity.metadata_size = btt_meta_size(btt);
lim.integrity.tag_size = btt_meta_size(btt);
}
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index b1fddfa33ab9..1286c31320e6 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -301,8 +301,8 @@ static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
/*
- * This lock here doesn't make much sense at a first glace but
- * removing it will result in occasional missed completetion
+ * This lock here doesn't make much sense at a first glance but
+ * removing it will result in occasional missed completion
* interrupts even though the commands still appear on the CQ.
* It's unclear why this happens but our best guess is that
* there is a bug in the firmware triggered when a new command
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index 1a0058be5821..dc90df9e13a2 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -133,7 +133,7 @@ static const char * const nvme_statuses[] = {
[NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached",
[NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported",
[NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid",
- [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress",
+ [NVME_SC_SELF_TEST_IN_PROGRESS] = "Device Self-test In Progress",
[NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited",
[NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier",
[NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State",
@@ -145,7 +145,7 @@ static const char * const nvme_statuses[] = {
[NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes",
[NVME_SC_INVALID_PI] = "Invalid Protection Information",
[NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range",
- [NVME_SC_CMD_SIZE_LIM_EXCEEDED ] = "Command Size Limits Exceeded",
+ [NVME_SC_CMD_SIZE_LIM_EXCEEDED] = "Command Size Limits Exceeded",
[NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error",
[NVME_SC_ZONE_FULL] = "Zone Is Full",
[NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7493e5aa984c..9d988f4cb87a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -381,12 +381,12 @@ static void nvme_log_err_passthru(struct request *req)
nr->status & NVME_SC_MASK, /* Status Code */
nr->status & NVME_STATUS_MORE ? "MORE " : "",
nr->status & NVME_STATUS_DNR ? "DNR " : "",
- nr->cmd->common.cdw10,
- nr->cmd->common.cdw11,
- nr->cmd->common.cdw12,
- nr->cmd->common.cdw13,
- nr->cmd->common.cdw14,
- nr->cmd->common.cdw15);
+ le32_to_cpu(nr->cmd->common.cdw10),
+ le32_to_cpu(nr->cmd->common.cdw11),
+ le32_to_cpu(nr->cmd->common.cdw12),
+ le32_to_cpu(nr->cmd->common.cdw13),
+ le32_to_cpu(nr->cmd->common.cdw14),
+ le32_to_cpu(nr->cmd->common.cdw15));
}
enum nvme_disposition {
@@ -764,6 +764,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
+
+ if (!(rq->rq_flags & RQF_DONTPREP))
+ nvme_clear_nvme_request(rq);
+
return nvme_host_path_error(rq);
}
EXPORT_SYMBOL_GPL(nvme_fail_nonready_command);
@@ -1866,8 +1870,11 @@ static bool nvme_init_integrity(struct nvme_ns_head *head,
break;
}
- bi->tuple_size = head->ms;
- bi->pi_offset = info->pi_offset;
+ bi->metadata_size = head->ms;
+ if (bi->csum_type) {
+ bi->pi_tuple_size = head->pi_size;
+ bi->pi_offset = info->pi_offset;
+ }
return true;
}
@@ -2404,22 +2411,24 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns,
else
lim.write_stream_granularity = 0;
- ret = queue_limits_commit_update(ns->disk->queue, &lim);
- if (ret) {
- blk_mq_unfreeze_queue(ns->disk->queue, memflags);
- goto out;
- }
-
- set_capacity_and_notify(ns->disk, capacity);
-
/*
* Only set the DEAC bit if the device guarantees that reads from
* deallocated data return zeroes. While the DEAC bit does not
* require that, it must be a no-op if reads from deallocated data
* do not return zeroes.
*/
- if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3)))
+ if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) {
ns->head->features |= NVME_NS_DEAC;
+ lim.max_hw_wzeroes_unmap_sectors = lim.max_write_zeroes_sectors;
+ }
+
+ ret = queue_limits_commit_update(ns->disk->queue, &lim);
+ if (ret) {
+ blk_mq_unfreeze_queue(ns->disk->queue, memflags);
+ goto out;
+ }
+
+ set_capacity_and_notify(ns->disk, capacity);
set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info));
set_bit(NVME_NS_READY, &ns->flags);
blk_mq_unfreeze_queue(ns->disk->queue, memflags);
@@ -3537,15 +3546,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret)
goto out_free;
}
-
- if (le16_to_cpu(id->awupf) != ctrl->subsys->awupf) {
- dev_err_ratelimited(ctrl->device,
- "inconsistent AWUPF, controller not added (%u/%u).\n",
- le16_to_cpu(id->awupf), ctrl->subsys->awupf);
- ret = -EINVAL;
- goto out_free;
- }
-
memcpy(ctrl->subsys->firmware_rev, id->fr,
sizeof(ctrl->subsys->firmware_rev));
@@ -4077,7 +4077,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
return;
}
}
- list_add(&ns->list, &ns->ctrl->namespaces);
+ list_add_rcu(&ns->list, &ns->ctrl->namespaces);
}
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info)
@@ -4300,7 +4300,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
/*
- * If available try to use the Command Set Idependent Identify Namespace
+ * If available try to use the Command Set Independent Identify Namespace
* data structure to find all the generic information that is needed to
* set up a namespace. If not fall back to the legacy version.
*/
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 014b387f1e8b..08a5ea3e9383 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -899,7 +899,7 @@ EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);
* may crash.
*
* As such:
- * Wrapper all the dma routines and check the dev pointer.
+ * Wrap all the dma routines and check the dev pointer.
*
* If simple mappings (return just a dma address, we'll noop them,
* returning a dma address of 0.
@@ -1955,8 +1955,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
}
/*
- * For the linux implementation, if we have an unsucceesful
- * status, they blk-mq layer can typically be called with the
+ * For the linux implementation, if we have an unsuccessful
+ * status, the blk-mq layer can typically be called with the
* non-zero status and the content of the cqe isn't important.
*/
if (status)
@@ -2429,7 +2429,7 @@ static bool nvme_fc_terminate_exchange(struct request *req, void *data)
/*
* This routine runs through all outstanding commands on the association
- * and aborts them. This routine is typically be called by the
+ * and aborts them. This routine is typically called by the
* delete_association routine. It is also called due to an error during
* reconnect. In that scenario, it is most likely a command that initializes
* the controller, including fabric Connect commands on io queues, that
@@ -2622,7 +2622,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
* as part of the exchange. The CQE is the last thing for the io,
* which is transferred (explicitly or implicitly) with the RSP IU
* sent on the exchange. After the CQE is received, the FC exchange is
- * terminaed and the Exchange may be used on a different io.
+ * terminated and the Exchange may be used on a different io.
*
* The transport to LLDD api has the transport making a request for a
* new fcp io request to the LLDD. The LLDD then allocates a FC exchange
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7df2ea21851f..cfd2b5b90b91 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -69,7 +69,7 @@ enum nvme_quirks {
NVME_QUIRK_IDENTIFY_CNS = (1 << 1),
/*
- * The controller deterministically returns O's on reads to
+ * The controller deterministically returns 0's on reads to
* logical blocks that deallocate was called on.
*/
NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2),
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 320aaa41ec39..071efec25346 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -7,7 +7,7 @@
#include <linux/acpi.h>
#include <linux/async.h>
#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
+#include <linux/blk-mq-dma.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
@@ -27,7 +27,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
-#include <linux/pci-p2pdma.h>
#include "trace.h"
#include "nvme.h"
@@ -39,20 +38,17 @@
#define NVME_SMALL_POOL_SIZE 256
/*
- * These can be higher, but we need to ensure that any command doesn't
- * require an sg allocation that needs more than a page of data.
+ * Arbitrary upper bound.
*/
-#define NVME_MAX_KB_SZ 8192
+#define NVME_MAX_BYTES SZ_8M
#define NVME_MAX_NR_DESCRIPTORS 5
/*
- * For data SGLs we support a single descriptors worth of SGL entries, but for
- * now we also limit it to avoid an allocation larger than PAGE_SIZE for the
- * scatterlist.
+ * For data SGLs we support a single descriptor's worth of SGL entries.
+ * For PRPs, segments don't matter at all.
*/
#define NVME_MAX_SEGS \
- min(NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc), \
- (PAGE_SIZE / sizeof(struct scatterlist)))
+ (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
/*
* For metadata SGLs, only the small descriptor is supported, and the first
@@ -61,6 +57,21 @@
#define NVME_MAX_META_SEGS \
((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1)
+/*
+ * The last entry is used to link to the next descriptor.
+ */
+#define PRPS_PER_PAGE \
+ (((NVME_CTRL_PAGE_SIZE / sizeof(__le64))) - 1)
+
+/*
+ * I/O could be non-aligned both at the beginning and end.
+ */
+#define MAX_PRP_RANGE \
+ (NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1))
+
+static_assert(MAX_PRP_RANGE / NVME_CTRL_PAGE_SIZE <=
+ (1 /* prp1 */ + NVME_MAX_NR_DESCRIPTORS * PRPS_PER_PAGE));
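
For readers checking the bound, the arithmetic works out as follows, assuming the usual 4 KiB NVME_CTRL_PAGE_SIZE (the numbers are an illustration, not part of the patch):

/*
 * PRPS_PER_PAGE  = 4096 / 8 - 1                  = 511 data PRPs per descriptor
 * worst case I/O = 8 MiB / 4 KiB pages + 1 head  = 2049 PRP entries (misaligned)
 * capacity       = 1 (prp1) + 5 * 511            = 2556 entries
 * so the static_assert holds with headroom.
 */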
+
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);
@@ -97,7 +108,7 @@ static int io_queue_count_set(const char *val, const struct kernel_param *kp)
int ret;
ret = kstrtouint(val, 10, &n);
- if (ret != 0 || n > num_possible_cpus())
+ if (ret != 0 || n > blk_mq_num_possible_queues(0))
return -EINVAL;
return param_set_uint(val, kp);
}
@@ -162,7 +173,7 @@ struct nvme_dev {
bool hmb;
struct sg_table *hmb_sgt;
- mempool_t *iod_mempool;
+ mempool_t *dmavec_mempool;
mempool_t *iod_meta_mempool;
/* shadow doorbell buffer support: */
@@ -246,7 +257,15 @@ enum nvme_iod_flags {
IOD_ABORTED = 1U << 0,
/* uses the small descriptor pool */
- IOD_SMALL_DESCRIPTOR = 1U << 1,
+ IOD_SMALL_DESCRIPTOR = 1U << 1,
+
+ /* single segment dma mapping */
+ IOD_SINGLE_SEGMENT = 1U << 2,
+};
+
+struct nvme_dma_vec {
+ dma_addr_t addr;
+ unsigned int len;
};
/*
@@ -257,13 +276,16 @@ struct nvme_iod {
struct nvme_command cmd;
u8 flags;
u8 nr_descriptors;
- unsigned int dma_len; /* length of single DMA segment mapping */
- dma_addr_t first_dma;
+
+ unsigned int total_len;
+ struct dma_iova_state dma_state;
+ void *descriptors[NVME_MAX_NR_DESCRIPTORS];
+ struct nvme_dma_vec *dma_vecs;
+ unsigned int nr_dma_vecs;
+
dma_addr_t meta_dma;
- struct sg_table sgt;
struct sg_table meta_sgt;
struct nvme_sgl_desc *meta_descriptor;
- void *descriptors[NVME_MAX_NR_DESCRIPTORS];
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -406,18 +428,6 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
return true;
}
-/*
- * Will slightly overestimate the number of pages needed. This is OK
- * as it only leads to a small amount of wasted memory for the lifetime of
- * the I/O.
- */
-static __always_inline int nvme_pci_npages_prp(void)
-{
- unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
- unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
- return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
-}
-
static struct nvme_descriptor_pools *
nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node)
{
@@ -578,32 +588,49 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
spin_unlock(&nvmeq->sq_lock);
}
-static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
- struct request *req)
+enum nvme_use_sgl {
+ SGL_UNSUPPORTED,
+ SGL_SUPPORTED,
+ SGL_FORCED,
+};
+
+static inline bool nvme_pci_metadata_use_sgls(struct request *req)
{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+
if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
return false;
return req->nr_integrity_segments > 1 ||
nvme_req(req)->flags & NVME_REQ_USERCMD;
}
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
- int nseg)
+static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev,
+ struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- unsigned int avg_seg_size;
- avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
+ if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) {
+ if (nvme_req(req)->flags & NVME_REQ_USERCMD)
+ return SGL_FORCED;
+ if (req->nr_integrity_segments > 1)
+ return SGL_FORCED;
+ return SGL_SUPPORTED;
+ }
- if (!nvme_ctrl_sgl_supported(&dev->ctrl))
- return false;
- if (!nvmeq->qid)
- return false;
- if (nvme_pci_metadata_use_sgls(dev, req))
- return true;
- if (!sgl_threshold || avg_seg_size < sgl_threshold)
- return nvme_req(req)->flags & NVME_REQ_USERCMD;
- return true;
+ return SGL_UNSUPPORTED;
+}
+
+static unsigned int nvme_pci_avg_seg_size(struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ unsigned int nseg;
+
+ if (blk_rq_dma_map_coalesce(&iod->dma_state))
+ nseg = 1;
+ else
+ nseg = blk_rq_nr_phys_segments(req);
+ return DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
}
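
nvme_map_data() later compares this average against sgl_threshold for SGL_SUPPORTED requests. A hypothetical illustration (the 32 KiB default for sgl_threshold is an assumption here):

/*
 * 128 KiB payload, 32 physical segments    -> avg 4 KiB   < 32 KiB -> use PRPs
 * 128 KiB payload, coalesced IOVA (nseg=1) -> avg 128 KiB >= 32 KiB -> use SGL
 * SGL_FORCED requests (user commands, multi-segment metadata) skip the
 * threshold entirely.
 */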
static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
@@ -614,11 +641,25 @@ static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq,
return nvmeq->descriptor_pools.large;
}
-static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req)
+static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd)
+{
+ return cmd->common.flags &
+ (NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG);
+}
+
+static inline dma_addr_t nvme_pci_first_desc_dma_addr(struct nvme_command *cmd)
{
+ if (nvme_pci_cmd_use_sgl(cmd))
+ return le64_to_cpu(cmd->common.dptr.sgl.addr);
+ return le64_to_cpu(cmd->common.dptr.prp2);
+}
+
+static void nvme_free_descriptors(struct request *req)
+{
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- dma_addr_t dma_addr = iod->first_dma;
+ dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd);
int i;
if (iod->nr_descriptors == 1) {
@@ -637,68 +678,130 @@ static void nvme_free_descriptors(struct nvme_queue *nvmeq, struct request *req)
}
}
-static void nvme_unmap_data(struct nvme_dev *dev, struct nvme_queue *nvmeq,
- struct request *req)
+static void nvme_free_prps(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ unsigned int i;
- if (iod->dma_len) {
- dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
- rq_dma_dir(req));
+ for (i = 0; i < iod->nr_dma_vecs; i++)
+ dma_unmap_page(nvmeq->dev->dev, iod->dma_vecs[i].addr,
+ iod->dma_vecs[i].len, rq_dma_dir(req));
+ mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool);
+}
+
+static void nvme_free_sgls(struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct device *dma_dev = nvmeq->dev->dev;
+ dma_addr_t sqe_dma_addr = le64_to_cpu(iod->cmd.common.dptr.sgl.addr);
+ unsigned int sqe_dma_len = le32_to_cpu(iod->cmd.common.dptr.sgl.length);
+ struct nvme_sgl_desc *sg_list = iod->descriptors[0];
+ enum dma_data_direction dir = rq_dma_dir(req);
+
+ if (iod->nr_descriptors) {
+ unsigned int nr_entries = sqe_dma_len / sizeof(*sg_list), i;
+
+ for (i = 0; i < nr_entries; i++)
+ dma_unmap_page(dma_dev, le64_to_cpu(sg_list[i].addr),
+ le32_to_cpu(sg_list[i].length), dir);
+ } else {
+ dma_unmap_page(dma_dev, sqe_dma_addr, sqe_dma_len, dir);
+ }
+}
+
+static void nvme_unmap_data(struct request *req)
+{
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct device *dma_dev = nvmeq->dev->dev;
+
+ if (iod->flags & IOD_SINGLE_SEGMENT) {
+ static_assert(offsetof(union nvme_data_ptr, prp1) ==
+ offsetof(union nvme_data_ptr, sgl.addr));
+ dma_unmap_page(dma_dev, le64_to_cpu(iod->cmd.common.dptr.prp1),
+ iod->total_len, rq_dma_dir(req));
return;
}
- WARN_ON_ONCE(!iod->sgt.nents);
+ if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len)) {
+ if (nvme_pci_cmd_use_sgl(&iod->cmd))
+ nvme_free_sgls(req);
+ else
+ nvme_free_prps(req);
+ }
- dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
- nvme_free_descriptors(nvmeq, req);
- mempool_free(iod->sgt.sgl, dev->iod_mempool);
+ if (iod->nr_descriptors)
+ nvme_free_descriptors(req);
}
-static void nvme_print_sgl(struct scatterlist *sgl, int nents)
+static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
+ struct blk_dma_iter *iter)
{
- int i;
- struct scatterlist *sg;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- for_each_sg(sgl, sg, nents, i) {
- dma_addr_t phys = sg_phys(sg);
- pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
- "dma_address:%pad dma_length:%d\n",
- i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
- sg_dma_len(sg));
+ if (iter->len)
+ return true;
+ if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter))
+ return false;
+ if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
+ iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
+ iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
+ iod->nr_dma_vecs++;
}
+ return true;
}
-static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq,
- struct request *req, struct nvme_rw_command *cmnd)
+static blk_status_t nvme_pci_setup_data_prp(struct request *req,
+ struct blk_dma_iter *iter)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- int length = blk_rq_payload_bytes(req);
- struct scatterlist *sg = iod->sgt.sgl;
- int dma_len = sg_dma_len(sg);
- u64 dma_addr = sg_dma_address(sg);
- int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ unsigned int length = blk_rq_payload_bytes(req);
+ dma_addr_t prp1_dma, prp2_dma = 0;
+ unsigned int prp_len, i;
__le64 *prp_list;
- dma_addr_t prp_dma;
- int i;
- length -= (NVME_CTRL_PAGE_SIZE - offset);
- if (length <= 0) {
- iod->first_dma = 0;
- goto done;
+ if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) {
+ iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
+ GFP_ATOMIC);
+ if (!iod->dma_vecs)
+ return BLK_STS_RESOURCE;
+ iod->dma_vecs[0].addr = iter->addr;
+ iod->dma_vecs[0].len = iter->len;
+ iod->nr_dma_vecs = 1;
}
- dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
- if (dma_len) {
- dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
- } else {
- sg = sg_next(sg);
- dma_addr = sg_dma_address(sg);
- dma_len = sg_dma_len(sg);
+ /*
+ * PRP1 always points to the start of the DMA transfers.
+ *
+ * This is the only PRP (except for the list entries) that could be
+ * non-aligned.
+ */
+ prp1_dma = iter->addr;
+ prp_len = min(length, NVME_CTRL_PAGE_SIZE -
+ (iter->addr & (NVME_CTRL_PAGE_SIZE - 1)));
+ iod->total_len += prp_len;
+ iter->addr += prp_len;
+ iter->len -= prp_len;
+ length -= prp_len;
+ if (!length)
+ goto done;
+
+ if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
+ if (WARN_ON_ONCE(!iter->status))
+ goto bad_sgl;
+ goto done;
}
+ /*
+ * PRP2 is usually a list, but can point to data if all data to be
+ * transferred fits into PRP1 + PRP2:
+ */
if (length <= NVME_CTRL_PAGE_SIZE) {
- iod->first_dma = dma_addr;
+ prp2_dma = iter->addr;
+ iod->total_len += length;
goto done;
}
@@ -707,58 +810,80 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_queue *nvmeq,
iod->flags |= IOD_SMALL_DESCRIPTOR;
prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC,
- &prp_dma);
- if (!prp_list)
- return BLK_STS_RESOURCE;
+ &prp2_dma);
+ if (!prp_list) {
+ iter->status = BLK_STS_RESOURCE;
+ goto done;
+ }
iod->descriptors[iod->nr_descriptors++] = prp_list;
- iod->first_dma = prp_dma;
+
i = 0;
for (;;) {
+ prp_list[i++] = cpu_to_le64(iter->addr);
+ prp_len = min(length, NVME_CTRL_PAGE_SIZE);
+ if (WARN_ON_ONCE(iter->len < prp_len))
+ goto bad_sgl;
+
+ iod->total_len += prp_len;
+ iter->addr += prp_len;
+ iter->len -= prp_len;
+ length -= prp_len;
+ if (!length)
+ break;
+
+ if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) {
+ if (WARN_ON_ONCE(!iter->status))
+ goto bad_sgl;
+ goto done;
+ }
+
+ /*
+ * If we've filled the entire descriptor, allocate a new one that
+ * is pointed to by the last entry in the previous PRP list. To
+ * accommodate that, move the last actual entry to the new
+ * descriptor.
+ */
if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
+ dma_addr_t prp_list_dma;
prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large,
- GFP_ATOMIC, &prp_dma);
- if (!prp_list)
- goto free_prps;
+ GFP_ATOMIC, &prp_list_dma);
+ if (!prp_list) {
+ iter->status = BLK_STS_RESOURCE;
+ goto done;
+ }
iod->descriptors[iod->nr_descriptors++] = prp_list;
+
prp_list[0] = old_prp_list[i - 1];
- old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ old_prp_list[i - 1] = cpu_to_le64(prp_list_dma);
i = 1;
}
- prp_list[i++] = cpu_to_le64(dma_addr);
- dma_len -= NVME_CTRL_PAGE_SIZE;
- dma_addr += NVME_CTRL_PAGE_SIZE;
- length -= NVME_CTRL_PAGE_SIZE;
- if (length <= 0)
- break;
- if (dma_len > 0)
- continue;
- if (unlikely(dma_len < 0))
- goto bad_sgl;
- sg = sg_next(sg);
- dma_addr = sg_dma_address(sg);
- dma_len = sg_dma_len(sg);
}
+
done:
- cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
- cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
- return BLK_STS_OK;
-free_prps:
- nvme_free_descriptors(nvmeq, req);
- return BLK_STS_RESOURCE;
+ /*
+ * nvme_unmap_data uses the DPTR field in the SQE to tear down the
+ * mapping, so initialize it even for failures.
+ */
+ iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma);
+ iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma);
+ if (unlikely(iter->status))
+ nvme_unmap_data(req);
+ return iter->status;
+
bad_sgl:
- WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
- "Invalid SGL for payload:%d nents:%d\n",
- blk_rq_payload_bytes(req), iod->sgt.nents);
+ dev_err_once(nvmeq->dev->dev,
+ "Incorrectly formed request for payload:%d nents:%d\n",
+ blk_rq_payload_bytes(req), blk_rq_nr_phys_segments(req));
return BLK_STS_IOERR;
}
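
The PRP handling above follows the standard NVMe layout: PRP1 covers the possibly unaligned head, and PRP2 either points at the remaining data or at a PRP list. A small worked illustration, assuming 4 KiB controller pages (the offsets are made up for the example):

/*
 * 12 KiB transfer starting at page offset 0x200:
 *   prp1 = iova              covers 4096 - 512 = 3584 bytes
 *   8704 bytes remain (> one page), so prp2 points to a PRP list:
 *     list[0] = iova + 0xe00     next 4096 bytes
 *     list[1] = iova + 0x1e00    next 4096 bytes
 *     list[2] = iova + 0x2e00    final 512 bytes
 * Had at most one page's worth remained after prp1, prp2 would have
 * pointed directly at the data instead of a list.
 */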
static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
- struct scatterlist *sg)
+ struct blk_dma_iter *iter)
{
- sge->addr = cpu_to_le64(sg_dma_address(sg));
- sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->addr = cpu_to_le64(iter->addr);
+ sge->length = cpu_to_le32(iter->len);
sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}
@@ -770,21 +895,22 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
}
-static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq,
- struct request *req, struct nvme_rw_command *cmd)
+static blk_status_t nvme_pci_setup_data_sgl(struct request *req,
+ struct blk_dma_iter *iter)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ unsigned int entries = blk_rq_nr_phys_segments(req);
struct nvme_sgl_desc *sg_list;
- struct scatterlist *sg = iod->sgt.sgl;
- unsigned int entries = iod->sgt.nents;
dma_addr_t sgl_dma;
- int i = 0;
+ unsigned int mapped = 0;
- /* setting the transfer type as SGL */
- cmd->flags = NVME_CMD_SGL_METABUF;
+ /* set the transfer type as SGL */
+ iod->cmd.common.flags = NVME_CMD_SGL_METABUF;
- if (entries == 1) {
- nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
+ if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) {
+ nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter);
+ iod->total_len += iter->len;
return BLK_STS_OK;
}
@@ -796,119 +922,104 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_queue *nvmeq,
if (!sg_list)
return BLK_STS_RESOURCE;
iod->descriptors[iod->nr_descriptors++] = sg_list;
- iod->first_dma = sgl_dma;
- nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
do {
- nvme_pci_sgl_set_data(&sg_list[i++], sg);
- sg = sg_next(sg);
- } while (--entries > 0);
+ if (WARN_ON_ONCE(mapped == entries)) {
+ iter->status = BLK_STS_IOERR;
+ break;
+ }
+ nvme_pci_sgl_set_data(&sg_list[mapped++], iter);
+ iod->total_len += iter->len;
+ } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state,
+ iter));
- return BLK_STS_OK;
+ nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped);
+ if (unlikely(iter->status))
+ nvme_free_sgls(req);
+ return iter->status;
}
-static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmnd,
- struct bio_vec *bv)
+static blk_status_t nvme_pci_setup_data_simple(struct request *req,
+ enum nvme_use_sgl use_sgl)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
- unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
-
- iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
- if (dma_mapping_error(dev->dev, iod->first_dma))
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct bio_vec bv = req_bvec(req);
+ unsigned int prp1_offset = bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+ bool prp_possible = prp1_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2;
+ dma_addr_t dma_addr;
+
+ if (!use_sgl && !prp_possible)
+ return BLK_STS_AGAIN;
+ if (is_pci_p2pdma_page(bv.bv_page))
+ return BLK_STS_AGAIN;
+
+ dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(nvmeq->dev->dev, dma_addr))
return BLK_STS_RESOURCE;
- iod->dma_len = bv->bv_len;
-
- cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
- if (bv->bv_len > first_prp_len)
- cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
- else
- cmnd->dptr.prp2 = 0;
- return BLK_STS_OK;
-}
-
-static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
- struct request *req, struct nvme_rw_command *cmnd,
- struct bio_vec *bv)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ iod->total_len = bv.bv_len;
+ iod->flags |= IOD_SINGLE_SEGMENT;
+
+ if (use_sgl == SGL_FORCED || !prp_possible) {
+ iod->cmd.common.flags = NVME_CMD_SGL_METABUF;
+ iod->cmd.common.dptr.sgl.addr = cpu_to_le64(dma_addr);
+ iod->cmd.common.dptr.sgl.length = cpu_to_le32(bv.bv_len);
+ iod->cmd.common.dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
+ } else {
+ unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - prp1_offset;
- iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
- if (dma_mapping_error(dev->dev, iod->first_dma))
- return BLK_STS_RESOURCE;
- iod->dma_len = bv->bv_len;
+ iod->cmd.common.dptr.prp1 = cpu_to_le64(dma_addr);
+ iod->cmd.common.dptr.prp2 = 0;
+ if (bv.bv_len > first_prp_len)
+ iod->cmd.common.dptr.prp2 =
+ cpu_to_le64(dma_addr + first_prp_len);
+ }
- cmnd->flags = NVME_CMD_SGL_METABUF;
- cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
- cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
- cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
return BLK_STS_OK;
}
-static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
- struct nvme_command *cmnd)
+static blk_status_t nvme_map_data(struct request *req)
{
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- blk_status_t ret = BLK_STS_RESOURCE;
- int rc;
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+ enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req);
+ struct blk_dma_iter iter;
+ blk_status_t ret;
+ /*
+ * Try to skip the DMA iterator for single segment requests, as that
+ * significantly improves performance for small I/O sizes.
+ */
if (blk_rq_nr_phys_segments(req) == 1) {
- struct bio_vec bv = req_bvec(req);
-
- if (!is_pci_p2pdma_page(bv.bv_page)) {
- if (!nvme_pci_metadata_use_sgls(dev, req) &&
- (bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
- bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
- return nvme_setup_prp_simple(dev, req,
- &cmnd->rw, &bv);
-
- if (nvmeq->qid && sgl_threshold &&
- nvme_ctrl_sgl_supported(&dev->ctrl))
- return nvme_setup_sgl_simple(dev, req,
- &cmnd->rw, &bv);
- }
+ ret = nvme_pci_setup_data_simple(req, use_sgl);
+ if (ret != BLK_STS_AGAIN)
+ return ret;
}
- iod->dma_len = 0;
- iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
- if (!iod->sgt.sgl)
- return BLK_STS_RESOURCE;
- sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
- iod->sgt.orig_nents = blk_rq_map_sg(req, iod->sgt.sgl);
- if (!iod->sgt.orig_nents)
- goto out_free_sg;
+ if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter))
+ return iter.status;
- rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
- DMA_ATTR_NO_WARN);
- if (rc) {
- if (rc == -EREMOTEIO)
- ret = BLK_STS_TARGET;
- goto out_free_sg;
- }
-
- if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
- ret = nvme_pci_setup_sgls(nvmeq, req, &cmnd->rw);
- else
- ret = nvme_pci_setup_prps(nvmeq, req, &cmnd->rw);
- if (ret != BLK_STS_OK)
- goto out_unmap_sg;
- return BLK_STS_OK;
+ if (use_sgl == SGL_FORCED ||
+ (use_sgl == SGL_SUPPORTED &&
+ (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold)))
+ return nvme_pci_setup_data_sgl(req, &iter);
+ return nvme_pci_setup_data_prp(req, &iter);
+}
-out_unmap_sg:
- dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
-out_free_sg:
- mempool_free(iod->sgt.sgl, dev->iod_mempool);
- return ret;
+static void nvme_pci_sgl_set_data_sg(struct nvme_sgl_desc *sge,
+ struct scatterlist *sg)
+{
+ sge->addr = cpu_to_le64(sg_dma_address(sg));
+ sge->length = cpu_to_le32(sg_dma_len(sg));
+ sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}
-static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
- struct request *req)
+static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_rw_command *cmnd = &iod->cmd.rw;
struct nvme_sgl_desc *sg_list;
struct scatterlist *sgl, *sg;
unsigned int entries;
@@ -939,19 +1050,19 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
iod->meta_descriptor = sg_list;
iod->meta_dma = sgl_dma;
- cmnd->flags = NVME_CMD_SGL_METASEG;
- cmnd->metadata = cpu_to_le64(sgl_dma);
+ iod->cmd.common.flags = NVME_CMD_SGL_METASEG;
+ iod->cmd.common.metadata = cpu_to_le64(sgl_dma);
sgl = iod->meta_sgt.sgl;
if (entries == 1) {
- nvme_pci_sgl_set_data(sg_list, sgl);
+ nvme_pci_sgl_set_data_sg(sg_list, sgl);
return BLK_STS_OK;
}
sgl_dma += sizeof(*sg_list);
nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries);
for_each_sg(sgl, sg, entries, i)
- nvme_pci_sgl_set_data(&sg_list[i + 1], sg);
+ nvme_pci_sgl_set_data_sg(&sg_list[i + 1], sg);
return BLK_STS_OK;
@@ -962,38 +1073,37 @@ out_free_sg:
return BLK_STS_RESOURCE;
}
-static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev,
- struct request *req)
+static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = rq_integrity_vec(req);
- struct nvme_command *cmnd = &iod->cmd;
- iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
- if (dma_mapping_error(dev->dev, iod->meta_dma))
+ iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
+ if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR;
- cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
+ iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma);
return BLK_STS_OK;
}
-static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_map_metadata(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
- nvme_pci_metadata_use_sgls(dev, req))
- return nvme_pci_setup_meta_sgls(dev, req);
- return nvme_pci_setup_meta_mptr(dev, req);
+ nvme_pci_metadata_use_sgls(req))
+ return nvme_pci_setup_meta_sgls(req);
+ return nvme_pci_setup_meta_mptr(req);
}
-static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
+static blk_status_t nvme_prep_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
iod->flags = 0;
iod->nr_descriptors = 0;
- iod->sgt.nents = 0;
+ iod->total_len = 0;
iod->meta_sgt.nents = 0;
ret = nvme_setup_cmd(req->q->queuedata, req);
@@ -1001,13 +1111,13 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
return ret;
if (blk_rq_nr_phys_segments(req)) {
- ret = nvme_map_data(dev, req, &iod->cmd);
+ ret = nvme_map_data(req);
if (ret)
goto out_free_cmd;
}
if (blk_integrity_rq(req)) {
- ret = nvme_map_metadata(dev, req);
+ ret = nvme_map_metadata(req);
if (ret)
goto out_unmap_data;
}
@@ -1016,7 +1126,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
return BLK_STS_OK;
out_unmap_data:
if (blk_rq_nr_phys_segments(req))
- nvme_unmap_data(dev, req->mq_hctx->driver_data, req);
+ nvme_unmap_data(req);
out_free_cmd:
nvme_cleanup_cmd(req);
return ret;
@@ -1041,7 +1151,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
return nvme_fail_nonready_command(&dev->ctrl, req);
- ret = nvme_prep_rq(dev, req);
+ ret = nvme_prep_rq(req);
if (unlikely(ret))
return ret;
spin_lock(&nvmeq->sq_lock);
@@ -1079,7 +1189,7 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
return false;
- return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+ return nvme_prep_rq(req) == BLK_STS_OK;
}
static void nvme_queue_rqs(struct rq_list *rqlist)
@@ -1105,11 +1215,11 @@ static void nvme_queue_rqs(struct rq_list *rqlist)
*rqlist = requeue_list;
}
-static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
- struct nvme_queue *nvmeq,
- struct request *req)
+static __always_inline void nvme_unmap_metadata(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
if (!iod->meta_sgt.nents) {
dma_unmap_page(dev->dev, iod->meta_dma,
@@ -1126,14 +1236,10 @@ static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
- struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
-
if (blk_integrity_rq(req))
- nvme_unmap_metadata(dev, nvmeq, req);
-
+ nvme_unmap_metadata(req);
if (blk_rq_nr_phys_segments(req))
- nvme_unmap_data(dev, nvmeq, req);
+ nvme_unmap_data(req);
}
static void nvme_pci_complete_rq(struct request *req)
@@ -1958,8 +2064,28 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
* might be pointing at!
*/
result = nvme_disable_ctrl(&dev->ctrl, false);
- if (result < 0)
- return result;
+ if (result < 0) {
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ /*
+ * The NVMe Controller Reset method did not get an expected
+ * CSTS.RDY transition, so something with the device appears to
+ * be stuck. Use the lower level and bigger hammer PCIe
+ * Function Level Reset to attempt restoring the device to its
+ * initial state, and try again.
+ */
+ result = pcie_reset_flr(pdev, false);
+ if (result < 0)
+ return result;
+
+ pci_restore_state(pdev);
+ result = nvme_disable_ctrl(&dev->ctrl, false);
+ if (result < 0)
+ return result;
+
+ dev_info(dev->ctrl.device,
+ "controller reset completed after pcie flr\n");
+ }
result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
if (result)
@@ -2331,7 +2457,7 @@ static ssize_t cmb_show(struct device *dev, struct device_attribute *attr,
{
struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
- return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n",
+ return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz : 0x%08x\n",
ndev->cmbloc, ndev->cmbsz);
}
static DEVICE_ATTR_RO(cmb);
@@ -2518,7 +2644,8 @@ static unsigned int nvme_max_io_queues(struct nvme_dev *dev)
*/
if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
return 1;
- return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
+ return blk_mq_num_possible_queues(0) + dev->nr_write_queues +
+ dev->nr_poll_queues;
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
@@ -2913,13 +3040,13 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);
- size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS;
+ size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS;
- dev->iod_mempool = mempool_create_node(1,
+ dev->dmavec_mempool = mempool_create_node(1,
mempool_kmalloc, mempool_kfree,
(void *)alloc_size, GFP_KERNEL,
dev_to_node(dev->dev));
- if (!dev->iod_mempool)
+ if (!dev->dmavec_mempool)
return -ENOMEM;
dev->iod_meta_mempool = mempool_create_node(1,
@@ -2928,10 +3055,9 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
dev_to_node(dev->dev));
if (!dev->iod_meta_mempool)
goto free;
-
return 0;
free:
- mempool_destroy(dev->iod_mempool);
+ mempool_destroy(dev->dmavec_mempool);
return -ENOMEM;
}
@@ -3272,7 +3398,8 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
* over a single page.
*/
dev->ctrl.max_hw_sectors = min_t(u32,
- NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
+ NVME_MAX_BYTES >> SECTOR_SHIFT,
+ dma_opt_mapping_size(&pdev->dev) >> 9);
dev->ctrl.max_segments = NVME_MAX_SEGS;
dev->ctrl.max_integrity_segments = 1;
return dev;
@@ -3380,7 +3507,7 @@ out_disable:
nvme_dbbuf_dma_free(dev);
nvme_free_queues(dev, 0);
out_release_iod_mempool:
- mempool_destroy(dev->iod_mempool);
+ mempool_destroy(dev->dmavec_mempool);
mempool_destroy(dev->iod_meta_mempool);
out_dev_unmap:
nvme_dev_unmap(dev);
@@ -3444,7 +3571,7 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_dev_remove_admin(dev);
nvme_dbbuf_dma_free(dev);
nvme_free_queues(dev, 0);
- mempool_destroy(dev->iod_mempool);
+ mempool_destroy(dev->dmavec_mempool);
mempool_destroy(dev->iod_meta_mempool);
nvme_release_descriptor_pools(dev);
nvme_dev_unmap(dev);
@@ -3847,7 +3974,6 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
- BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_DESCRIPTORS);
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 9bd3646568d0..190a4cfa8a5e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -877,7 +877,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
/*
* Only start IO queues for which we have allocated the tagset
- * and limitted it to the available queues. On reconnects, the
+ * and limited it to the available queues. On reconnects, the
* queue number might have changed.
*/
nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index d924008c3949..9233f088fac8 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1745,9 +1745,14 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl,
qid, ret);
tls_handshake_cancel(queue->sock->sk);
} else {
- dev_dbg(nctrl->device,
- "queue %d: TLS handshake complete, error %d\n",
- qid, queue->tls_err);
+ if (queue->tls_err) {
+ dev_err(nctrl->device,
+ "queue %d: TLS handshake complete, error %d\n",
+ qid, queue->tls_err);
+ } else {
+ dev_dbg(nctrl->device,
+ "queue %d: TLS handshake complete\n", qid);
+ }
ret = queue->tls_err;
}
return ret;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 175c5b6d4dd5..884286f90688 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -581,8 +581,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ns->enabled)
goto out_unlock;
- ret = -EMFILE;
-
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index eba42df2f821..8d246b8ca604 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -46,6 +46,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
id->npda = id->npdg;
/* NOWS = Namespace Optimal Write Size */
id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev));
+
+ /* Set WZDS and DRB if device supports unmapped write zeroes */
+ if (bdev_write_zeroes_unmap_sectors(bdev))
+ id->dlfeat = (1 << 3) | 0x1;
}
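
For reference, the DLFEAT value advertised here decodes as follows (per my reading of the NVMe spec; treat the breakdown as an annotation, not patch content):

/*
 * id->dlfeat = (1 << 3) | 0x1 = 0x09
 *   bits 2:0 = 001b  reads from deallocated blocks return all zeroes
 *   bit  3   = 1     the Deallocate (DEAC) bit in Write Zeroes is supported
 * which is what the host-side check in nvme_update_ns_info_block() looks
 * for before setting NVME_NS_DEAC.
 */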
void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
@@ -65,7 +69,7 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns)
return;
if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) {
- ns->metadata_size = bi->tuple_size;
+ ns->metadata_size = bi->metadata_size;
if (bi->flags & BLK_INTEGRITY_REF_TAG)
ns->pi_type = NVME_NS_DPS_PI_TYPE1;
else
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b7515c53829b..3b4b0df8f879 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -106,7 +106,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
pctrl->max_hw_sectors);
/*
- * nvmet_passthru_map_sg is limitted to using a single bio so limit
+ * nvmet_passthru_map_sg is limited to using a single bio so limit
* the mdts based on BIO_MAX_VECS as well
*/
max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT,
@@ -147,7 +147,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
* When passthru controller is setup using nvme-loop transport it will
* export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in
* the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl()
- * code path with duplicate ctr subsynqn. In order to prevent that we
+ * code path with duplicate ctrl subsysnqn. In order to prevent that we
* mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn.
*/
memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn));
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index a4295a5b8d28..2e78397a7373 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -1242,8 +1242,11 @@ static void nvmet_pci_epf_queue_response(struct nvmet_req *req)
iod->status = le16_to_cpu(req->cqe->status) >> 1;
- /* If we have no data to transfer, directly complete the command. */
- if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
+ /*
+ * If the command failed or we have no data to transfer, complete the
+ * command immediately.
+ */
+ if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) {
nvmet_pci_epf_complete_iod(iod);
return;
}
@@ -1604,8 +1607,13 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
goto complete;
}
+ /*
+ * If nvmet_req_init() fails (e.g., unsupported opcode), it calls
+ * __nvmet_req_complete() internally, which calls
+ * nvmet_pci_epf_queue_response() and completes the command directly.
+ */
if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops))
- goto complete;
+ return;
iod->data_len = nvmet_req_transfer_len(req);
if (iod->data_len) {
@@ -1643,10 +1651,11 @@ static void nvmet_pci_epf_exec_iod_work(struct work_struct *work)
wait_for_completion(&iod->done);
- if (iod->status == NVME_SC_SUCCESS) {
- WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
- nvmet_pci_epf_transfer_iod_data(iod);
- }
+ if (iod->status != NVME_SC_SUCCESS)
+ return;
+
+ WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE);
+ nvmet_pci_epf_transfer_iod_data(iod);
complete:
nvmet_pci_epf_complete_iod(iod);
@@ -1860,7 +1869,7 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
- ctrl->io_sqes, sizeof(struct nvme_completion));
+ ctrl->io_cqes, sizeof(struct nvme_completion));
goto err;
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 688033b88d38..470bf37e5a63 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1928,10 +1928,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
struct sock *sk = queue->sock->sk;
/* Restore the default callbacks before starting upcall */
- read_lock_bh(&sk->sk_callback_lock);
+ write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data = NULL;
sk->sk_data_ready = port->data_ready;
- read_unlock_bh(&sk->sk_callback_lock);
+ write_unlock_bh(&sk->sk_callback_lock);
if (!nvmet_tcp_try_peek_pdu(queue)) {
if (!nvmet_tcp_tls_handshake(queue))
return;
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 29a60fabfcc8..15a579cf528c 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -541,7 +541,7 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req)
struct bio *bio;
int sg_cnt;
- /* Request is completed on len mismatch in nvmet_check_transter_len() */
+ /* Request is completed on len mismatch in nvmet_check_transfer_len() */
if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req)))
return;
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
index ca6dd71d8a2e..7807ec0e2d18 100644
--- a/drivers/nvmem/imx-ocotp-ele.c
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/if_ether.h> /* ETH_ALEN */
enum fuse_type {
FUSE_FSB = BIT(0),
@@ -118,9 +119,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
int i;
/* Deal with some post processing of nvmem cell data */
- if (id && !strcmp(id, "mac-address"))
+ if (id && !strcmp(id, "mac-address")) {
+ bytes = min(bytes, ETH_ALEN);
for (i = 0; i < bytes / 2; i++)
swap(buf[i], buf[bytes - i - 1]);
+ }
return 0;
}
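
Both OCOTP fixes clamp the in-place MAC reversal to ETH_ALEN so an oversized nvmem cell can no longer index past the buffer. A standalone sketch of that post-processing (userspace-style, with ETH_ALEN defined locally only for the example):

#include <stddef.h>

#define ETH_ALEN 6	/* defined here only for the sketch */

/* Reverse the byte order of an OTP-stored MAC address, touching at most
 * ETH_ALEN bytes regardless of how large the cell is declared to be.
 */
static void reverse_mac(unsigned char *buf, size_t bytes)
{
	size_t i, n = bytes < ETH_ALEN ? bytes : ETH_ALEN;

	for (i = 0; i < n / 2; i++) {
		unsigned char tmp = buf[i];

		buf[i] = buf[n - i - 1];
		buf[n - i - 1] = tmp;
	}
}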
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 79dd4fda0329..7bf7656d4f96 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/if_ether.h> /* ETH_ALEN */
#define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the
* OTP Bank0 Word0
@@ -227,9 +228,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index,
int i;
/* Deal with some post processing of nvmem cell data */
- if (id && !strcmp(id, "mac-address"))
+ if (id && !strcmp(id, "mac-address")) {
+ bytes = min(bytes, ETH_ALEN);
for (i = 0; i < bytes / 2; i++)
swap(buf[i], buf[bytes - i - 1]);
+ }
return 0;
}
diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c
index 436426d4e8f9..a27eeb08146f 100644
--- a/drivers/nvmem/layouts/u-boot-env.c
+++ b/drivers/nvmem/layouts/u-boot-env.c
@@ -92,7 +92,7 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
size_t crc32_data_offset;
size_t crc32_data_len;
size_t crc32_offset;
- __le32 *crc32_addr;
+ uint32_t *crc32_addr;
size_t data_offset;
size_t data_len;
size_t dev_size;
@@ -143,12 +143,12 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem,
goto err_kfree;
}
- crc32_addr = (__le32 *)(buf + crc32_offset);
- crc32 = le32_to_cpu(*crc32_addr);
+ crc32_addr = (uint32_t *)(buf + crc32_offset);
+ crc32 = *crc32_addr;
crc32_data_len = dev_size - crc32_data_offset;
data_len = dev_size - data_offset;
- calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
+ calc = crc32_le(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L;
if (calc != crc32) {
dev_err(dev, "Invalid calculated CRC32: 0x%08x (expected: 0x%08x)\n", calc, crc32);
err = -EINVAL;
diff --git a/drivers/pci/controller/pci-hyperv-intf.c b/drivers/pci/controller/pci-hyperv-intf.c
index cc96be450360..28b3e93d31c0 100644
--- a/drivers/pci/controller/pci-hyperv-intf.c
+++ b/drivers/pci/controller/pci-hyperv-intf.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hyperv.h>
+#include <linux/export.h>
struct hyperv_pci_block_ops hvpci_block_ops;
EXPORT_SYMBOL_GPL(hvpci_block_ops);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ef5d655a0052..13680363ff19 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -600,7 +600,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
#define hv_msi_prepare pci_msi_prepare
/**
- * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
+ * hv_irq_retarget_interrupt() - "Unmask" the IRQ by setting its current
* affinity.
* @data: Describes the IRQ
*
@@ -609,7 +609,7 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data)
* is built out of this PCI bus's instance GUID and the function
* number of the device.
*/
-static void hv_arch_irq_unmask(struct irq_data *data)
+static void hv_irq_retarget_interrupt(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
struct hv_retarget_device_interrupt *params;
@@ -714,6 +714,20 @@ out:
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
}
+
+static void hv_arch_irq_unmask(struct irq_data *data)
+{
+ if (hv_root_partition())
+ /*
+ * In case of the nested root partition, the nested hypervisor
+ * is taking care of interrupt remapping and thus the
+ * MAP_DEVICE_INTERRUPT hypercall is required instead of
+ * RETARGET_INTERRUPT.
+ */
+ (void)hv_map_msi_interrupt(data, NULL);
+ else
+ hv_irq_retarget_interrupt(data);
+}
#elif defined(CONFIG_ARM64)
/*
* SPI vectors to use for vPCI; arch SPIs range is [32, 1019], but leaving a bit
@@ -4144,6 +4158,9 @@ static int __init init_hv_pci_drv(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ if (hv_root_partition() && !hv_nested)
+ return -ENODEV;
+
ret = hv_pci_irqchip_init();
if (ret)
return ret;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 67db34fd10ee..b853585cb1f8 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -708,6 +708,8 @@ static int pci_pm_prepare(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ dev_pm_set_strict_midlayer(dev, true);
+
if (pm && pm->prepare) {
int error = pm->prepare(dev);
if (error < 0)
@@ -749,6 +751,8 @@ static void pci_pm_complete(struct device *dev)
if (pci_dev->current_state < pre_sleep_state)
pm_request_resume(dev);
}
+
+ dev_pm_set_strict_midlayer(dev, false);
}
#else /* !CONFIG_PM_SLEEP */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 4b8693ec9e4c..e6a34db77826 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2508,6 +2508,7 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
+#if IS_ENABLED(CONFIG_PCI_PWRCTRL)
static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
{
struct pci_host_bridge *host = pci_find_host_bridge(bus);
@@ -2537,6 +2538,12 @@ static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, in
return pdev;
}
+#else
+static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
+{
+ return NULL;
+}
+#endif
/*
* Read the config data for a PCI device, sanity-check it,
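
Editor's note: the probe.c hunk above guards pci_pwrctrl_create_device() behind CONFIG_PCI_PWRCTRL and supplies a NULL-returning stub so callers need no #ifdefs of their own. A minimal sketch of the same pattern, assuming a hypothetical CONFIG_FOO option and make_foo_device() helper (not kernel symbols):

#include <linux/platform_device.h>

#if IS_ENABLED(CONFIG_FOO)
static struct platform_device *make_foo_device(struct device *parent)
{
	/* real body, only compiled when the option is enabled */
	return platform_device_register_data(parent, "foo", PLATFORM_DEVID_AUTO,
					     NULL, 0);
}
#else
static struct platform_device *make_foo_device(struct device *parent)
{
	return NULL;	/* feature compiled out: caller treats NULL as "nothing to do" */
}
#endif
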
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8e2daea81666..04a5a34e7a95 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -994,7 +994,8 @@ struct phy *phy_create(struct device *dev, struct device_node *node,
}
device_initialize(&phy->dev);
- mutex_init(&phy->mutex);
+ lockdep_register_key(&phy->lockdep_key);
+ mutex_init_with_key(&phy->mutex, &phy->lockdep_key);
phy->dev.class = &phy_class;
phy->dev.parent = dev;
@@ -1259,6 +1260,8 @@ static void phy_release(struct device *dev)
dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
debugfs_remove_recursive(phy->debugfs);
regulator_put(phy->pwr);
+ mutex_destroy(&phy->mutex);
+ lockdep_unregister_key(&phy->lockdep_key);
ida_free(&phy_ida, phy->id);
kfree(phy);
}
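
Editor's note: the phy-core hunks above give each phy mutex its own lock class, so nested locking of two different phy instances no longer produces false-positive lockdep reports. A minimal sketch of the register/init/destroy/unregister pairing, assuming a hypothetical struct foo:

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo {
	struct mutex lock;
	struct lock_class_key lock_key;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	lockdep_register_key(&f->lock_key);
	mutex_init_with_key(&f->lock, &f->lock_key);
	return f;
}

static void foo_destroy(struct foo *f)
{
	mutex_destroy(&f->lock);
	lockdep_unregister_key(&f->lock_key);	/* key must outlive the mutex it classifies */
	kfree(f);
}
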
diff --git a/drivers/phy/phy-snps-eusb2.c b/drivers/phy/phy-snps-eusb2.c
index b73a1d7e57b3..751b6d8ba2be 100644
--- a/drivers/phy/phy-snps-eusb2.c
+++ b/drivers/phy/phy-snps-eusb2.c
@@ -567,9 +567,11 @@ static int snps_eusb2_hsphy_probe(struct platform_device *pdev)
}
}
- if (IS_ERR_OR_NULL(phy->ref_clk))
- return dev_err_probe(dev, PTR_ERR(phy->ref_clk),
+ if (IS_ERR_OR_NULL(phy->ref_clk)) {
+ ret = phy->ref_clk ? PTR_ERR(phy->ref_clk) : -ENOENT;
+ return dev_err_probe(dev, ret,
"failed to get ref clk\n");
+ }
num = ARRAY_SIZE(phy->vregs);
for (i = 0; i < num; i++)
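
Editor's note: the snps-eusb2 fix above maps a NULL (optional-but-missing) clock to -ENOENT before handing it to dev_err_probe(), which needs a real errno. A minimal sketch of that normalisation, assuming a clock already looked up by the caller:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int check_ref_clk(struct device *dev, struct clk *clk)
{
	int ret;

	if (IS_ERR_OR_NULL(clk)) {
		ret = clk ? PTR_ERR(clk) : -ENOENT;	/* NULL carries no errno of its own */
		return dev_err_probe(dev, ret, "failed to get ref clk\n");
	}

	return 0;
}
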
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 23a23f2d64e5..e818f6c3980e 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -648,14 +648,15 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
udelay(100);
}
- if (padctl->soc->trk_hw_mode) {
- value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
- value |= USB2_TRK_HW_MODE;
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ if (padctl->soc->trk_update_on_idle)
value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE;
- padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
- } else {
+ if (padctl->soc->trk_hw_mode)
+ value |= USB2_TRK_HW_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+
+ if (!padctl->soc->trk_hw_mode)
clk_disable_unprepare(priv->usb2_trk_clk);
- }
}
static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
@@ -782,13 +783,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl,
}
static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
- bool status)
+ struct tegra_xusb_usb2_port *port, bool status)
{
- u32 value;
+ u32 value, id_override;
+ int err = 0;
dev_dbg(padctl->dev, "%s id override\n", status ? "set" : "clear");
value = padctl_readl(padctl, USB2_VBUS_ID);
+ id_override = value & ID_OVERRIDE(~0);
if (status) {
if (value & VBUS_OVERRIDE) {
@@ -799,15 +802,35 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl,
value = padctl_readl(padctl, USB2_VBUS_ID);
}
- value &= ~ID_OVERRIDE(~0);
- value |= ID_OVERRIDE_GROUNDED;
+ if (id_override != ID_OVERRIDE_GROUNDED) {
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_GROUNDED;
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+
+ err = regulator_enable(port->supply);
+ if (err) {
+ dev_err(padctl->dev, "Failed to enable regulator: %d\n", err);
+ return err;
+ }
+ }
} else {
- value &= ~ID_OVERRIDE(~0);
- value |= ID_OVERRIDE_FLOATING;
+ if (id_override == ID_OVERRIDE_GROUNDED) {
+ /*
+ * The regulator is disabled only when the role transitions
+ * from USB_ROLE_HOST to USB_ROLE_NONE.
+ */
+ err = regulator_disable(port->supply);
+ if (err) {
+ dev_err(padctl->dev, "Failed to disable regulator: %d\n", err);
+ return err;
+ }
+
+ value &= ~ID_OVERRIDE(~0);
+ value |= ID_OVERRIDE_FLOATING;
+ padctl_writel(padctl, value, USB2_VBUS_ID);
+ }
}
- padctl_writel(padctl, value, USB2_VBUS_ID);
-
return 0;
}
@@ -826,27 +849,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode,
if (mode == PHY_MODE_USB_OTG) {
if (submode == USB_ROLE_HOST) {
- tegra186_xusb_padctl_id_override(padctl, true);
-
- err = regulator_enable(port->supply);
+ err = tegra186_xusb_padctl_id_override(padctl, port, true);
+ if (err)
+ goto out;
} else if (submode == USB_ROLE_DEVICE) {
tegra186_xusb_padctl_vbus_override(padctl, true);
} else if (submode == USB_ROLE_NONE) {
- /*
- * When port is peripheral only or role transitions to
- * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not
- * enabled.
- */
- if (regulator_is_enabled(port->supply))
- regulator_disable(port->supply);
-
- tegra186_xusb_padctl_id_override(padctl, false);
+ err = tegra186_xusb_padctl_id_override(padctl, port, false);
+ if (err)
+ goto out;
tegra186_xusb_padctl_vbus_override(padctl, false);
}
}
-
+out:
mutex_unlock(&padctl->lock);
-
return err;
}
@@ -1710,7 +1726,8 @@ const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = {
.num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
.supports_gen2 = true,
.poll_trk_completed = true,
- .trk_hw_mode = true,
+ .trk_hw_mode = false,
+ .trk_update_on_idle = true,
.supports_lp_cfg_en = true,
};
EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc);
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index 6e45d194c689..d2b5f9565132 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -434,6 +434,7 @@ struct tegra_xusb_padctl_soc {
bool need_fake_usb3_port;
bool poll_trk_completed;
bool trk_hw_mode;
+ bool trk_update_on_idle;
bool supports_lp_cfg_en;
};
diff --git a/drivers/platform/arm64/huawei-gaokun-ec.c b/drivers/platform/arm64/huawei-gaokun-ec.c
index 7e5aa7ca2403..7170f8eb76f7 100644
--- a/drivers/platform/arm64/huawei-gaokun-ec.c
+++ b/drivers/platform/arm64/huawei-gaokun-ec.c
@@ -662,6 +662,7 @@ static void gaokun_aux_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
+ of_node_put(dev->of_node);
kfree(adev);
}
@@ -693,6 +694,7 @@ static int gaokun_aux_init(struct device *parent, const char *name,
ret = auxiliary_device_init(adev);
if (ret) {
+ of_node_put(adev->dev.of_node);
kfree(adev);
return ret;
}
diff --git a/drivers/platform/cznic/turris-omnia-mcu-gpio.c b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
index c2df24ea8686..77184c8b42ea 100644
--- a/drivers/platform/cznic/turris-omnia-mcu-gpio.c
+++ b/drivers/platform/cznic/turris-omnia-mcu-gpio.c
@@ -439,27 +439,28 @@ static int omnia_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
return 0;
}
-static void omnia_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+static int omnia_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
const struct omnia_gpio *gpio = &omnia_gpios[offset];
struct omnia_mcu *mcu = gpiochip_get_data(gc);
u16 val, mask;
if (!gpio->ctl_cmd)
- return;
+ return -EINVAL;
mask = BIT(gpio->ctl_bit);
val = value ? mask : 0;
- omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask);
+ return omnia_ctl_cmd(mcu, gpio->ctl_cmd, val, mask);
}
-static void omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
unsigned long ctl = 0, ctl_mask = 0, ext_ctl = 0, ext_ctl_mask = 0;
struct omnia_mcu *mcu = gpiochip_get_data(gc);
unsigned int i;
+ int err;
for_each_set_bit(i, mask, ARRAY_SIZE(omnia_gpios)) {
unsigned long *field, *field_mask;
@@ -488,13 +489,21 @@ static void omnia_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
guard(mutex)(&mcu->lock);
- if (ctl_mask)
- omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL,
- ctl, ctl_mask);
+ if (ctl_mask) {
+ err = omnia_ctl_cmd_locked(mcu, OMNIA_CMD_GENERAL_CONTROL,
+ ctl, ctl_mask);
+ if (err)
+ return err;
+ }
+
+ if (ext_ctl_mask) {
+ err = omnia_ctl_cmd_locked(mcu, OMNIA_CMD_EXT_CONTROL,
+ ext_ctl, ext_ctl_mask);
+ if (err)
+ return err;
+ }
- if (ext_ctl_mask)
- omnia_ctl_cmd_locked(mcu, OMNIA_CMD_EXT_CONTROL,
- ext_ctl, ext_ctl_mask);
+ return 0;
}
static bool omnia_gpio_available(struct omnia_mcu *mcu,
@@ -1015,8 +1024,8 @@ int omnia_mcu_register_gpiochip(struct omnia_mcu *mcu)
mcu->gc.direction_output = omnia_gpio_direction_output;
mcu->gc.get = omnia_gpio_get;
mcu->gc.get_multiple = omnia_gpio_get_multiple;
- mcu->gc.set = omnia_gpio_set;
- mcu->gc.set_multiple = omnia_gpio_set_multiple;
+ mcu->gc.set_rv = omnia_gpio_set;
+ mcu->gc.set_multiple_rv = omnia_gpio_set_multiple;
mcu->gc.init_valid_mask = omnia_gpio_init_valid_mask;
mcu->gc.can_sleep = true;
mcu->gc.names = omnia_mcu_gpio_names;
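
Editor's note: the conversion above switches the chip to the int-returning setters (gc->set_rv / gc->set_multiple_rv) so bus errors propagate to the GPIO core instead of being dropped. A minimal sketch of such a callback, assuming a hypothetical my_dev with a placeholder my_write_bit() bus helper:

#include <linux/gpio/driver.h>

struct my_dev {
	struct gpio_chip gc;
	/* bus handle, locking, ... */
};

/* placeholder bus accessor: returns 0 or a negative errno */
static int my_write_bit(struct my_dev *dev, unsigned int offset, int value)
{
	return 0;
}

static int my_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct my_dev *dev = gpiochip_get_data(gc);

	/* return the bus error instead of silently discarding it */
	return my_write_bit(dev, offset, value);
}

/* in probe: dev->gc.set_rv = my_gpio_set; */
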
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index a1c529f1ff1a..4776013e0764 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -15,6 +15,7 @@
#include <linux/hwmon.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/string_helpers.h>
#include <uapi/linux/psci.h>
#define MLXBF_PMC_WRITE_REG_32 0x82000009
@@ -1222,7 +1223,7 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt)
return -ENODEV;
}
-/* Get the event number given the name */
+/* Get the event name given the number */
static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt)
{
const struct mlxbf_pmc_events *events;
@@ -1784,6 +1785,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
attr, struct mlxbf_pmc_attribute, dev_attr);
unsigned int blk_num, cnt_num;
bool is_l3 = false;
+ char *evt_name;
int evt_num;
int err;
@@ -1791,14 +1793,23 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev,
cnt_num = attr_event->index;
if (isalpha(buf[0])) {
+ /* Remove the trailing newline character if present */
+ evt_name = kstrdup_and_replace(buf, '\n', '\0', GFP_KERNEL);
+ if (!evt_name)
+ return -ENOMEM;
+
evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num],
- buf);
+ evt_name);
+ kfree(evt_name);
if (evt_num < 0)
return -EINVAL;
} else {
err = kstrtouint(buf, 0, &evt_num);
if (err < 0)
return err;
+
+ if (!mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num))
+ return -EINVAL;
}
if (strstr(pmc->block_name[blk_num], "l3cache"))
@@ -1879,13 +1890,14 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
{
struct mlxbf_pmc_attribute *attr_enable = container_of(
attr, struct mlxbf_pmc_attribute, dev_attr);
- unsigned int en, blk_num;
+ unsigned int blk_num;
u32 word;
int err;
+ bool en;
blk_num = attr_enable->nr;
- err = kstrtouint(buf, 0, &en);
+ err = kstrtobool(buf, &en);
if (err < 0)
return err;
@@ -1905,14 +1917,11 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev,
MLXBF_PMC_CRSPACE_PERFMON_CTL(pmc->block[blk_num].counters),
MLXBF_PMC_WRITE_REG_32, word);
} else {
- if (en && en != 1)
- return -EINVAL;
-
err = mlxbf_pmc_config_l3_counters(blk_num, false, !!en);
if (err)
return err;
- if (en == 1) {
+ if (en) {
err = mlxbf_pmc_config_l3_counters(blk_num, true, false);
if (err)
return err;
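
Editor's note: the mlxbf-pmc changes above use kstrdup_and_replace() to strip the trailing newline from a sysfs string and kstrtobool() to parse the enable attribute. A minimal sketch of both idioms in generic store callbacks; the lookup/apply steps are left as comments and are not driver code:

#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>

static ssize_t event_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	char *name;

	/* copy the input and replace the trailing newline sysfs appends */
	name = kstrdup_and_replace(buf, '\n', '\0', GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* ... look the event up by 'name' here ... */
	kfree(name);
	return count;
}

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	bool en;
	int err;

	err = kstrtobool(buf, &en);	/* accepts 0/1, y/n, on/off */
	if (err)
		return err;

	/* ... apply 'en' to the hardware here ... */
	return count;
}
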
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index abbc2644ff6d..bea87a85ae75 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -58,6 +58,8 @@ obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP) += hp/
# Hewlett Packard Enterprise
obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o
+obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
+
# IBM Thinkpad and Lenovo
obj-$(CONFIG_IBM_RTL) += ibm_rtl.o
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
@@ -128,7 +130,6 @@ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o
obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o
# Platform drivers
-obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o
obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o
obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o
obj-$(CONFIG_WIRELESS_HOTKEY) += wireless-hotkey.o
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 20ec122a9fe0..b58cf74197f0 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -90,6 +90,14 @@ static struct awcc_quirks empty_quirks;
static const struct dmi_system_id awcc_dmi_table[] __initconst = {
{
+ .ident = "Alienware Area-51m",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware Area-51m"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
.ident = "Alienware Area-51m R2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -98,6 +106,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
.driver_data = &generic_quirks,
},
{
+ .ident = "Alienware m15 R5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alienware m15 R5"),
+ },
+ .driver_data = &generic_quirks,
+ },
+ {
.ident = "Alienware m15 R7",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
@@ -233,6 +249,7 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
},
.driver_data = &g_series_quirks,
},
+ {}
};
enum AWCC_GET_FAN_SENSORS_OPERATIONS {
diff --git a/drivers/platform/x86/dell/dell-lis3lv02d.c b/drivers/platform/x86/dell/dell-lis3lv02d.c
index 0791118dd6b7..732de5f556f8 100644
--- a/drivers/platform/x86/dell/dell-lis3lv02d.c
+++ b/drivers/platform/x86/dell/dell-lis3lv02d.c
@@ -49,6 +49,7 @@ static const struct dmi_system_id lis3lv02d_devices[] __initconst = {
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6330", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Latitude E6430", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision 3540", 0x29),
+ DELL_LIS3LV02D_DMI_ENTRY("Precision 3551", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Precision M6800", 0x29),
DELL_LIS3LV02D_DMI_ENTRY("Vostro V131", 0x1d),
DELL_LIS3LV02D_DMI_ENTRY("Vostro 5568", 0x29),
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index 67f3d7158403..62e3d060f038 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -689,9 +689,13 @@ static int dell_wmi_ddv_battery_translate(struct dell_wmi_ddv_data *data,
dev_dbg(&data->wdev->dev, "Translation cache miss\n");
- /* Perform a translation between a ACPI battery and a battery index */
-
- ret = power_supply_get_property(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val);
+ /*
+	 * Perform a translation between an ACPI battery and a battery index.
+ * We have to use power_supply_get_property_direct() here because this
+ * function will also get called from the callbacks of the power supply
+ * extension.
+ */
+ ret = power_supply_get_property_direct(battery, POWER_SUPPLY_PROP_SERIAL_NUMBER, &val);
if (ret < 0)
return ret;
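
Editor's note: the change above switches to power_supply_get_property_direct() because this translate helper can also run from the driver's own power-supply-extension callbacks, where the extension-aware getter could deadlock on the extension lock. A minimal sketch of an extension getter reading a base property of its own supply through the direct accessor; the property choice and names are illustrative, not the dell-wmi-ddv code:

#include <linux/power_supply.h>

static int my_ext_get_property(struct power_supply *psy,
			       const struct power_supply_ext *ext,
			       void *data,
			       enum power_supply_property psp,
			       union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
		/* use the _direct variant: the extension-aware getter would
		 * recurse into the extension machinery we are called from */
		return power_supply_get_property_direct(psy,
				POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, val);
	default:
		return -EINVAL;
	}
}
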
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index b5e4da6a6779..edb9d2fb02ec 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1669,7 +1669,7 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
- priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED;
+ priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN;
err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led);
if (err)
@@ -1728,7 +1728,7 @@ static int ideapad_fn_lock_led_init(struct ideapad_private *priv)
priv->fn_lock.led.name = "platform::" LED_FUNCTION_FNLOCK;
priv->fn_lock.led.brightness_get = ideapad_fn_lock_led_cdev_get;
priv->fn_lock.led.brightness_set_blocking = ideapad_fn_lock_led_cdev_set;
- priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED;
+ priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN;
err = led_classdev_register(&priv->platform_device->dev, &priv->fn_lock.led);
if (err)
diff --git a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
index 89153afd7015..7b9bad1978ff 100644
--- a/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
+++ b/drivers/platform/x86/lenovo-wmi-hotkey-utilities.c
@@ -122,26 +122,35 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct
return -EIO;
union acpi_object *obj __free(kfree) = output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- led_version = obj->integer.value;
- else
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
return -EIO;
- wpriv->cdev[led_type].max_brightness = LED_ON;
- wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME;
+ led_version = obj->integer.value;
+
+ /*
+	 * The interface defines the output parameter as: 0 means the mute LED is
+	 * not supported, non-zero means it is supported.
+ */
+ if (led_version == 0)
+ return 0;
+
switch (led_type) {
case MIC_MUTE:
- if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER)
- return -EIO;
+ if (led_version != WMI_LUD_SUPPORT_MICMUTE_LED_VER) {
+ pr_warn("The MIC_MUTE LED of this device isn't supported.\n");
+ return 0;
+ }
wpriv->cdev[led_type].name = "platform::micmute";
wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_micmute_led_set;
wpriv->cdev[led_type].default_trigger = "audio-micmute";
break;
case AUDIO_MUTE:
- if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER)
- return -EIO;
+ if (led_version != WMI_LUD_SUPPORT_AUDIOMUTE_LED_VER) {
+ pr_warn("The AUDIO_MUTE LED of this device isn't supported.\n");
+ return 0;
+ }
wpriv->cdev[led_type].name = "platform::mute";
wpriv->cdev[led_type].brightness_set_blocking = &lsh_wmi_audiomute_led_set;
@@ -152,6 +161,9 @@ static int lenovo_super_hotkey_wmi_led_init(enum mute_led_type led_type, struct
return -EINVAL;
}
+ wpriv->cdev[led_type].max_brightness = LED_ON;
+ wpriv->cdev[led_type].flags = LED_CORE_SUSPENDRESUME;
+
err = devm_led_classdev_register(dev, &wpriv->cdev[led_type]);
if (err < 0) {
dev_err(dev, "Could not register mute LED %d : %d\n", led_type, err);
diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c
index c1e148657c87..39359811a930 100644
--- a/drivers/pmdomain/governor.c
+++ b/drivers/pmdomain/governor.c
@@ -8,6 +8,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
+#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/ktime.h>
@@ -349,6 +350,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
struct cpuidle_device *dev;
ktime_t domain_wakeup, next_hrtimer;
ktime_t now = ktime_get();
+ struct device *cpu_dev;
+ s64 cpu_constraint, global_constraint;
s64 idle_duration_ns;
int cpu, i;
@@ -359,6 +362,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
return true;
+ global_constraint = cpu_latency_qos_limit();
/*
* Find the next wakeup for any of the online CPUs within the PM domain
* and its subdomains. Note, we only need the genpd->cpus, as it already
@@ -372,8 +376,16 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
if (ktime_before(next_hrtimer, domain_wakeup))
domain_wakeup = next_hrtimer;
}
+
+ cpu_dev = get_cpu_device(cpu);
+ if (cpu_dev) {
+ cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev);
+ if (cpu_constraint < global_constraint)
+ global_constraint = cpu_constraint;
+ }
}
+ global_constraint *= NSEC_PER_USEC;
/* The minimum idle duration is from now - until the next wakeup. */
idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
if (idle_duration_ns <= 0)
@@ -389,8 +401,10 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
*/
i = genpd->state_idx;
do {
- if (idle_duration_ns >= (genpd->states[i].residency_ns +
- genpd->states[i].power_off_latency_ns)) {
+ if ((idle_duration_ns >= (genpd->states[i].residency_ns +
+ genpd->states[i].power_off_latency_ns)) &&
+ (global_constraint >= (genpd->states[i].power_on_latency_ns +
+ genpd->states[i].power_off_latency_ns))) {
genpd->state_idx = i;
genpd->gd->last_enter = now;
genpd->gd->reflect_residency = true;
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index e71f0af4e378..733d81262159 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -128,6 +128,15 @@ config POWER_RESET_LINKSTATION
Say Y here if you have a Buffalo LinkStation LS421D/E.
+config POWER_RESET_MACSMC
+ tristate "Apple SMC reset/power-off driver"
+ depends on MFD_MACSMC
+ help
+ This driver supports reset and power-off on Apple Mac machines
+ that implement this functionality via the SMC.
+
+ Say Y here if you have an Apple Silicon Mac.
+
config POWER_RESET_MSM
bool "Qualcomm MSM power-off driver"
depends on ARCH_QCOM
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1b9b63a1a873..b7c2b5940be9 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o
obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
obj-$(CONFIG_POWER_RESET_LINKSTATION) += linkstation-poweroff.o
+obj-$(CONFIG_POWER_RESET_MACSMC) += macsmc-reboot.o
obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
diff --git a/drivers/power/reset/macsmc-reboot.c b/drivers/power/reset/macsmc-reboot.c
new file mode 100644
index 000000000000..e9702acdd366
--- /dev/null
+++ b/drivers/power/reset/macsmc-reboot.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC Reboot/Poweroff Handler
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+struct macsmc_reboot_nvmem {
+ struct nvmem_cell *shutdown_flag;
+ struct nvmem_cell *boot_stage;
+ struct nvmem_cell *boot_error_count;
+ struct nvmem_cell *panic_count;
+};
+
+static const char * const nvmem_names[] = {
+ "shutdown_flag",
+ "boot_stage",
+ "boot_error_count",
+ "panic_count",
+};
+
+enum boot_stage {
+ BOOT_STAGE_SHUTDOWN = 0x00, /* Clean shutdown */
+ BOOT_STAGE_IBOOT_DONE = 0x2f, /* Last stage of bootloader */
+ BOOT_STAGE_KERNEL_STARTED = 0x30, /* Normal OS booting */
+};
+
+struct macsmc_reboot {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct notifier_block reboot_notify;
+
+ union {
+ struct macsmc_reboot_nvmem nvm;
+ struct nvmem_cell *nvm_cells[ARRAY_SIZE(nvmem_names)];
+ };
+};
+
+/* Helpers to read/write a u8 given a struct nvmem_cell */
+static int nvmem_cell_get_u8(struct nvmem_cell *cell)
+{
+ size_t len;
+ void *bfr;
+ u8 val;
+
+ bfr = nvmem_cell_read(cell, &len);
+ if (IS_ERR(bfr))
+ return PTR_ERR(bfr);
+
+ if (len < 1) {
+ kfree(bfr);
+ return -EINVAL;
+ }
+
+ val = *(u8 *)bfr;
+ kfree(bfr);
+ return val;
+}
+
+static int nvmem_cell_set_u8(struct nvmem_cell *cell, u8 val)
+{
+ return nvmem_cell_write(cell, &val, sizeof(val));
+}
+
+/*
+ * SMC 'MBSE' key actions:
+ *
+ * 'offw' - shutdown warning
+ * 'slpw' - sleep warning
+ * 'rest' - restart warning
+ * 'off1' - shutdown (needs PMU bit set to stay on)
+ * 'susp' - suspend
+ * 'phra' - restart ("PE Halt Restart Action"?)
+ * 'panb' - panic beginning
+ * 'pane' - panic end
+ */
+
+static int macsmc_prepare_atomic(struct sys_off_data *data)
+{
+ struct macsmc_reboot *reboot = data->cb_data;
+
+ dev_info(reboot->dev, "Preparing SMC for atomic mode\n");
+
+ apple_smc_enter_atomic(reboot->smc);
+ return NOTIFY_OK;
+}
+
+static int macsmc_power_off(struct sys_off_data *data)
+{
+ struct macsmc_reboot *reboot = data->cb_data;
+
+ dev_info(reboot->dev, "Issuing power off (off1)\n");
+
+ if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(off1)) < 0) {
+ dev_err(reboot->dev, "Failed to issue MBSE = off1 (power_off)\n");
+ } else {
+ mdelay(100);
+ WARN_ONCE(1, "Unable to power off system\n");
+ }
+
+ return NOTIFY_OK;
+}
+
+static int macsmc_restart(struct sys_off_data *data)
+{
+ struct macsmc_reboot *reboot = data->cb_data;
+
+ dev_info(reboot->dev, "Issuing restart (phra)\n");
+
+ if (apple_smc_write_u32_atomic(reboot->smc, SMC_KEY(MBSE), SMC_KEY(phra)) < 0) {
+ dev_err(reboot->dev, "Failed to issue MBSE = phra (restart)\n");
+ } else {
+ mdelay(100);
+ WARN_ONCE(1, "Unable to restart system\n");
+ }
+
+ return NOTIFY_OK;
+}
+
+static int macsmc_reboot_notify(struct notifier_block *this, unsigned long action, void *data)
+{
+ struct macsmc_reboot *reboot = container_of(this, struct macsmc_reboot, reboot_notify);
+ u8 shutdown_flag;
+ u32 val;
+
+ switch (action) {
+ case SYS_RESTART:
+ val = SMC_KEY(rest);
+ shutdown_flag = 0;
+ break;
+ case SYS_POWER_OFF:
+ val = SMC_KEY(offw);
+ shutdown_flag = 1;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ dev_info(reboot->dev, "Preparing for reboot (%p4ch)\n", &val);
+
+ /* On the Mac Mini, this will turn off the LED for power off */
+ if (apple_smc_write_u32(reboot->smc, SMC_KEY(MBSE), val) < 0)
+ dev_err(reboot->dev, "Failed to issue MBSE = %p4ch (reboot_prepare)\n", &val);
+
+ /* Set the boot_stage to 0, which means we're doing a clean shutdown/reboot. */
+ if (reboot->nvm.boot_stage &&
+ nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_SHUTDOWN) < 0)
+ dev_err(reboot->dev, "Failed to write boot_stage\n");
+
+ /*
+ * Set the PMU flag to actually reboot into the off state.
+ * Without this, the device will just reboot. We make it optional in case it is no longer
+ * necessary on newer hardware.
+ */
+ if (reboot->nvm.shutdown_flag &&
+ nvmem_cell_set_u8(reboot->nvm.shutdown_flag, shutdown_flag) < 0)
+ dev_err(reboot->dev, "Failed to write shutdown_flag\n");
+
+ return NOTIFY_OK;
+}
+
+static void macsmc_power_init_error_counts(struct macsmc_reboot *reboot)
+{
+ int boot_error_count, panic_count;
+
+ if (!reboot->nvm.boot_error_count || !reboot->nvm.panic_count)
+ return;
+
+ boot_error_count = nvmem_cell_get_u8(reboot->nvm.boot_error_count);
+ if (boot_error_count < 0) {
+ dev_err(reboot->dev, "Failed to read boot_error_count (%d)\n", boot_error_count);
+ return;
+ }
+
+ panic_count = nvmem_cell_get_u8(reboot->nvm.panic_count);
+ if (panic_count < 0) {
+ dev_err(reboot->dev, "Failed to read panic_count (%d)\n", panic_count);
+ return;
+ }
+
+ if (!boot_error_count && !panic_count)
+ return;
+
+ dev_warn(reboot->dev, "PMU logged %d boot error(s) and %d panic(s)\n",
+ boot_error_count, panic_count);
+
+ if (nvmem_cell_set_u8(reboot->nvm.panic_count, 0) < 0)
+ dev_err(reboot->dev, "Failed to reset panic_count\n");
+ if (nvmem_cell_set_u8(reboot->nvm.boot_error_count, 0) < 0)
+ dev_err(reboot->dev, "Failed to reset boot_error_count\n");
+}
+
+static int macsmc_reboot_probe(struct platform_device *pdev)
+{
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ struct macsmc_reboot *reboot;
+ int ret, i;
+
+ reboot = devm_kzalloc(&pdev->dev, sizeof(*reboot), GFP_KERNEL);
+ if (!reboot)
+ return -ENOMEM;
+
+ reboot->dev = &pdev->dev;
+ reboot->smc = smc;
+
+ platform_set_drvdata(pdev, reboot);
+
+ for (i = 0; i < ARRAY_SIZE(nvmem_names); i++) {
+ struct nvmem_cell *cell;
+
+ cell = devm_nvmem_cell_get(&pdev->dev,
+ nvmem_names[i]);
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_warn(&pdev->dev, "Missing NVMEM cell %s (%ld)\n",
+ nvmem_names[i], PTR_ERR(cell));
+ /* Non fatal, we'll deal with it */
+ cell = NULL;
+ }
+ reboot->nvm_cells[i] = cell;
+ }
+
+ /* Set the boot_stage to indicate we're running the OS kernel */
+ if (reboot->nvm.boot_stage &&
+ nvmem_cell_set_u8(reboot->nvm.boot_stage, BOOT_STAGE_KERNEL_STARTED) < 0)
+ dev_err(reboot->dev, "Failed to write boot_stage\n");
+
+ /* Display and clear the error counts */
+ macsmc_power_init_error_counts(reboot);
+
+ reboot->reboot_notify.notifier_call = macsmc_reboot_notify;
+
+ ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_HIGH, macsmc_prepare_atomic, reboot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register power-off prepare handler\n");
+ ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_HIGH,
+ macsmc_power_off, reboot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register power-off handler\n");
+
+ ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART_PREPARE,
+ SYS_OFF_PRIO_HIGH, macsmc_prepare_atomic, reboot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register restart prepare handler\n");
+ ret = devm_register_sys_off_handler(&pdev->dev, SYS_OFF_MODE_RESTART, SYS_OFF_PRIO_HIGH,
+ macsmc_restart, reboot);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register restart handler\n");
+
+ ret = devm_register_reboot_notifier(&pdev->dev, &reboot->reboot_notify);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to register reboot notifier\n");
+
+ dev_info(&pdev->dev, "Handling reboot and poweroff requests via SMC\n");
+
+ return 0;
+}
+
+static const struct of_device_id macsmc_reboot_of_table[] = {
+ { .compatible = "apple,smc-reboot", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, macsmc_reboot_of_table);
+
+static struct platform_driver macsmc_reboot_driver = {
+ .driver = {
+ .name = "macsmc-reboot",
+ .of_match_table = macsmc_reboot_of_table,
+ },
+ .probe = macsmc_reboot_probe,
+};
+module_platform_driver(macsmc_reboot_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Apple SMC reboot/poweroff driver");
+MODULE_AUTHOR("Hector Martin <marcan@marcan.st>");
diff --git a/drivers/power/sequencing/Kconfig b/drivers/power/sequencing/Kconfig
index ddcc42a98492..280f92beb5d0 100644
--- a/drivers/power/sequencing/Kconfig
+++ b/drivers/power/sequencing/Kconfig
@@ -16,7 +16,7 @@ if POWER_SEQUENCING
config POWER_SEQUENCING_QCOM_WCN
tristate "Qualcomm WCN family PMU driver"
default m if ARCH_QCOM
- depends on OF
+ depends on OF || COMPILE_TEST
help
Say Y here to enable the power sequencing driver for Qualcomm
WCN Bluetooth/WLAN chipsets.
@@ -27,4 +27,12 @@ config POWER_SEQUENCING_QCOM_WCN
this driver is needed for correct power control or else we'd risk not
respecting the required delays between enabling Bluetooth and WLAN.
+config POWER_SEQUENCING_TH1520_GPU
+ tristate "T-HEAD TH1520 GPU power sequencing driver"
+ depends on (ARCH_THEAD && AUXILIARY_BUS) || COMPILE_TEST
+ help
+ Say Y here to enable the power sequencing driver for the TH1520 SoC
+ GPU. This driver handles the complex clock and reset sequence
+ required to power on the Imagination BXM GPU on this platform.
+
endif
diff --git a/drivers/power/sequencing/Makefile b/drivers/power/sequencing/Makefile
index 2eec2df7912d..96c1cf0a98ac 100644
--- a/drivers/power/sequencing/Makefile
+++ b/drivers/power/sequencing/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_POWER_SEQUENCING) += pwrseq-core.o
pwrseq-core-y := core.o
obj-$(CONFIG_POWER_SEQUENCING_QCOM_WCN) += pwrseq-qcom-wcn.o
+obj-$(CONFIG_POWER_SEQUENCING_TH1520_GPU) += pwrseq-thead-gpu.o
diff --git a/drivers/power/sequencing/core.c b/drivers/power/sequencing/core.c
index 0ffc259c6bb6..190564e55988 100644
--- a/drivers/power/sequencing/core.c
+++ b/drivers/power/sequencing/core.c
@@ -628,7 +628,7 @@ static int pwrseq_match_device(struct device *pwrseq_dev, void *data)
return 0;
ret = pwrseq->match(pwrseq, match_data->dev);
- if (ret <= 0)
+ if (ret == PWRSEQ_NO_MATCH || ret < 0)
return ret;
/* We got the matching device, let's find the right target. */
@@ -651,7 +651,7 @@ static int pwrseq_match_device(struct device *pwrseq_dev, void *data)
match_data->desc->pwrseq = pwrseq_device_get(pwrseq);
- return 1;
+ return PWRSEQ_MATCH_OK;
}
/**
@@ -684,7 +684,7 @@ struct pwrseq_desc *pwrseq_get(struct device *dev, const char *target)
pwrseq_match_device);
if (ret < 0)
return ERR_PTR(ret);
- if (ret == 0)
+ if (ret == PWRSEQ_NO_MATCH)
/* No device matched. */
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/power/sequencing/pwrseq-qcom-wcn.c b/drivers/power/sequencing/pwrseq-qcom-wcn.c
index e8f5030f2639..663d9a537065 100644
--- a/drivers/power/sequencing/pwrseq-qcom-wcn.c
+++ b/drivers/power/sequencing/pwrseq-qcom-wcn.c
@@ -155,7 +155,7 @@ static const struct pwrseq_unit_data pwrseq_qcom_wcn_bt_unit_data = {
};
static const struct pwrseq_unit_data pwrseq_qcom_wcn6855_bt_unit_data = {
- .name = "wlan-enable",
+ .name = "bluetooth-enable",
.deps = pwrseq_qcom_wcn6855_unit_deps,
.enable = pwrseq_qcom_wcn_bt_enable,
.disable = pwrseq_qcom_wcn_bt_disable,
@@ -341,12 +341,12 @@ static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq,
* device.
*/
if (!of_property_present(dev_node, "vddaon-supply"))
- return 0;
+ return PWRSEQ_NO_MATCH;
struct device_node *reg_node __free(device_node) =
of_parse_phandle(dev_node, "vddaon-supply", 0);
if (!reg_node)
- return 0;
+ return PWRSEQ_NO_MATCH;
/*
* `reg_node` is the PMU AON regulator, its parent is the `regulators`
@@ -355,9 +355,9 @@ static int pwrseq_qcom_wcn_match(struct pwrseq_device *pwrseq,
*/
if (!reg_node->parent || !reg_node->parent->parent ||
reg_node->parent->parent != ctx->of_node)
- return 0;
+ return PWRSEQ_NO_MATCH;
- return 1;
+ return PWRSEQ_MATCH_OK;
}
static int pwrseq_qcom_wcn_probe(struct platform_device *pdev)
diff --git a/drivers/power/sequencing/pwrseq-thead-gpu.c b/drivers/power/sequencing/pwrseq-thead-gpu.c
new file mode 100644
index 000000000000..7c82a10ca9f6
--- /dev/null
+++ b/drivers/power/sequencing/pwrseq-thead-gpu.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * T-HEAD TH1520 GPU Power Sequencer Driver
+ *
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * Author: Michal Wilczynski <m.wilczynski@samsung.com>
+ *
+ * This driver implements the power sequence for the Imagination BXM-4-64
+ * GPU on the T-HEAD TH1520 SoC. The sequence requires coordinating resources
+ * from both the sequencer's parent device node (clkgen_reset) and the GPU's
+ * device node (clocks and core reset).
+ *
+ * The `match` function is used to acquire the GPU's resources when the
+ * GPU driver requests the "gpu-power" sequence target.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pwrseq/provider.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/power/thead,th1520-power.h>
+
+struct pwrseq_thead_gpu_ctx {
+ struct pwrseq_device *pwrseq;
+ struct reset_control *clkgen_reset;
+ struct device_node *aon_node;
+
+ /* Consumer resources */
+ struct device_node *consumer_node;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct reset_control *gpu_reset;
+};
+
+static int pwrseq_thead_gpu_enable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+ int ret;
+
+ if (!ctx->clks || !ctx->gpu_reset)
+ return -ENODEV;
+
+ ret = clk_bulk_prepare_enable(ctx->num_clks, ctx->clks);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(ctx->clkgen_reset);
+ if (ret)
+ goto err_disable_clks;
+
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. Assuming a worst-case scenario with
+ * a very high GPU clock frequency, a delay of 1 microsecond is
+ * sufficient to ensure this requirement is met across all
+ * feasible GPU clock speeds.
+ */
+ udelay(1);
+
+ ret = reset_control_deassert(ctx->gpu_reset);
+ if (ret)
+ goto err_assert_clkgen;
+
+ return 0;
+
+err_assert_clkgen:
+ reset_control_assert(ctx->clkgen_reset);
+err_disable_clks:
+ clk_bulk_disable_unprepare(ctx->num_clks, ctx->clks);
+ return ret;
+}
+
+static int pwrseq_thead_gpu_disable(struct pwrseq_device *pwrseq)
+{
+ struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+ int ret = 0, err;
+
+ if (!ctx->clks || !ctx->gpu_reset)
+ return -ENODEV;
+
+ err = reset_control_assert(ctx->gpu_reset);
+ if (err)
+ ret = err;
+
+ err = reset_control_assert(ctx->clkgen_reset);
+ if (err && !ret)
+ ret = err;
+
+ clk_bulk_disable_unprepare(ctx->num_clks, ctx->clks);
+
+	/* ret holds the first error code encountered, if any */
+ return ret;
+}
+
+static const struct pwrseq_unit_data pwrseq_thead_gpu_unit = {
+ .name = "gpu-power-sequence",
+ .enable = pwrseq_thead_gpu_enable,
+ .disable = pwrseq_thead_gpu_disable,
+};
+
+static const struct pwrseq_target_data pwrseq_thead_gpu_target = {
+ .name = "gpu-power",
+ .unit = &pwrseq_thead_gpu_unit,
+};
+
+static const struct pwrseq_target_data *pwrseq_thead_gpu_targets[] = {
+ &pwrseq_thead_gpu_target,
+ NULL
+};
+
+static int pwrseq_thead_gpu_match(struct pwrseq_device *pwrseq,
+ struct device *dev)
+{
+ struct pwrseq_thead_gpu_ctx *ctx = pwrseq_device_get_drvdata(pwrseq);
+ static const char *const clk_names[] = { "core", "sys" };
+ struct of_phandle_args pwr_spec;
+ int i, ret;
+
+ /* We only match the specific T-HEAD TH1520 GPU compatible */
+ if (!of_device_is_compatible(dev->of_node, "thead,th1520-gpu"))
+ return PWRSEQ_NO_MATCH;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells", 0, &pwr_spec);
+ if (ret)
+ return PWRSEQ_NO_MATCH;
+
+ /* Additionally verify consumer device has AON as power-domain */
+ if (pwr_spec.np != ctx->aon_node || pwr_spec.args[0] != TH1520_GPU_PD) {
+ of_node_put(pwr_spec.np);
+ return PWRSEQ_NO_MATCH;
+ }
+
+ of_node_put(pwr_spec.np);
+
+ /* If a consumer is already bound, only allow a re-match from it */
+ if (ctx->consumer_node)
+ return ctx->consumer_node == dev->of_node ?
+ PWRSEQ_MATCH_OK : PWRSEQ_NO_MATCH;
+
+ ctx->num_clks = ARRAY_SIZE(clk_names);
+ ctx->clks = kcalloc(ctx->num_clks, sizeof(*ctx->clks), GFP_KERNEL);
+ if (!ctx->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ctx->num_clks; i++)
+ ctx->clks[i].id = clk_names[i];
+
+ ret = clk_bulk_get(dev, ctx->num_clks, ctx->clks);
+ if (ret)
+ goto err_free_clks;
+
+ ctx->gpu_reset = reset_control_get_shared(dev, NULL);
+ if (IS_ERR(ctx->gpu_reset)) {
+ ret = PTR_ERR(ctx->gpu_reset);
+ goto err_put_clks;
+ }
+
+ ctx->consumer_node = of_node_get(dev->of_node);
+
+ return PWRSEQ_MATCH_OK;
+
+err_put_clks:
+ clk_bulk_put(ctx->num_clks, ctx->clks);
+err_free_clks:
+ kfree(ctx->clks);
+ ctx->clks = NULL;
+
+ return ret;
+}
+
+static int pwrseq_thead_gpu_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct device *parent_dev = dev->parent;
+ struct pwrseq_thead_gpu_ctx *ctx;
+ struct pwrseq_config config = {};
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->aon_node = parent_dev->of_node;
+
+ ctx->clkgen_reset =
+ devm_reset_control_get_exclusive(parent_dev, "gpu-clkgen");
+ if (IS_ERR(ctx->clkgen_reset))
+ return dev_err_probe(
+ dev, PTR_ERR(ctx->clkgen_reset),
+ "Failed to get GPU clkgen reset from parent\n");
+
+ config.parent = dev;
+ config.owner = THIS_MODULE;
+ config.drvdata = ctx;
+ config.match = pwrseq_thead_gpu_match;
+ config.targets = pwrseq_thead_gpu_targets;
+
+ ctx->pwrseq = devm_pwrseq_device_register(dev, &config);
+ if (IS_ERR(ctx->pwrseq))
+ return dev_err_probe(dev, PTR_ERR(ctx->pwrseq),
+ "Failed to register power sequencer\n");
+
+ auxiliary_set_drvdata(adev, ctx);
+
+ return 0;
+}
+
+static void pwrseq_thead_gpu_remove(struct auxiliary_device *adev)
+{
+ struct pwrseq_thead_gpu_ctx *ctx = auxiliary_get_drvdata(adev);
+
+ if (ctx->gpu_reset)
+ reset_control_put(ctx->gpu_reset);
+
+ if (ctx->clks) {
+ clk_bulk_put(ctx->num_clks, ctx->clks);
+ kfree(ctx->clks);
+ }
+
+ if (ctx->consumer_node)
+ of_node_put(ctx->consumer_node);
+}
+
+static const struct auxiliary_device_id pwrseq_thead_gpu_id_table[] = {
+ { .name = "th1520_pm_domains.pwrseq-gpu" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, pwrseq_thead_gpu_id_table);
+
+static struct auxiliary_driver pwrseq_thead_gpu_driver = {
+ .driver = {
+ .name = "pwrseq-thead-gpu",
+ },
+ .probe = pwrseq_thead_gpu_probe,
+ .remove = pwrseq_thead_gpu_remove,
+ .id_table = pwrseq_thead_gpu_id_table,
+};
+module_auxiliary_driver(pwrseq_thead_gpu_driver);
+
+MODULE_AUTHOR("Michal Wilczynski <m.wilczynski@samsung.com>");
+MODULE_DESCRIPTION("T-HEAD TH1520 GPU power sequencer driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 33a5bfce4604..cfb0e3e0d4aa 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1235,9 +1235,8 @@ bool power_supply_has_property(struct power_supply *psy,
return false;
}
-int power_supply_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+static int __power_supply_get_property(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val, bool use_extensions)
{
struct power_supply_ext_registration *reg;
@@ -1247,10 +1246,14 @@ int power_supply_get_property(struct power_supply *psy,
return -ENODEV;
}
- scoped_guard(rwsem_read, &psy->extensions_sem) {
- power_supply_for_each_extension(reg, psy) {
- if (power_supply_ext_has_property(reg->ext, psp))
+ if (use_extensions) {
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (!power_supply_ext_has_property(reg->ext, psp))
+ continue;
+
return reg->ext->get_property(psy, reg->ext, reg->data, psp, val);
+ }
}
}
@@ -1261,20 +1264,49 @@ int power_supply_get_property(struct power_supply *psy,
else
return -EINVAL;
}
+
+int power_supply_get_property(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ return __power_supply_get_property(psy, psp, val, true);
+}
EXPORT_SYMBOL_GPL(power_supply_get_property);
-int power_supply_set_property(struct power_supply *psy,
- enum power_supply_property psp,
- const union power_supply_propval *val)
+/**
+ * power_supply_get_property_direct - Read a power supply property without checking for extensions
+ * @psy: The power supply
+ * @psp: The power supply property to read
+ * @val: The resulting value of the power supply property
+ *
+ * Read a power supply property without taking into account any power supply extensions registered
+ * on the given power supply. This is mostly useful for power supply extensions that want to access
+ * their own power supply as using power_supply_get_property() directly will result in a potential
+ * deadlock.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int power_supply_get_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ return __power_supply_get_property(psy, psp, val, false);
+}
+EXPORT_SYMBOL_GPL(power_supply_get_property_direct);
+
+
+static int __power_supply_set_property(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val, bool use_extensions)
{
struct power_supply_ext_registration *reg;
if (atomic_read(&psy->use_cnt) <= 0)
return -ENODEV;
- scoped_guard(rwsem_read, &psy->extensions_sem) {
- power_supply_for_each_extension(reg, psy) {
- if (power_supply_ext_has_property(reg->ext, psp)) {
+ if (use_extensions) {
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
+ power_supply_for_each_extension(reg, psy) {
+ if (!power_supply_ext_has_property(reg->ext, psp))
+ continue;
+
if (reg->ext->set_property)
return reg->ext->set_property(psy, reg->ext, reg->data,
psp, val);
@@ -1289,8 +1321,34 @@ int power_supply_set_property(struct power_supply *psy,
return psy->desc->set_property(psy, psp, val);
}
+
+int power_supply_set_property(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ return __power_supply_set_property(psy, psp, val, true);
+}
EXPORT_SYMBOL_GPL(power_supply_set_property);
+/**
+ * power_supply_set_property_direct - Write a power supply property without checking for extensions
+ * @psy: The power supply
+ * @psp: The power supply property to write
+ * @val: The value to write to the power supply property
+ *
+ * Write a power supply property without taking into account any power supply extensions registered
+ * on the given power supply. This is mostly useful for power supply extensions that want to access
+ * their own power supply as using power_supply_set_property() directly will result in a potential
+ * deadlock.
+ *
+ * Return: 0 on success or negative error code on failure.
+ */
+int power_supply_set_property_direct(struct power_supply *psy, enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ return __power_supply_set_property(psy, psp, val, false);
+}
+EXPORT_SYMBOL_GPL(power_supply_set_property_direct);
+
int power_supply_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 5bfdfcf6013b..2c0e9ad820c0 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -259,6 +259,7 @@ static const struct power_supply_config test_power_configs[] = {
static int test_power_battery_extmanufacture_year = 1234;
static int test_power_battery_exttemp_max = 1000;
static const enum power_supply_property test_power_battery_extprops[] = {
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_MANUFACTURE_YEAR,
POWER_SUPPLY_PROP_TEMP_MAX,
};
@@ -270,6 +271,9 @@ static int test_power_battery_extget_property(struct power_supply *psy,
union power_supply_propval *val)
{
switch (psp) {
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ return power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ val);
case POWER_SUPPLY_PROP_MANUFACTURE_YEAR:
val->intval = test_power_battery_extmanufacture_year;
break;
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 6b6f51b21550..99390ec1481f 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -96,6 +96,8 @@ static u64 get_pd_power_uw(struct dtpm *dtpm)
int i;
pd = em_cpu_get(dtpm_cpu->cpu);
+ if (!pd)
+ return 0;
pd_mask = em_span_cpus(pd);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index faa0b6bc5b53..c7e7f9bf5313 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1277,6 +1277,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_RAPTORLAKE_P, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_RAPTORLAKE_S, &rapl_defaults_core),
+ X86_MATCH_VFM(INTEL_BARTLETTLAKE, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_METEORLAKE, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_METEORLAKE_L, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 8ad2115d65f6..4ed06c71a3ac 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -150,6 +150,7 @@ static const struct x86_cpu_id pl4_support_ids[] = {
X86_MATCH_VFM(INTEL_METEORLAKE_L, NULL),
X86_MATCH_VFM(INTEL_ARROWLAKE_U, NULL),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, NULL),
+ X86_MATCH_VFM(INTEL_PANTHERLAKE_L, NULL),
{}
};
diff --git a/drivers/pwm/pwm-adp5585.c b/drivers/pwm/pwm-adp5585.c
index d79106d12181..dc2860979e24 100644
--- a/drivers/pwm/pwm-adp5585.c
+++ b/drivers/pwm/pwm-adp5585.c
@@ -33,21 +33,33 @@
#define ADP5585_PWM_MIN_PERIOD_NS (2ULL * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ)
#define ADP5585_PWM_MAX_PERIOD_NS (2ULL * 0xffff * NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ)
+struct adp5585_pwm_chip {
+ unsigned int pwm_cfg;
+ unsigned int pwm_offt_low;
+ unsigned int pwm_ont_low;
+};
+
+struct adp5585_pwm {
+ const struct adp5585_pwm_chip *info;
+ struct regmap *regmap;
+ unsigned int ext_cfg;
+};
+
static int pwm_adp5585_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct regmap *regmap = pwmchip_get_drvdata(chip);
+ struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip);
/* Configure the R3 pin as PWM output. */
- return regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C,
+ return regmap_update_bits(adp5585_pwm->regmap, adp5585_pwm->ext_cfg,
ADP5585_R3_EXTEND_CFG_MASK,
ADP5585_R3_EXTEND_CFG_PWM_OUT);
}
static void pwm_adp5585_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- struct regmap *regmap = pwmchip_get_drvdata(chip);
+ struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip);
- regmap_update_bits(regmap, ADP5585_PIN_CONFIG_C,
+ regmap_update_bits(adp5585_pwm->regmap, adp5585_pwm->ext_cfg,
ADP5585_R3_EXTEND_CFG_MASK,
ADP5585_R3_EXTEND_CFG_GPIO4);
}
@@ -56,15 +68,16 @@ static int pwm_adp5585_apply(struct pwm_chip *chip,
struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct regmap *regmap = pwmchip_get_drvdata(chip);
+ struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip);
+ const struct adp5585_pwm_chip *info = adp5585_pwm->info;
+ struct regmap *regmap = adp5585_pwm->regmap;
u64 period, duty_cycle;
u32 on, off;
__le16 val;
int ret;
if (!state->enabled) {
- regmap_clear_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN);
- regmap_clear_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN);
+ regmap_clear_bits(regmap, info->pwm_cfg, ADP5585_PWM_EN);
return 0;
}
@@ -85,45 +98,43 @@ static int pwm_adp5585_apply(struct pwm_chip *chip,
off = div_u64(period, NSEC_PER_SEC / ADP5585_PWM_OSC_FREQ_HZ) - on;
val = cpu_to_le16(off);
- ret = regmap_bulk_write(regmap, ADP5585_PWM_OFFT_LOW, &val, 2);
+ ret = regmap_bulk_write(regmap, info->pwm_offt_low, &val, 2);
if (ret)
return ret;
val = cpu_to_le16(on);
- ret = regmap_bulk_write(regmap, ADP5585_PWM_ONT_LOW, &val, 2);
+ ret = regmap_bulk_write(regmap, info->pwm_ont_low, &val, 2);
if (ret)
return ret;
/* Enable PWM in continuous mode and no external AND'ing. */
- ret = regmap_update_bits(regmap, ADP5585_PWM_CFG,
+ ret = regmap_update_bits(regmap, info->pwm_cfg,
ADP5585_PWM_IN_AND | ADP5585_PWM_MODE |
ADP5585_PWM_EN, ADP5585_PWM_EN);
if (ret)
return ret;
- ret = regmap_set_bits(regmap, ADP5585_GENERAL_CFG, ADP5585_OSC_EN);
- if (ret)
- return ret;
-
- return regmap_set_bits(regmap, ADP5585_PWM_CFG, ADP5585_PWM_EN);
+ return regmap_set_bits(regmap, info->pwm_cfg, ADP5585_PWM_EN);
}
static int pwm_adp5585_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
struct pwm_state *state)
{
- struct regmap *regmap = pwmchip_get_drvdata(chip);
+ struct adp5585_pwm *adp5585_pwm = pwmchip_get_drvdata(chip);
+ const struct adp5585_pwm_chip *info = adp5585_pwm->info;
+ struct regmap *regmap = adp5585_pwm->regmap;
unsigned int on, off;
unsigned int val;
__le16 on_off;
int ret;
- ret = regmap_bulk_read(regmap, ADP5585_PWM_OFFT_LOW, &on_off, 2);
+ ret = regmap_bulk_read(regmap, info->pwm_offt_low, &on_off, 2);
if (ret)
return ret;
off = le16_to_cpu(on_off);
- ret = regmap_bulk_read(regmap, ADP5585_PWM_ONT_LOW, &on_off, 2);
+ ret = regmap_bulk_read(regmap, info->pwm_ont_low, &on_off, 2);
if (ret)
return ret;
on = le16_to_cpu(on_off);
@@ -133,7 +144,7 @@ static int pwm_adp5585_get_state(struct pwm_chip *chip,
state->polarity = PWM_POLARITY_NORMAL;
- regmap_read(regmap, ADP5585_PWM_CFG, &val);
+ regmap_read(regmap, info->pwm_cfg, &val);
state->enabled = !!(val & ADP5585_PWM_EN);
return 0;
@@ -148,18 +159,28 @@ static const struct pwm_ops adp5585_pwm_ops = {
static int adp5585_pwm_probe(struct platform_device *pdev)
{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
struct device *dev = &pdev->dev;
struct adp5585_dev *adp5585 = dev_get_drvdata(dev->parent);
+ struct adp5585_pwm *adp5585_pwm;
struct pwm_chip *chip;
int ret;
- chip = devm_pwmchip_alloc(dev, ADP5585_PWM_CHAN_NUM, 0);
+ chip = devm_pwmchip_alloc(dev, ADP5585_PWM_CHAN_NUM,
+ sizeof(*adp5585_pwm));
if (IS_ERR(chip))
return PTR_ERR(chip);
+ adp5585_pwm = pwmchip_get_drvdata(chip);
+ adp5585_pwm->regmap = adp5585->regmap;
+ adp5585_pwm->ext_cfg = adp5585->regs->ext_cfg;
+
+ adp5585_pwm->info = (const struct adp5585_pwm_chip *)id->driver_data;
+ if (!adp5585_pwm->info)
+ return -ENODEV;
+
device_set_of_node_from_dev(dev, dev->parent);
- pwmchip_set_drvdata(chip, adp5585->regmap);
chip->ops = &adp5585_pwm_ops;
ret = devm_pwmchip_add(dev, chip);
@@ -169,8 +190,21 @@ static int adp5585_pwm_probe(struct platform_device *pdev)
return 0;
}
+static const struct adp5585_pwm_chip adp5589_pwm_chip_info = {
+	.pwm_cfg = ADP5589_PWM_CFG,
+	.pwm_offt_low = ADP5589_PWM_OFFT_LOW,
+	.pwm_ont_low = ADP5589_PWM_ONT_LOW,
+};
+
+static const struct adp5585_pwm_chip adp5585_pwm_chip_info = {
+	.pwm_cfg = ADP5585_PWM_CFG,
+	.pwm_offt_low = ADP5585_PWM_OFFT_LOW,
+	.pwm_ont_low = ADP5585_PWM_ONT_LOW,
+};
+
static const struct platform_device_id adp5585_pwm_id_table[] = {
- { "adp5585-pwm" },
+ { "adp5585-pwm", (kernel_ulong_t)&adp5585_pwm_chip_info },
+ { "adp5589-pwm", (kernel_ulong_t)&adp5589_pwm_chip_info },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(platform, adp5585_pwm_id_table);
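
Editor's note: the probe above selects the per-variant register map from the matched platform_device_id's driver_data. A minimal sketch of that lookup with hypothetical chip names and a placeholder info struct:

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>

struct my_chip_info {
	unsigned int cfg_reg;	/* per-variant register offset */
};

static const struct my_chip_info chip_a_info = { .cfg_reg = 0x10 };
static const struct my_chip_info chip_b_info = { .cfg_reg = 0x20 };

static const struct platform_device_id my_id_table[] = {
	{ "chip-a", (kernel_ulong_t)&chip_a_info },
	{ "chip-b", (kernel_ulong_t)&chip_b_info },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id = platform_get_device_id(pdev);
	const struct my_chip_info *info = (const void *)id->driver_data;

	if (!info)
		return -ENODEV;

	/* use info->cfg_reg for this variant's register layout */
	return 0;
}
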
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 6ee36adcbdba..bece5e635ee9 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -479,7 +479,7 @@ static int rpmsg_dev_probe(struct device *dev)
struct rpmsg_endpoint *ept = NULL;
int err;
- err = dev_pm_domain_attach(dev, true);
+ err = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (err)
goto out;
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index b7f15f303ea2..967dc4f9eea8 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -130,6 +130,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
struct ism_req_hdr *req = cmd;
struct ism_resp_hdr *resp = cmd;
+ spin_lock(&ism->cmd_lock);
__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
__ism_write_cmd(ism, req, 0, sizeof(*req));
@@ -143,6 +144,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd)
}
__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
+ spin_unlock(&ism->cmd_lock);
return resp->ret;
}
@@ -606,6 +608,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
spin_lock_init(&ism->lock);
+ spin_lock_init(&ism->cmd_lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
ism->dev.parent = &pdev->dev;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 7b4e7a61965a..adb9e7a94785 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -559,8 +559,8 @@ static int sas_ata_prereset(struct ata_link *link, unsigned long deadline)
}
static struct ata_port_operations sas_sata_ops = {
- .prereset = sas_ata_prereset,
- .hardreset = sas_ata_hard_reset,
+ .reset.prereset = sas_ata_prereset,
+ .reset.hardreset = sas_ata_hard_reset,
.error_handler = ata_std_error_handler,
.post_internal_cmd = sas_ata_post_internal,
.qc_defer = ata_std_qc_defer,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9179f8aee964..615e06fd4ee8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5971,7 +5971,8 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
else
instance->iopoll_q_count = 0;
- num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+ num_msix_req = blk_mq_num_online_queues(0) +
+ instance->low_latency_index_start;
instance->msix_vectors = min(num_msix_req,
instance->msix_vectors);
@@ -5987,7 +5988,8 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
/* Disable Balanced IOPS mode and try realloc vectors */
instance->perf_mode = MR_LATENCY_PERF_MODE;
instance->low_latency_index_start = 1;
- num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+ num_msix_req = blk_mq_num_online_queues(0) +
+ instance->low_latency_index_start;
instance->msix_vectors = min(num_msix_req,
instance->msix_vectors);
@@ -6243,7 +6245,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
true : false;
if (intr_coalescing &&
- (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
+ (blk_mq_num_online_queues(0) >= MR_HIGH_IOPS_QUEUE_COUNT) &&
(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
instance->perf_mode = MR_BALANCED_PERF_MODE;
else
@@ -6287,7 +6289,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
else
instance->low_latency_index_start = 1;
- num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+ num_msix_req = blk_mq_num_online_queues(0) +
+ instance->low_latency_index_start;
instance->msix_vectors = min(num_msix_req,
instance->msix_vectors);
@@ -6319,8 +6322,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
megasas_setup_reply_map(instance);
dev_info(&instance->pdev->dev,
- "current msix/online cpus\t: (%d/%d)\n",
- instance->msix_vectors, (unsigned int)num_online_cpus());
+ "current msix/max num queues\t: (%d/%u)\n",
+ instance->msix_vectors, blk_mq_num_online_queues(0));
dev_info(&instance->pdev->dev,
"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index fe98c76e9be3..c4c6b5c6658c 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -4533,13 +4533,13 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
/* user wants to control IRQ setting for target mode */
ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
- min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
- PCI_IRQ_MSIX);
+ blk_mq_num_online_queues(ha->msix_count) + min_vecs,
+ PCI_IRQ_MSIX);
} else
ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
- min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
- &desc);
+ blk_mq_num_online_queues(ha->msix_count) + min_vecs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &desc);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index eeaa6af294b8..7ce49d8f21c9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1141,6 +1141,11 @@ static void sd_config_write_same(struct scsi_disk *sdkp,
out:
lim->max_write_zeroes_sectors =
sdkp->max_ws_blocks * (logical_block_size >> SECTOR_SHIFT);
+
+ if (sdkp->zeroing_mode == SD_ZERO_WS16_UNMAP ||
+ sdkp->zeroing_mode == SD_ZERO_WS10_UNMAP)
+ lim->max_hw_wzeroes_unmap_sectors =
+ lim->max_write_zeroes_sectors;
}
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index ae6ce6f5d622..ff4217fef93b 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -52,7 +52,8 @@ void sd_dif_config_host(struct scsi_disk *sdkp, struct queue_limits *lim)
if (type != T10_PI_TYPE3_PROTECTION)
bi->flags |= BLK_INTEGRITY_REF_TAG;
- bi->tuple_size = sizeof(struct t10_pi_tuple);
+ bi->metadata_size = sizeof(struct t10_pi_tuple);
+ bi->pi_tuple_size = bi->metadata_size;
if (dif && type) {
bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 3d40a63e378d..125944941601 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5294,15 +5294,14 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
if (is_kdump_kernel()) {
num_queue_groups = 1;
} else {
- int num_cpus;
int max_queue_groups;
max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
ctrl_info->max_outbound_queues - 1);
max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
- num_cpus = num_online_cpus();
- num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+ num_queue_groups =
+ blk_mq_num_online_queues(ctrl_info->max_msix_vectors);
num_queue_groups = min(num_queue_groups, max_queue_groups);
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 21ce3e940192..96a69edddbe5 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -919,6 +919,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
/* We need to know how many queues before we allocate. */
num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;
num_queues = min_t(unsigned int, nr_cpu_ids, num_queues);
+ num_queues = blk_mq_num_possible_queues(num_queues);
num_targets = virtscsi_config_get(vdev, max_target) + 1;
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
index 5fffd0f003dc..b8d4da147d23 100644
--- a/drivers/soc/apple/rtkit.c
+++ b/drivers/soc/apple/rtkit.c
@@ -279,8 +279,7 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
buffer->size, &buffer->iova);
- if (buffer->iova &&
- (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) {
+ if (buffer->iova && !rtk->ops->shmem_setup) {
err = -EINVAL;
goto error;
}
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index ef8f355589a5..fc3a2c41cc10 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -58,6 +58,7 @@ struct aspeed_lpc_snoop_model_data {
};
struct aspeed_lpc_snoop_channel {
+ bool enabled;
struct kfifo fifo;
wait_queue_head_t wq;
struct miscdevice miscdev;
@@ -190,6 +191,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
const struct aspeed_lpc_snoop_model_data *model_data =
of_device_get_match_data(dev);
+ if (WARN_ON(lpc_snoop->chan[channel].enabled))
+ return -EBUSY;
+
init_waitqueue_head(&lpc_snoop->chan[channel].wq);
/* Create FIFO datastructure */
rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
@@ -236,6 +240,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
regmap_update_bits(lpc_snoop->regmap, HICRB,
hicrb_en, hicrb_en);
+ lpc_snoop->chan[channel].enabled = true;
+
return 0;
err_misc_deregister:
@@ -248,6 +254,9 @@ err_free_fifo:
static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
int channel)
{
+ if (!lpc_snoop->chan[channel].enabled)
+ return;
+
switch (channel) {
case 0:
regmap_update_bits(lpc_snoop->regmap, HICR5,
@@ -263,8 +272,10 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
return;
}
- kfifo_free(&lpc_snoop->chan[channel].fifo);
+ lpc_snoop->chan[channel].enabled = false;
+ /* Consider improving safety wrt concurrent reader(s) */
misc_deregister(&lpc_snoop->chan[channel].miscdev);
+ kfifo_free(&lpc_snoop->chan[channel].fifo);
}
static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index dfdff186c805..dc52a2197d24 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -145,7 +145,7 @@ static int am33xx_do_sram_idle(u32 wfi_flags)
return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}
-static int __init am43xx_map_gic(void)
+static int am43xx_map_gic(void)
{
gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);
diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
index a12c68b93b1c..7a671a786197 100644
--- a/drivers/soundwire/amd_manager.c
+++ b/drivers/soundwire/amd_manager.c
@@ -238,7 +238,7 @@ static u64 amd_sdw_send_cmd_get_resp(struct amd_sdw_manager *amd_manager, u32 lo
if (sts & AMD_SDW_IMM_RES_VALID) {
dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance);
- writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
+ writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS);
}
writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD);
writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD);
@@ -1209,6 +1209,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
}
if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
+ cancel_work_sync(&amd_manager->amd_sdw_work);
amd_sdw_wake_enable(amd_manager, false);
if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
ret = amd_sdw_host_wake_enable(amd_manager, false);
@@ -1219,6 +1220,7 @@ static int __maybe_unused amd_suspend(struct device *dev)
if (ret)
return ret;
} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
+ cancel_work_sync(&amd_manager->amd_sdw_work);
amd_sdw_wake_enable(amd_manager, false);
if (amd_manager->acp_rev >= ACP70_PCI_REV_ID) {
ret = amd_sdw_host_wake_enable(amd_manager, false);
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 75d6f16efced..bc1e653080d9 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -101,7 +101,7 @@ static int sdw_drv_probe(struct device *dev)
/*
* attach to power domain but don't turn on (last arg)
*/
- ret = dev_pm_domain_attach(dev, false);
+ ret = dev_pm_domain_attach(dev, 0);
if (ret)
return ret;
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 295a46dc2be7..0f45e3404756 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -156,7 +156,6 @@ struct qcom_swrm_port_config {
u8 word_length;
u8 blk_group_count;
u8 lane_control;
- u8 ch_mask;
};
/*
@@ -1049,13 +1048,9 @@ static int qcom_swrm_port_enable(struct sdw_bus *bus,
{
u32 reg = SWRM_DP_PORT_CTRL_BANK(enable_ch->port_num, bank);
struct qcom_swrm_ctrl *ctrl = to_qcom_sdw(bus);
- struct qcom_swrm_port_config *pcfg;
u32 val;
- pcfg = &ctrl->pconfig[enable_ch->port_num];
ctrl->reg_read(ctrl, reg, &val);
- if (pcfg->ch_mask != SWR_INVALID_PARAM && pcfg->ch_mask != 0)
- enable_ch->ch_mask = pcfg->ch_mask;
if (enable_ch->enable)
val |= (enable_ch->ch_mask << SWRM_DP_PORT_CTRL_EN_CHAN_SHFT);
@@ -1275,26 +1270,6 @@ static void *qcom_swrm_get_sdw_stream(struct snd_soc_dai *dai, int direction)
return ctrl->sruntime[dai->id];
}
-static int qcom_swrm_set_channel_map(struct snd_soc_dai *dai,
- unsigned int tx_num, const unsigned int *tx_slot,
- unsigned int rx_num, const unsigned int *rx_slot)
-{
- struct qcom_swrm_ctrl *ctrl = dev_get_drvdata(dai->dev);
- int i;
-
- if (tx_slot) {
- for (i = 0; i < tx_num; i++)
- ctrl->pconfig[i].ch_mask = tx_slot[i];
- }
-
- if (rx_slot) {
- for (i = 0; i < rx_num; i++)
- ctrl->pconfig[i].ch_mask = rx_slot[i];
- }
-
- return 0;
-}
-
static int qcom_swrm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -1331,7 +1306,6 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.shutdown = qcom_swrm_shutdown,
.set_stream = qcom_swrm_set_sdw_stream,
.get_stream = qcom_swrm_get_sdw_stream,
- .set_channel_map = qcom_swrm_set_channel_map,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
index f2e1a27b410d..3b757e3d00c0 100644
--- a/drivers/spi/spi-qpic-snand.c
+++ b/drivers/spi/spi-qpic-snand.c
@@ -308,7 +308,7 @@ static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand)
ecc_cfg->bch_enabled = true;
ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size;
- ecc_cfg->steps = 4;
+ ecc_cfg->steps = cwperpage;
ecc_cfg->cw_data = 516;
ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes;
bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 1bc0fdbb1bd7..79d2f5d625d6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -427,7 +427,7 @@ static int spi_probe(struct device *dev)
if (spi->irq < 0)
spi->irq = 0;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret)
return ret;
@@ -4138,10 +4138,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
xfer->tx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
- !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
return -EINVAL;
if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
- !(spi->mode & SPI_TX_QUAD))
+ !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
+ !(spi->mode & SPI_TX_OCTAL))
return -EINVAL;
}
/* Check transfer rx_nbits */
@@ -4154,10 +4157,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
xfer->rx_nbits != SPI_NBITS_OCTAL)
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
- !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
- !(spi->mode & SPI_RX_QUAD))
+ !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
+ !(spi->mode & SPI_RX_OCTAL))
return -EINVAL;
}
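The __spi_validate() change treats the TX/RX capability bits as a ladder: a device that only advertises SPI_TX_OCTAL (or SPI_RX_OCTAL) now also passes the dual and quad transfer checks, just as quad already implied dual, and an explicit check is added for octal transfers themselves. A standalone model of the mask logic — the bit values here are invented for the example and are not the kernel's SPI_* flags:

#include <assert.h>
#include <stdbool.h>

#define TX_DUAL		(1u << 0)
#define TX_QUAD		(1u << 1)
#define TX_OCTAL	(1u << 2)

/* nbits is the number of data lines requested for one transfer */
static bool tx_nbits_ok(unsigned int mode, unsigned int nbits)
{
	switch (nbits) {
	case 2:	/* dual transfers: any dual/quad/octal-capable device */
		return mode & (TX_DUAL | TX_QUAD | TX_OCTAL);
	case 4:	/* quad transfers: quad- or octal-capable device */
		return mode & (TX_QUAD | TX_OCTAL);
	case 8:	/* octal transfers: octal-capable only */
		return mode & TX_OCTAL;
	default:
		return false;
	}
}

int main(void)
{
	assert(tx_nbits_ok(TX_OCTAL, 2));	/* octal device may run dual */
	assert(tx_nbits_ok(TX_OCTAL, 4));	/* ... and quad */
	assert(!tx_nbits_ok(TX_QUAD, 8));	/* quad-only cannot do octal */
	return 0;
}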
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/staging/gpib/common/gpib_os.c
index a193d64db033..38c6abc2ffd2 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/staging/gpib/common/gpib_os.c
@@ -610,7 +610,7 @@ int ibclose(struct inode *inode, struct file *filep)
long ibioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
- unsigned int minor = iminor(filep->f_path.dentry->d_inode);
+ unsigned int minor = iminor(file_inode(filep));
struct gpib_board *board;
struct gpib_file_private *file_priv = filep->private_data;
long retval = -ENOTTY;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 5dbf8d53db09..721b15b7e13b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -97,6 +97,13 @@ struct vchiq_arm_state {
* tracked separately with the state.
*/
int peer_use_count;
+
+ /*
+ * Flag to indicate that the first vchiq connect has made it through.
+ * This means that both sides should be fully ready, and we should
+ * be able to suspend after this point.
+ */
+ int first_connect;
};
static int
@@ -273,6 +280,29 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state
return 0;
}
+int
+vchiq_platform_init_state(struct vchiq_state *state)
+{
+ struct vchiq_arm_state *platform_state;
+
+ platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
+ if (!platform_state)
+ return -ENOMEM;
+
+ rwlock_init(&platform_state->susp_res_lock);
+
+ init_completion(&platform_state->ka_evt);
+ atomic_set(&platform_state->ka_use_count, 0);
+ atomic_set(&platform_state->ka_use_ack_count, 0);
+ atomic_set(&platform_state->ka_release_count, 0);
+
+ platform_state->state = state;
+
+ state->platform_state = (struct opaque_platform_state *)platform_state;
+
+ return 0;
+}
+
static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
return (struct vchiq_arm_state *)state->platform_state;
@@ -363,8 +393,7 @@ int vchiq_shutdown(struct vchiq_instance *instance)
struct vchiq_state *state = instance->state;
int ret = 0;
- if (mutex_lock_killable(&state->mutex))
- return -EAGAIN;
+ mutex_lock(&state->mutex);
/* Remove all services */
vchiq_shutdown_internal(state, instance);
@@ -982,39 +1011,6 @@ exit:
}
int
-vchiq_platform_init_state(struct vchiq_state *state)
-{
- struct vchiq_arm_state *platform_state;
- char threadname[16];
-
- platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL);
- if (!platform_state)
- return -ENOMEM;
-
- snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
- state->id);
- platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
- (void *)state, threadname);
- if (IS_ERR(platform_state->ka_thread)) {
- dev_err(state->dev, "couldn't create thread %s\n", threadname);
- return PTR_ERR(platform_state->ka_thread);
- }
-
- rwlock_init(&platform_state->susp_res_lock);
-
- init_completion(&platform_state->ka_evt);
- atomic_set(&platform_state->ka_use_count, 0);
- atomic_set(&platform_state->ka_use_ack_count, 0);
- atomic_set(&platform_state->ka_release_count, 0);
-
- platform_state->state = state;
-
- state->platform_state = (struct opaque_platform_state *)platform_state;
-
- return 0;
-}
-
-int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
@@ -1329,19 +1325,37 @@ out:
return ret;
}
-void vchiq_platform_connected(struct vchiq_state *state)
-{
- struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
-
- wake_up_process(arm_state->ka_thread);
-}
-
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate)
{
+ struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
+ char threadname[16];
+
dev_dbg(state->dev, "suspend: %d: %s->%s\n",
state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
+ return;
+
+ write_lock_bh(&arm_state->susp_res_lock);
+ if (arm_state->first_connect) {
+ write_unlock_bh(&arm_state->susp_res_lock);
+ return;
+ }
+
+ arm_state->first_connect = 1;
+ write_unlock_bh(&arm_state->susp_res_lock);
+ snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
+ state->id);
+ arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
+ (void *)state,
+ threadname);
+ if (IS_ERR(arm_state->ka_thread)) {
+ dev_err(state->dev, "suspend: Couldn't create thread %s\n",
+ threadname);
+ } else {
+ wake_up_process(arm_state->ka_thread);
+ }
}
static const struct of_device_id vchiq_of_match[] = {
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index e7b0c800a205..e2cac0898b8f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -3343,7 +3343,6 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
return -EAGAIN;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
- vchiq_platform_connected(state);
complete(&state->connect);
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 3b5c0618e567..9b4e766990a4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -575,8 +575,6 @@ int vchiq_send_remote_use(struct vchiq_state *state);
int vchiq_send_remote_use_active(struct vchiq_state *state);
-void vchiq_platform_connected(struct vchiq_state *state);
-
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
enum vchiq_connstate newstate);
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 9bff21068721..c2fbdb534f61 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -408,7 +408,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
return ret;
}
-static struct thermal_zone_device_ops legacy_ops = {
+static const struct thermal_zone_device_ops legacy_ops = {
.get_temp = armada_get_temp_legacy,
};
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index 2077e85ef5ca..a8d4b766ba21 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -137,7 +137,7 @@ static int da9062_thermal_get_temp(struct thermal_zone_device *z,
return 0;
}
-static struct thermal_zone_device_ops da9062_thermal_ops = {
+static const struct thermal_zone_device_ops da9062_thermal_ops = {
.get_temp = da9062_thermal_get_temp,
};
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index f9157a47156b..723bc72f0626 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -106,7 +106,7 @@ static int dove_get_temp(struct thermal_zone_device *thermal,
return 0;
}
-static struct thermal_zone_device_ops ops = {
+static const struct thermal_zone_device_ops ops = {
.get_temp = dove_get_temp,
};
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index bab52e6b3b15..38c993d1bcb3 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -361,7 +361,7 @@ static bool imx_should_bind(struct thermal_zone_device *tz,
return trip->type == THERMAL_TRIP_PASSIVE;
}
-static struct thermal_zone_device_ops imx_tz_ops = {
+static const struct thermal_zone_device_ops imx_tz_ops = {
.should_bind = imx_should_bind,
.get_temp = imx_get_temp,
.change_mode = imx_change_mode,
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 0e07693ecf59..908cc1bf57f1 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -515,7 +515,7 @@ eval_odvp:
return result;
}
-static struct thermal_zone_device_ops int3400_thermal_ops = {
+static const struct thermal_zone_device_ops int3400_thermal_ops = {
.get_temp = int3400_thermal_get_temp,
.change_mode = int3400_thermal_change_mode,
};
@@ -690,6 +690,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INTC1068", 0},
{"INTC10A0", 0},
{"INTC10D4", 0},
+ {"INTC10FC", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 5a925a8df7b3..ba63796761eb 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -276,6 +276,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INTC1069", 0},
{"INTC10A1", 0},
{"INTC10D5", 0},
+ {"INTC10FD", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c b/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c
index 2d6504514893..0ccc72c93499 100644
--- a/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c
+++ b/drivers/thermal/intel/int340x_thermal/platform_temperature_control.c
@@ -38,6 +38,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/debugfs.h>
#include <linux/pci.h>
#include "processor_thermal_device.h"
@@ -49,14 +50,16 @@ struct mmio_reg {
};
#define MAX_ATTR_GROUP_NAME_LEN 32
-#define PTC_MAX_ATTRS 3
+#define PTC_MAX_ATTRS 4
struct ptc_data {
u32 offset;
+ struct pci_dev *pdev;
struct attribute_group ptc_attr_group;
struct attribute *ptc_attrs[PTC_MAX_ATTRS];
struct device_attribute temperature_target_attr;
struct device_attribute enable_attr;
+ struct device_attribute thermal_tolerance_attr;
char group_name[MAX_ATTR_GROUP_NAME_LEN];
};
@@ -78,6 +81,7 @@ static u32 ptc_offsets[PTC_MAX_INSTANCES] = {0x5B20, 0x5B28, 0x5B30};
static const char * const ptc_strings[] = {
"temperature_target",
"enable",
+ "thermal_tolerance",
NULL
};
@@ -177,6 +181,8 @@ PTC_SHOW(temperature_target);
PTC_STORE(temperature_target);
PTC_SHOW(enable);
PTC_STORE(enable);
+PTC_SHOW(thermal_tolerance);
+PTC_STORE(thermal_tolerance);
#define ptc_init_attribute(_name)\
do {\
@@ -193,9 +199,11 @@ static int ptc_create_groups(struct pci_dev *pdev, int instance, struct ptc_data
ptc_init_attribute(temperature_target);
ptc_init_attribute(enable);
+ ptc_init_attribute(thermal_tolerance);
data->ptc_attrs[index++] = &data->temperature_target_attr.attr;
data->ptc_attrs[index++] = &data->enable_attr.attr;
+ data->ptc_attrs[index++] = &data->thermal_tolerance_attr.attr;
data->ptc_attrs[index] = NULL;
snprintf(data->group_name, MAX_ATTR_GROUP_NAME_LEN,
@@ -209,6 +217,63 @@ static int ptc_create_groups(struct pci_dev *pdev, int instance, struct ptc_data
}
static struct ptc_data ptc_instance[PTC_MAX_INSTANCES];
+static struct dentry *ptc_debugfs;
+
+#define PTC_TEMP_OVERRIDE_ENABLE_INDEX 4
+#define PTC_TEMP_OVERRIDE_INDEX 5
+
+static ssize_t ptc_temperature_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct ptc_data *ptc_instance = file->private_data;
+ struct pci_dev *pdev = ptc_instance->pdev;
+ char buf[32];
+ ssize_t len;
+ u32 value;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, data, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtouint(buf, 0, &value))
+ return -EINVAL;
+
+ if (ptc_mmio_regs[PTC_TEMP_OVERRIDE_INDEX].units)
+ value /= ptc_mmio_regs[PTC_TEMP_OVERRIDE_INDEX].units;
+
+ if (value > ptc_mmio_regs[PTC_TEMP_OVERRIDE_INDEX].mask)
+ return -EINVAL;
+
+ if (!value) {
+ ptc_mmio_write(pdev, ptc_instance->offset, PTC_TEMP_OVERRIDE_ENABLE_INDEX, 0);
+ } else {
+ ptc_mmio_write(pdev, ptc_instance->offset, PTC_TEMP_OVERRIDE_INDEX, value);
+ ptc_mmio_write(pdev, ptc_instance->offset, PTC_TEMP_OVERRIDE_ENABLE_INDEX, 1);
+ }
+
+ return count;
+}
+
+static const struct file_operations ptc_fops = {
+ .open = simple_open,
+ .write = ptc_temperature_write,
+ .llseek = generic_file_llseek,
+};
+
+static void ptc_create_debugfs(void)
+{
+ ptc_debugfs = debugfs_create_dir("platform_temperature_control", NULL);
+
+ debugfs_create_file("temperature_0", 0200, ptc_debugfs, &ptc_instance[0], &ptc_fops);
+ debugfs_create_file("temperature_1", 0200, ptc_debugfs, &ptc_instance[1], &ptc_fops);
+ debugfs_create_file("temperature_2", 0200, ptc_debugfs, &ptc_instance[2], &ptc_fops);
+}
+
+static void ptc_delete_debugfs(void)
+{
+ debugfs_remove_recursive(ptc_debugfs);
+}
int proc_thermal_ptc_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
{
@@ -217,8 +282,11 @@ int proc_thermal_ptc_add(struct pci_dev *pdev, struct proc_thermal_device *proc_
for (i = 0; i < PTC_MAX_INSTANCES; i++) {
ptc_instance[i].offset = ptc_offsets[i];
+ ptc_instance[i].pdev = pdev;
ptc_create_groups(pdev, i, &ptc_instance[i]);
}
+
+ ptc_create_debugfs();
}
return 0;
@@ -234,6 +302,8 @@ void proc_thermal_ptc_remove(struct pci_dev *pdev)
for (i = 0; i < PTC_MAX_INSTANCES; i++)
sysfs_remove_group(&pdev->dev.kobj, &ptc_instance[i].ptc_attr_group);
+
+ ptc_delete_debugfs();
}
}
EXPORT_SYMBOL_GPL(proc_thermal_ptc_remove);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 9a6ca43b6fa2..49398794124a 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -31,6 +31,7 @@
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
#define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03
#define PCI_DEVICE_ID_INTEL_PTL_THERMAL 0xB01D
+#define PCI_DEVICE_ID_INTEL_WCL_THERMAL 0xFD1D
struct power_config {
u32 index;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 00160936070a..d4d7e8e147d2 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -499,6 +499,10 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_DVFS |
PROC_THERMAL_FEATURE_MSI_SUPPORT | PROC_THERMAL_FEATURE_WT_HINT |
PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC) },
+ { PCI_DEVICE_DATA(INTEL, WCL_THERMAL, PROC_THERMAL_FEATURE_MSI_SUPPORT |
+ PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_DLVR |
+ PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_HINT |
+ PROC_THERMAL_FEATURE_POWER_FLOOR | PROC_THERMAL_FEATURE_PTC) },
{ },
};
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
index 3a028b78d9af..1f3d22b659db 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
@@ -442,6 +442,7 @@ int proc_thermal_rfim_add(struct pci_dev *pdev, struct proc_thermal_device *proc
switch (pdev->device) {
case PCI_DEVICE_ID_INTEL_LNLM_THERMAL:
case PCI_DEVICE_ID_INTEL_PTL_THERMAL:
+ case PCI_DEVICE_ID_INTEL_WCL_THERMAL:
dlvr_mmio_regs_table = lnl_dlvr_mmio_regs;
dlvr_mapping = lnl_dlvr_mapping;
break;
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 7c2265231668..4619e090f756 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -48,7 +48,7 @@ static int kirkwood_get_temp(struct thermal_zone_device *thermal,
return 0;
}
-static struct thermal_zone_device_ops ops = {
+static const struct thermal_zone_device_ops ops = {
.get_temp = kirkwood_get_temp,
};
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
index 2d6b75b0539f..ea4dd2fb1f47 100644
--- a/drivers/thermal/loongson2_thermal.c
+++ b/drivers/thermal/loongson2_thermal.c
@@ -112,13 +112,19 @@ static int loongson2_thermal_set_trips(struct thermal_zone_device *tz, int low,
return loongson2_thermal_set(data, low/MILLI, high/MILLI, true);
}
-static struct thermal_zone_device_ops loongson2_of_thermal_ops = {
+static const struct thermal_zone_device_ops loongson2_2k1000_of_thermal_ops = {
.get_temp = loongson2_2k1000_get_temp,
.set_trips = loongson2_thermal_set_trips,
};
+static const struct thermal_zone_device_ops loongson2_2k2000_of_thermal_ops = {
+ .get_temp = loongson2_2k2000_get_temp,
+ .set_trips = loongson2_thermal_set_trips,
+};
+
static int loongson2_thermal_probe(struct platform_device *pdev)
{
+ const struct thermal_zone_device_ops *thermal_ops;
struct device *dev = &pdev->dev;
struct loongson2_thermal_data *data;
struct thermal_zone_device *tzd;
@@ -140,7 +146,9 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
if (IS_ERR(data->temp_reg))
return PTR_ERR(data->temp_reg);
- loongson2_of_thermal_ops.get_temp = loongson2_2k2000_get_temp;
+ thermal_ops = &loongson2_2k2000_of_thermal_ops;
+ } else {
+ thermal_ops = &loongson2_2k1000_of_thermal_ops;
}
irq = platform_get_irq(pdev, 0);
@@ -152,8 +160,7 @@ static int loongson2_thermal_probe(struct platform_device *pdev)
loongson2_thermal_set(data, 0, 0, false);
for (i = 0; i <= LOONGSON2_MAX_SENSOR_SEL_NUM; i++) {
- tzd = devm_thermal_of_zone_register(dev, i, data,
- &loongson2_of_thermal_ops);
+ tzd = devm_thermal_of_zone_register(dev, i, data, thermal_ops);
if (!IS_ERR(tzd))
break;
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 985925147ac0..f4d1e66d7db9 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -125,7 +125,11 @@ struct lvts_ctrl_data {
struct lvts_data {
const struct lvts_ctrl_data *lvts_ctrl;
+ const u32 *conn_cmd;
+ const u32 *init_cmd;
int num_lvts_ctrl;
+ int num_conn_cmd;
+ int num_init_cmd;
int temp_factor;
int temp_offset;
int gt_calib_bit_offset;
@@ -571,7 +575,7 @@ static irqreturn_t lvts_irq_handler(int irq, void *data)
return iret;
}
-static struct thermal_zone_device_ops lvts_ops = {
+static const struct thermal_zone_device_ops lvts_ops = {
.get_temp = lvts_get_temp,
.set_trips = lvts_set_trips,
};
@@ -902,7 +906,7 @@ static void lvts_ctrl_monitor_enable(struct device *dev, struct lvts_ctrl *lvts_
* each write in the configuration register must be separated by a
* delay of 2 us.
*/
-static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, u32 *cmds, int nr_cmds)
+static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, const u32 *cmds, int nr_cmds)
{
int i;
@@ -985,9 +989,10 @@ static int lvts_ctrl_set_enable(struct lvts_ctrl *lvts_ctrl, int enable)
static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl)
{
- u32 id, cmds[] = { 0xC103FFFF, 0xC502FF55 };
+ const struct lvts_data *lvts_data = lvts_ctrl->lvts_data;
+ u32 id;
- lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
+ lvts_write_config(lvts_ctrl, lvts_data->conn_cmd, lvts_data->num_conn_cmd);
/*
* LVTS_ID : Get ID and status of the thermal controller
@@ -1006,17 +1011,9 @@ static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl)
static int lvts_ctrl_initialize(struct device *dev, struct lvts_ctrl *lvts_ctrl)
{
- /*
- * Write device mask: 0xC1030000
- */
- u32 cmds[] = {
- 0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1,
- 0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300,
- 0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC,
- 0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1
- };
+ const struct lvts_data *lvts_data = lvts_ctrl->lvts_data;
- lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
+ lvts_write_config(lvts_ctrl, lvts_data->init_cmd, lvts_data->num_init_cmd);
return 0;
}
@@ -1445,6 +1442,25 @@ static int lvts_resume(struct device *dev)
return 0;
}
+static const u32 default_conn_cmds[] = { 0xC103FFFF, 0xC502FF55 };
+static const u32 mt7988_conn_cmds[] = { 0xC103FFFF, 0xC502FC55 };
+
+/*
+ * Write device mask: 0xC1030000
+ */
+static const u32 default_init_cmds[] = {
+ 0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1,
+ 0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300,
+ 0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC,
+ 0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1
+};
+
+static const u32 mt7988_init_cmds[] = {
+ 0xC1030300, 0xC1030420, 0xC1030500, 0xC10307A6, 0xC1030CFC,
+ 0xC1030A8C, 0xC103098D, 0xC10308F1, 0xC1030B04, 0xC1030E01,
+ 0xC10306B8
+};
+
/*
* The MT8186 calibration data is stored as packed 3-byte little-endian
* values using a weird layout that makes sense only when viewed as a 32-bit
@@ -1739,7 +1755,11 @@ static const struct lvts_ctrl_data mt8195_lvts_ap_data_ctrl[] = {
static const struct lvts_data mt7988_lvts_ap_data = {
.lvts_ctrl = mt7988_lvts_ap_data_ctrl,
+ .conn_cmd = mt7988_conn_cmds,
+ .init_cmd = mt7988_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt7988_lvts_ap_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(mt7988_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(mt7988_init_cmds),
.temp_factor = LVTS_COEFF_A_MT7988,
.temp_offset = LVTS_COEFF_B_MT7988,
.gt_calib_bit_offset = 24,
@@ -1747,7 +1767,11 @@ static const struct lvts_data mt7988_lvts_ap_data = {
static const struct lvts_data mt8186_lvts_data = {
.lvts_ctrl = mt8186_lvts_data_ctrl,
+ .conn_cmd = default_conn_cmds,
+ .init_cmd = default_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt8186_lvts_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(default_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(default_init_cmds),
.temp_factor = LVTS_COEFF_A_MT7988,
.temp_offset = LVTS_COEFF_B_MT7988,
.gt_calib_bit_offset = 24,
@@ -1756,7 +1780,11 @@ static const struct lvts_data mt8186_lvts_data = {
static const struct lvts_data mt8188_lvts_mcu_data = {
.lvts_ctrl = mt8188_lvts_mcu_data_ctrl,
+ .conn_cmd = default_conn_cmds,
+ .init_cmd = default_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt8188_lvts_mcu_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(default_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(default_init_cmds),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 20,
@@ -1765,7 +1793,11 @@ static const struct lvts_data mt8188_lvts_mcu_data = {
static const struct lvts_data mt8188_lvts_ap_data = {
.lvts_ctrl = mt8188_lvts_ap_data_ctrl,
+ .conn_cmd = default_conn_cmds,
+ .init_cmd = default_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt8188_lvts_ap_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(default_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(default_init_cmds),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 20,
@@ -1774,7 +1806,11 @@ static const struct lvts_data mt8188_lvts_ap_data = {
static const struct lvts_data mt8192_lvts_mcu_data = {
.lvts_ctrl = mt8192_lvts_mcu_data_ctrl,
+ .conn_cmd = default_conn_cmds,
+ .init_cmd = default_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_mcu_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(default_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(default_init_cmds),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
@@ -1783,7 +1819,11 @@ static const struct lvts_data mt8192_lvts_mcu_data = {
static const struct lvts_data mt8192_lvts_ap_data = {
.lvts_ctrl = mt8192_lvts_ap_data_ctrl,
+ .conn_cmd = default_conn_cmds,
+ .init_cmd = default_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_ap_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(default_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(default_init_cmds),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
@@ -1792,7 +1832,11 @@ static const struct lvts_data mt8192_lvts_ap_data = {
static const struct lvts_data mt8195_lvts_mcu_data = {
.lvts_ctrl = mt8195_lvts_mcu_data_ctrl,
+ .conn_cmd = default_conn_cmds,
+ .init_cmd = default_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_mcu_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(default_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(default_init_cmds),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
@@ -1801,7 +1845,11 @@ static const struct lvts_data mt8195_lvts_mcu_data = {
static const struct lvts_data mt8195_lvts_ap_data = {
.lvts_ctrl = mt8195_lvts_ap_data_ctrl,
+ .conn_cmd = default_conn_cmds,
+ .init_cmd = default_init_cmds,
.num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_ap_data_ctrl),
+ .num_conn_cmd = ARRAY_SIZE(default_conn_cmds),
+ .num_init_cmd = ARRAY_SIZE(default_init_cmds),
.temp_factor = LVTS_COEFF_A_MT8195,
.temp_offset = LVTS_COEFF_B_MT8195,
.gt_calib_bit_offset = 24,
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index 991d1573983d..75eaa9a68ab8 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -209,8 +209,7 @@ static int lmh_probe(struct platform_device *pdev)
}
lmh_data->irq = platform_get_irq(pdev, 0);
- lmh_data->domain = irq_domain_create_linear(of_fwnode_handle(np), 1, &lmh_irq_ops,
- lmh_data);
+ lmh_data->domain = irq_domain_create_linear(dev_fwnode(dev), 1, &lmh_irq_ops, lmh_data);
if (!lmh_data->domain) {
dev_err(dev, "Error adding irq_domain\n");
return -EINVAL;
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index a81e7d6e865f..f39ca0ddd17b 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -16,31 +18,51 @@
#include "../thermal_hwmon.h"
+#define QPNP_TM_REG_DIG_MINOR 0x00
#define QPNP_TM_REG_DIG_MAJOR 0x01
#define QPNP_TM_REG_TYPE 0x04
#define QPNP_TM_REG_SUBTYPE 0x05
#define QPNP_TM_REG_STATUS 0x08
+#define QPNP_TM_REG_IRQ_STATUS 0x10
#define QPNP_TM_REG_SHUTDOWN_CTRL1 0x40
#define QPNP_TM_REG_ALARM_CTRL 0x46
+/* TEMP_DAC_STGx registers are only present for TEMP_GEN2 v2.0 */
+#define QPNP_TM_REG_TEMP_DAC_STG1 0x47
+#define QPNP_TM_REG_TEMP_DAC_STG2 0x48
+#define QPNP_TM_REG_TEMP_DAC_STG3 0x49
+#define QPNP_TM_REG_LITE_TEMP_CFG1 0x50
+#define QPNP_TM_REG_LITE_TEMP_CFG2 0x51
+
#define QPNP_TM_TYPE 0x09
#define QPNP_TM_SUBTYPE_GEN1 0x08
#define QPNP_TM_SUBTYPE_GEN2 0x09
+#define QPNP_TM_SUBTYPE_LITE 0xC0
#define STATUS_GEN1_STAGE_MASK GENMASK(1, 0)
#define STATUS_GEN2_STATE_MASK GENMASK(6, 4)
-#define STATUS_GEN2_STATE_SHIFT 4
-#define SHUTDOWN_CTRL1_OVERRIDE_S2 BIT(6)
+/* IRQ status only needed for TEMP_ALARM_LITE */
+#define IRQ_STATUS_MASK BIT(0)
+
+#define SHUTDOWN_CTRL1_OVERRIDE_STAGE2 BIT(6)
#define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0)
#define SHUTDOWN_CTRL1_RATE_25HZ BIT(3)
#define ALARM_CTRL_FORCE_ENABLE BIT(7)
+#define LITE_TEMP_CFG_THRESHOLD_MASK GENMASK(3, 2)
+
#define THRESH_COUNT 4
#define STAGE_COUNT 3
+enum overtemp_stage {
+ STAGE1 = 0,
+ STAGE2,
+ STAGE3,
+};
+
/* Over-temperature trip point values in mC */
static const long temp_map_gen1[THRESH_COUNT][STAGE_COUNT] = {
{ 105000, 125000, 145000 },
@@ -63,24 +85,68 @@ static const long temp_map_gen2_v1[THRESH_COUNT][STAGE_COUNT] = {
#define TEMP_STAGE_HYSTERESIS 2000
+/*
+ * For TEMP_GEN2 v2.0, TEMP_DAC_STG1/2/3 registers are used to set the threshold
+ * for each stage independently.
+ * TEMP_DAC_STG* = 0 --> 80 C
+ * Each 8 step increase in TEMP_DAC_STG* value corresponds to 5 C (5000 mC).
+ */
+#define TEMP_DAC_MIN 80000
+#define TEMP_DAC_SCALE_NUM 8
+#define TEMP_DAC_SCALE_DEN 5000
+
+#define TEMP_DAC_TEMP_TO_REG(temp) \
+ (((temp) - TEMP_DAC_MIN) * TEMP_DAC_SCALE_NUM / TEMP_DAC_SCALE_DEN)
+#define TEMP_DAC_REG_TO_TEMP(reg) \
+ (TEMP_DAC_MIN + (reg) * TEMP_DAC_SCALE_DEN / TEMP_DAC_SCALE_NUM)
+
+static const long temp_dac_max[STAGE_COUNT] = {
+ 119375, 159375, 159375
+};
+
+/*
+ * TEMP_ALARM_LITE has two stages: warning and shutdown with independently
+ * configured threshold temperatures.
+ */
+
+static const long temp_lite_warning_map[THRESH_COUNT] = {
+ 115000, 125000, 135000, 145000
+};
+
+static const long temp_lite_shutdown_map[THRESH_COUNT] = {
+ 135000, 145000, 160000, 175000
+};
+
/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
#define DEFAULT_TEMP 37000
+struct qpnp_tm_chip;
+
+struct spmi_temp_alarm_data {
+ const struct thermal_zone_device_ops *ops;
+ const long (*temp_map)[THRESH_COUNT][STAGE_COUNT];
+ int (*sync_thresholds)(struct qpnp_tm_chip *chip);
+ int (*get_temp_stage)(struct qpnp_tm_chip *chip);
+ int (*configure_trip_temps)(struct qpnp_tm_chip *chip);
+};
+
struct qpnp_tm_chip {
struct regmap *map;
struct device *dev;
struct thermal_zone_device *tz_dev;
+ const struct spmi_temp_alarm_data *data;
unsigned int subtype;
long temp;
- unsigned int thresh;
unsigned int stage;
unsigned int base;
+ unsigned int ntrips;
/* protects .thresh, .stage and chip registers */
struct mutex lock;
bool initialized;
+ bool require_stage2_shutdown;
+ long temp_thresh_map[STAGE_COUNT];
struct iio_channel *adc;
- const long (*temp_map)[THRESH_COUNT][STAGE_COUNT];
};
/* This array maps from GEN2 alarm state to GEN1 alarm stage */
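The TEMP_DAC_* constants in the hunk above encode a linear mapping: register value 0 corresponds to 80 C and every 8 register steps add 5 C, i.e. one step is 625 mC. A standalone round-trip check using exactly the constants from this hunk:

#include <assert.h>
#include <stdio.h>

#define TEMP_DAC_MIN		80000	/* mC at register value 0 */
#define TEMP_DAC_SCALE_NUM	8	/* register steps ... */
#define TEMP_DAC_SCALE_DEN	5000	/* ... per 5000 mC */

#define TEMP_DAC_TEMP_TO_REG(temp) \
	(((temp) - TEMP_DAC_MIN) * TEMP_DAC_SCALE_NUM / TEMP_DAC_SCALE_DEN)
#define TEMP_DAC_REG_TO_TEMP(reg) \
	(TEMP_DAC_MIN + (reg) * TEMP_DAC_SCALE_DEN / TEMP_DAC_SCALE_NUM)

int main(void)
{
	/* 105 C: (105000 - 80000) * 8 / 5000 = 40 */
	assert(TEMP_DAC_TEMP_TO_REG(105000) == 40);
	assert(TEMP_DAC_REG_TO_TEMP(40) == 105000);

	/* stage 1 maximum from temp_dac_max[]: 119375 mC <-> register 63 */
	assert(TEMP_DAC_TEMP_TO_REG(119375) == 63);
	assert(TEMP_DAC_REG_TO_TEMP(63) == 119375);

	printf("TEMP_DAC round trip ok\n");
	return 0;
}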
@@ -114,34 +180,66 @@ static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data)
*/
static long qpnp_tm_decode_temp(struct qpnp_tm_chip *chip, unsigned int stage)
{
- if (!chip->temp_map || chip->thresh >= THRESH_COUNT || stage == 0 ||
- stage > STAGE_COUNT)
+ if (stage == 0 || stage > STAGE_COUNT)
return 0;
- return (*chip->temp_map)[chip->thresh][stage - 1];
+ return chip->temp_thresh_map[stage - 1];
}
/**
- * qpnp_tm_get_temp_stage() - return over-temperature stage
+ * qpnp_tm_gen1_get_temp_stage() - return over-temperature stage
* @chip: Pointer to the qpnp_tm chip
*
- * Return: stage (GEN1) or state (GEN2) on success, or errno on failure.
+ * Return: stage on success, or errno on failure.
*/
-static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip)
+static int qpnp_tm_gen1_get_temp_stage(struct qpnp_tm_chip *chip)
{
int ret;
- u8 reg = 0;
+ u8 reg;
ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
if (ret < 0)
return ret;
- if (chip->subtype == QPNP_TM_SUBTYPE_GEN1)
- ret = reg & STATUS_GEN1_STAGE_MASK;
- else
- ret = (reg & STATUS_GEN2_STATE_MASK) >> STATUS_GEN2_STATE_SHIFT;
+ return FIELD_GET(STATUS_GEN1_STAGE_MASK, reg);
+}
- return ret;
+/**
+ * qpnp_tm_gen2_get_temp_stage() - return over-temperature stage
+ * @chip: Pointer to the qpnp_tm chip
+ *
+ * Return: stage on success, or errno on failure.
+ */
+static int qpnp_tm_gen2_get_temp_stage(struct qpnp_tm_chip *chip)
+{
+ int ret;
+ u8 reg;
+
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(STATUS_GEN2_STATE_MASK, reg);
+
+ return alarm_state_map[ret];
+}
+
+/**
+ * qpnp_tm_lite_get_temp_stage() - return over-temperature stage
+ * @chip: Pointer to the qpnp_tm chip
+ *
+ * Return: alarm interrupt state on success, or errno on failure.
+ */
+static int qpnp_tm_lite_get_temp_stage(struct qpnp_tm_chip *chip)
+{
+ u8 reg = 0;
+ int ret;
+
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_IRQ_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ return FIELD_GET(IRQ_STATUS_MASK, reg);
}
/*
@@ -150,23 +248,16 @@ static int qpnp_tm_get_temp_stage(struct qpnp_tm_chip *chip)
*/
static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
{
- unsigned int stage, stage_new, stage_old;
+ unsigned int stage_new, stage_old;
int ret;
WARN_ON(!mutex_is_locked(&chip->lock));
- ret = qpnp_tm_get_temp_stage(chip);
+ ret = chip->data->get_temp_stage(chip);
if (ret < 0)
return ret;
- stage = ret;
-
- if (chip->subtype == QPNP_TM_SUBTYPE_GEN1) {
- stage_new = stage;
- stage_old = chip->stage;
- } else {
- stage_new = alarm_state_map[stage];
- stage_old = alarm_state_map[chip->stage];
- }
+ stage_new = ret;
+ stage_old = chip->stage;
if (stage_new > stage_old) {
/* increasing stage, use lower bound */
@@ -178,7 +269,7 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
- TEMP_STAGE_HYSTERESIS;
}
- chip->stage = stage;
+ chip->stage = stage_new;
return 0;
}
@@ -218,35 +309,35 @@ static int qpnp_tm_get_temp(struct thermal_zone_device *tz, int *temp)
static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
int temp)
{
- long stage2_threshold_min = (*chip->temp_map)[THRESH_MIN][1];
- long stage2_threshold_max = (*chip->temp_map)[THRESH_MAX][1];
- bool disable_s2_shutdown = false;
- u8 reg;
+ long stage2_threshold_min = (*chip->data->temp_map)[THRESH_MIN][STAGE2];
+ long stage2_threshold_max = (*chip->data->temp_map)[THRESH_MAX][STAGE2];
+ bool disable_stage2_shutdown = false;
+ u8 reg, threshold;
WARN_ON(!mutex_is_locked(&chip->lock));
/*
- * Default: S2 and S3 shutdown enabled, thresholds at
+ * Default: Stage 2 and Stage 3 shutdown enabled, thresholds at
* lowest threshold set, monitoring at 25Hz
*/
reg = SHUTDOWN_CTRL1_RATE_25HZ;
if (temp == THERMAL_TEMP_INVALID ||
temp < stage2_threshold_min) {
- chip->thresh = THRESH_MIN;
+ threshold = THRESH_MIN;
goto skip;
}
if (temp <= stage2_threshold_max) {
- chip->thresh = THRESH_MAX -
+ threshold = THRESH_MAX -
((stage2_threshold_max - temp) /
TEMP_THRESH_STEP);
- disable_s2_shutdown = true;
+ disable_stage2_shutdown = true;
} else {
- chip->thresh = THRESH_MAX;
+ threshold = THRESH_MAX;
if (chip->adc)
- disable_s2_shutdown = true;
+ disable_stage2_shutdown = true;
else
dev_warn(chip->dev,
"No ADC is configured and critical temperature %d mC is above the maximum stage 2 threshold of %ld mC! Configuring stage 2 shutdown at %ld mC.\n",
@@ -254,9 +345,11 @@ static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
}
skip:
- reg |= chip->thresh;
- if (disable_s2_shutdown)
- reg |= SHUTDOWN_CTRL1_OVERRIDE_S2;
+ memcpy(chip->temp_thresh_map, chip->data->temp_map[threshold],
+ sizeof(chip->temp_thresh_map));
+ reg |= threshold;
+ if (disable_stage2_shutdown && !chip->require_stage2_shutdown)
+ reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2;
return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
}
@@ -282,6 +375,146 @@ static const struct thermal_zone_device_ops qpnp_tm_sensor_ops = {
.set_trip_temp = qpnp_tm_set_trip_temp,
};
+static int qpnp_tm_gen2_rev2_set_temp_thresh(struct qpnp_tm_chip *chip, unsigned int trip, int temp)
+{
+ int ret, temp_cfg;
+ u8 reg;
+
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
+ if (trip >= STAGE_COUNT) {
+ dev_err(chip->dev, "invalid TEMP_DAC trip = %d\n", trip);
+ return -EINVAL;
+ } else if (temp < TEMP_DAC_MIN || temp > temp_dac_max[trip]) {
+ dev_err(chip->dev, "invalid TEMP_DAC temp = %d\n", temp);
+ return -EINVAL;
+ }
+
+ reg = TEMP_DAC_TEMP_TO_REG(temp);
+ temp_cfg = TEMP_DAC_REG_TO_TEMP(reg);
+
+ ret = qpnp_tm_write(chip, QPNP_TM_REG_TEMP_DAC_STG1 + trip, reg);
+ if (ret < 0) {
+ dev_err(chip->dev, "TEMP_DAC_STG write failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ chip->temp_thresh_map[trip] = temp_cfg;
+
+ return 0;
+}
+
+static int qpnp_tm_gen2_rev2_set_trip_temp(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip, int temp)
+{
+ unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);
+ struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz);
+ int ret;
+
+ mutex_lock(&chip->lock);
+ ret = qpnp_tm_gen2_rev2_set_temp_thresh(chip, trip_index, temp);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static const struct thermal_zone_device_ops qpnp_tm_gen2_rev2_sensor_ops = {
+ .get_temp = qpnp_tm_get_temp,
+ .set_trip_temp = qpnp_tm_gen2_rev2_set_trip_temp,
+};
+
+static int qpnp_tm_lite_set_temp_thresh(struct qpnp_tm_chip *chip, unsigned int trip, int temp)
+{
+ int ret, temp_cfg, i;
+ const long *temp_map;
+ u8 reg, thresh;
+ u16 addr;
+
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
+ if (trip >= STAGE_COUNT) {
+ dev_err(chip->dev, "invalid TEMP_LITE trip = %d\n", trip);
+ return -EINVAL;
+ }
+
+ switch (trip) {
+ case 0:
+ temp_map = temp_lite_warning_map;
+ addr = QPNP_TM_REG_LITE_TEMP_CFG1;
+ break;
+ case 1:
+ /*
+ * The second trip point is purely in software to facilitate
+ * a controlled shutdown after the warning threshold is crossed
+ * but before the automatic hardware shutdown threshold is
+ * crossed.
+ */
+ return 0;
+ case 2:
+ temp_map = temp_lite_shutdown_map;
+ addr = QPNP_TM_REG_LITE_TEMP_CFG2;
+ break;
+ default:
+ return 0;
+ }
+
+ if (temp < temp_map[THRESH_MIN] || temp > temp_map[THRESH_MAX]) {
+ dev_err(chip->dev, "invalid TEMP_LITE temp = %d\n", temp);
+ return -EINVAL;
+ }
+
+ thresh = 0;
+ temp_cfg = temp_map[thresh];
+ for (i = THRESH_MAX; i >= THRESH_MIN; i--) {
+ if (temp >= temp_map[i]) {
+ thresh = i;
+ temp_cfg = temp_map[i];
+ break;
+ }
+ }
+
+ if (temp_cfg == chip->temp_thresh_map[trip])
+ return 0;
+
+ ret = qpnp_tm_read(chip, addr, &reg);
+ if (ret < 0) {
+ dev_err(chip->dev, "LITE_TEMP_CFG read failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ reg &= ~LITE_TEMP_CFG_THRESHOLD_MASK;
+ reg |= FIELD_PREP(LITE_TEMP_CFG_THRESHOLD_MASK, thresh);
+
+ ret = qpnp_tm_write(chip, addr, reg);
+ if (ret < 0) {
+ dev_err(chip->dev, "LITE_TEMP_CFG write failed, ret=%d\n", ret);
+ return ret;
+ }
+
+ chip->temp_thresh_map[trip] = temp_cfg;
+
+ return 0;
+}
+
+static int qpnp_tm_lite_set_trip_temp(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip, int temp)
+{
+ unsigned int trip_index = THERMAL_TRIP_PRIV_TO_INT(trip->priv);
+ struct qpnp_tm_chip *chip = thermal_zone_device_priv(tz);
+ int ret;
+
+ mutex_lock(&chip->lock);
+ ret = qpnp_tm_lite_set_temp_thresh(chip, trip_index, temp);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static const struct thermal_zone_device_ops qpnp_tm_lite_sensor_ops = {
+ .get_temp = qpnp_tm_get_temp,
+ .set_trip_temp = qpnp_tm_lite_set_trip_temp,
+};
+
static irqreturn_t qpnp_tm_isr(int irq, void *data)
{
struct qpnp_tm_chip *chip = data;
@@ -291,49 +524,227 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
return IRQ_HANDLED;
}
-/*
- * This function initializes the internal temp value based on only the
- * current thermal stage and threshold. Setup threshold control and
- * disable shutdown override.
- */
-static int qpnp_tm_init(struct qpnp_tm_chip *chip)
+/* Read the hardware default stage threshold temperatures */
+static int qpnp_tm_sync_thresholds(struct qpnp_tm_chip *chip)
{
- unsigned int stage;
+ u8 reg, threshold;
int ret;
+
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
+ if (ret < 0)
+ return ret;
+
+ threshold = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+ memcpy(chip->temp_thresh_map, chip->data->temp_map[threshold],
+ sizeof(chip->temp_thresh_map));
+
+ return ret;
+}
+
+static int qpnp_tm_configure_trip_temp(struct qpnp_tm_chip *chip)
+{
+ int crit_temp, ret;
+
+ ret = thermal_zone_get_crit_temp(chip->tz_dev, &crit_temp);
+ if (ret)
+ crit_temp = THERMAL_TEMP_INVALID;
+
+ mutex_lock(&chip->lock);
+ ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+/* Configure TEMP_DAC registers based on DT thermal_zone trips */
+static int qpnp_tm_gen2_rev2_configure_trip_temps_cb(struct thermal_trip *trip, void *data)
+{
+ struct qpnp_tm_chip *chip = data;
+ int ret;
+
+ mutex_lock(&chip->lock);
+ trip->priv = THERMAL_INT_TO_TRIP_PRIV(chip->ntrips);
+ ret = qpnp_tm_gen2_rev2_set_temp_thresh(chip, chip->ntrips, trip->temperature);
+ chip->ntrips++;
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
+static int qpnp_tm_gen2_rev2_configure_trip_temps(struct qpnp_tm_chip *chip)
+{
+ int ret, i;
+
+ ret = thermal_zone_for_each_trip(chip->tz_dev,
+ qpnp_tm_gen2_rev2_configure_trip_temps_cb, chip);
+ if (ret < 0)
+ return ret;
+
+ /* Verify that trips are strictly increasing. */
+ for (i = 1; i < STAGE_COUNT; i++) {
+ if (chip->temp_thresh_map[i] <= chip->temp_thresh_map[i - 1]) {
+ dev_err(chip->dev, "Threshold %d=%ld <= threshold %d=%ld\n",
+ i, chip->temp_thresh_map[i], i - 1,
+ chip->temp_thresh_map[i - 1]);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Read the hardware default TEMP_DAC stage threshold temperatures */
+static int qpnp_tm_gen2_rev2_sync_thresholds(struct qpnp_tm_chip *chip)
+{
+ int ret, i;
u8 reg = 0;
- int crit_temp;
+
+ for (i = 0; i < STAGE_COUNT; i++) {
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_TEMP_DAC_STG1 + i, &reg);
+ if (ret < 0)
+ return ret;
+
+ chip->temp_thresh_map[i] = TEMP_DAC_REG_TO_TEMP(reg);
+ }
+
+ return 0;
+}
+
+/* Configure TEMP_LITE registers based on DT thermal_zone trips */
+static int qpnp_tm_lite_configure_trip_temps_cb(struct thermal_trip *trip, void *data)
+{
+ struct qpnp_tm_chip *chip = data;
+ int ret;
mutex_lock(&chip->lock);
+ trip->priv = THERMAL_INT_TO_TRIP_PRIV(chip->ntrips);
+ ret = qpnp_tm_lite_set_temp_thresh(chip, chip->ntrips, trip->temperature);
+ chip->ntrips++;
+ mutex_unlock(&chip->lock);
- ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
+ return ret;
+}
+
+static int qpnp_tm_lite_configure_trip_temps(struct qpnp_tm_chip *chip)
+{
+ int ret;
+
+ ret = thermal_zone_for_each_trip(chip->tz_dev, qpnp_tm_lite_configure_trip_temps_cb, chip);
+ if (ret < 0)
+ return ret;
+
+ /* Verify that trips are strictly increasing. */
+ if (chip->temp_thresh_map[2] <= chip->temp_thresh_map[0]) {
+ dev_err(chip->dev, "Threshold 2=%ld <= threshold 0=%ld\n",
+ chip->temp_thresh_map[2], chip->temp_thresh_map[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Read the hardware default TEMP_LITE stage threshold temperatures */
+static int qpnp_tm_lite_sync_thresholds(struct qpnp_tm_chip *chip)
+{
+ int ret, thresh;
+ u8 reg = 0;
+
+ /*
+ * Store the warning trip temp in temp_thresh_map[0] and the shutdown trip
+ * temp in temp_thresh_map[2]. The second trip point is purely in software
+ * to facilitate a controlled shutdown after the warning threshold is
+ * crossed but before the automatic hardware shutdown threshold is
+ * crossed. Thus, there is no register to read for the second trip
+ * point.
+ */
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_LITE_TEMP_CFG1, &reg);
if (ret < 0)
- goto out;
+ return ret;
- chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
- chip->temp = DEFAULT_TEMP;
+ thresh = FIELD_GET(LITE_TEMP_CFG_THRESHOLD_MASK, reg);
+ chip->temp_thresh_map[0] = temp_lite_warning_map[thresh];
- ret = qpnp_tm_get_temp_stage(chip);
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_LITE_TEMP_CFG2, &reg);
if (ret < 0)
- goto out;
- chip->stage = ret;
+ return ret;
- stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
- ? chip->stage : alarm_state_map[chip->stage];
+ thresh = FIELD_GET(LITE_TEMP_CFG_THRESHOLD_MASK, reg);
+ chip->temp_thresh_map[2] = temp_lite_shutdown_map[thresh];
- if (stage)
- chip->temp = qpnp_tm_decode_temp(chip, stage);
+ return 0;
+}
- mutex_unlock(&chip->lock);
+static const struct spmi_temp_alarm_data spmi_temp_alarm_data = {
+ .ops = &qpnp_tm_sensor_ops,
+ .temp_map = &temp_map_gen1,
+ .sync_thresholds = qpnp_tm_sync_thresholds,
+ .configure_trip_temps = qpnp_tm_configure_trip_temp,
+ .get_temp_stage = qpnp_tm_gen1_get_temp_stage,
+};
- ret = thermal_zone_get_crit_temp(chip->tz_dev, &crit_temp);
- if (ret)
- crit_temp = THERMAL_TEMP_INVALID;
+static const struct spmi_temp_alarm_data spmi_temp_alarm_gen2_data = {
+ .ops = &qpnp_tm_sensor_ops,
+ .temp_map = &temp_map_gen1,
+ .sync_thresholds = qpnp_tm_sync_thresholds,
+ .configure_trip_temps = qpnp_tm_configure_trip_temp,
+ .get_temp_stage = qpnp_tm_gen2_get_temp_stage,
+};
- mutex_lock(&chip->lock);
+static const struct spmi_temp_alarm_data spmi_temp_alarm_gen2_rev1_data = {
+ .ops = &qpnp_tm_sensor_ops,
+ .temp_map = &temp_map_gen2_v1,
+ .sync_thresholds = qpnp_tm_sync_thresholds,
+ .configure_trip_temps = qpnp_tm_configure_trip_temp,
+ .get_temp_stage = qpnp_tm_gen2_get_temp_stage,
+};
- ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
+static const struct spmi_temp_alarm_data spmi_temp_alarm_gen2_rev2_data = {
+ .ops = &qpnp_tm_gen2_rev2_sensor_ops,
+ .sync_thresholds = qpnp_tm_gen2_rev2_sync_thresholds,
+ .configure_trip_temps = qpnp_tm_gen2_rev2_configure_trip_temps,
+ .get_temp_stage = qpnp_tm_gen2_get_temp_stage,
+};
+
+static const struct spmi_temp_alarm_data spmi_temp_alarm_lite_data = {
+ .ops = &qpnp_tm_lite_sensor_ops,
+ .sync_thresholds = qpnp_tm_lite_sync_thresholds,
+ .configure_trip_temps = qpnp_tm_lite_configure_trip_temps,
+ .get_temp_stage = qpnp_tm_lite_get_temp_stage,
+};
+
+/*
+ * This function initializes the internal temp value based only on the
+ * current thermal stage and threshold.
+ */
+static int qpnp_tm_threshold_init(struct qpnp_tm_chip *chip)
+{
+ int ret;
+
+ ret = chip->data->sync_thresholds(chip);
+ if (ret < 0)
+ return ret;
+
+ ret = chip->data->get_temp_stage(chip);
+ if (ret < 0)
+ return ret;
+ chip->stage = ret;
+ chip->temp = DEFAULT_TEMP;
+
+ if (chip->stage)
+ chip->temp = qpnp_tm_decode_temp(chip, chip->stage);
+
+ return ret;
+}
+
+/* This function initializes threshold control and disables shutdown override. */
+static int qpnp_tm_init(struct qpnp_tm_chip *chip)
+{
+ int ret;
+ u8 reg;
+
+ ret = chip->data->configure_trip_temps(chip);
if (ret < 0)
- goto out;
+ return ret;
/* Enable the thermal alarm PMIC module in always-on mode. */
reg = ALARM_CTRL_FORCE_ENABLE;
@@ -341,8 +752,6 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
chip->initialized = true;
-out:
- mutex_unlock(&chip->lock);
return ret;
}
@@ -350,8 +759,8 @@ static int qpnp_tm_probe(struct platform_device *pdev)
{
struct qpnp_tm_chip *chip;
struct device_node *node;
- u8 type, subtype, dig_major;
- u32 res;
+ u8 type, subtype, dig_major, dig_minor;
+ u32 res, dig_revision;
int ret, irq;
node = pdev->dev.of_node;
@@ -402,18 +811,53 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, ret,
"could not read dig_major\n");
+ ret = qpnp_tm_read(chip, QPNP_TM_REG_DIG_MINOR, &dig_minor);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "could not read dig_minor\n");
+
if (type != QPNP_TM_TYPE || (subtype != QPNP_TM_SUBTYPE_GEN1
- && subtype != QPNP_TM_SUBTYPE_GEN2)) {
+ && subtype != QPNP_TM_SUBTYPE_GEN2
+ && subtype != QPNP_TM_SUBTYPE_LITE)) {
dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n",
type, subtype);
return -ENODEV;
}
chip->subtype = subtype;
- if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 1)
- chip->temp_map = &temp_map_gen2_v1;
+ if (subtype == QPNP_TM_SUBTYPE_GEN1)
+ chip->data = &spmi_temp_alarm_data;
+ else if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major == 0)
+ chip->data = &spmi_temp_alarm_gen2_data;
+ else if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major == 1)
+ chip->data = &spmi_temp_alarm_gen2_rev1_data;
+ else if (subtype == QPNP_TM_SUBTYPE_GEN2 && dig_major >= 2)
+ chip->data = &spmi_temp_alarm_gen2_rev2_data;
+ else if (subtype == QPNP_TM_SUBTYPE_LITE)
+ chip->data = &spmi_temp_alarm_lite_data;
else
- chip->temp_map = &temp_map_gen1;
+ return -ENODEV;
+
+ if (chip->subtype == QPNP_TM_SUBTYPE_GEN2) {
+ dig_revision = (dig_major << 8) | dig_minor;
+ /*
+ * Check if stage 2 automatic partial shutdown must remain
+ * enabled to avoid potential repeated faults upon reaching
+ * over-temperature stage 3.
+ */
+ switch (dig_revision) {
+ case 0x0001:
+ case 0x0002:
+ case 0x0100:
+ case 0x0101:
+ chip->require_stage2_shutdown = true;
+ break;
+ }
+ }
+
+ ret = qpnp_tm_threshold_init(chip);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "threshold init failed\n");
/*
* Register the sensor before initializing the hardware to be able to
@@ -421,7 +865,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
* before the hardware initialization is completed.
*/
chip->tz_dev = devm_thermal_of_zone_register(
- &pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
+ &pdev->dev, 0, chip, chip->data->ops);
if (IS_ERR(chip->tz_dev))
return dev_err_probe(&pdev->dev, PTR_ERR(chip->tz_dev),
"failed to register sensor\n");
diff --git a/drivers/thermal/renesas/rcar_thermal.c b/drivers/thermal/renesas/rcar_thermal.c
index 00a66ee0a5b0..fdd7afdc4ff6 100644
--- a/drivers/thermal/renesas/rcar_thermal.c
+++ b/drivers/thermal/renesas/rcar_thermal.c
@@ -277,7 +277,7 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
return rcar_thermal_get_current_temp(priv, temp);
}
-static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
+static const struct thermal_zone_device_ops rcar_thermal_zone_ops = {
.get_temp = rcar_thermal_get_temp,
};
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index a8ad85feb68f..3beff9b6fac3 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -69,16 +70,18 @@ struct chip_tsadc_table {
* struct rockchip_tsadc_chip - hold the private data of tsadc chip
* @chn_offset: the channel offset of the first channel
* @chn_num: the channel number of tsadc chip
- * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @trim_slope: used to convert the trim code to a temperature offset in millicelsius
+ * @tshut_temp: the hardware-controlled shutdown temperature value, with no trim
* @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
* @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
* @initialize: SoC special initialize tsadc controller method
* @irq_ack: clear the interrupt
* @control: enable/disable method for the tsadc controller
- * @get_temp: get the temperature
+ * @get_temp: get the raw temperature, unadjusted by trim
* @set_alarm_temp: set the high temperature interrupt
* @set_tshut_temp: set the hardware-controlled shutdown temperature
* @set_tshut_mode: set the hardware-controlled shutdown mode
+ * @get_trim_code: convert a hardware temperature code into a trim code
* @table: the chip-specific conversion table
*/
struct rockchip_tsadc_chip {
@@ -86,6 +89,9 @@ struct rockchip_tsadc_chip {
int chn_offset;
int chn_num;
+ /* Used to convert trim code to trim temp */
+ int trim_slope;
+
/* The hardware-controlled tshut property */
int tshut_temp;
enum tshut_mode tshut_mode;
@@ -105,6 +111,8 @@ struct rockchip_tsadc_chip {
int (*set_tshut_temp)(const struct chip_tsadc_table *table,
int chn, void __iomem *reg, int temp);
void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
+ int (*get_trim_code)(const struct chip_tsadc_table *table,
+ int code, int trim_base, int trim_base_frac);
/* Per-table methods */
struct chip_tsadc_table table;
@@ -114,12 +122,16 @@ struct rockchip_tsadc_chip {
* struct rockchip_thermal_sensor - hold the information of thermal sensor
* @thermal: pointer to the platform/configuration data
* @tzd: pointer to a thermal zone
+ * @of_node: pointer to the device_node representing this sensor, if any
* @id: identifier of the thermal sensor
+ * @trim_temp: per-sensor trim temperature value
*/
struct rockchip_thermal_sensor {
struct rockchip_thermal_data *thermal;
struct thermal_zone_device *tzd;
+ struct device_node *of_node;
int id;
+ int trim_temp;
};
/**
@@ -132,7 +144,11 @@ struct rockchip_thermal_sensor {
* @pclk: the advanced peripherals bus clock
* @grf: the general register file will be used to do static set by software
* @regs: the base address of tsadc controller
- * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @trim_base: major component of sensor trim value, in Celsius
+ * @trim_base_frac: minor component of sensor trim value, in tenths of a degree Celsius
+ * @trim: fallback thermal trim value for each channel
+ * @tshut_temp: the hardware-controlled shutdown temperature value, with no trim
+ * @trim_temp: the fallback trim temperature used when a sensor has no per-channel trim
* @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
* @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
*/
@@ -149,7 +165,12 @@ struct rockchip_thermal_data {
struct regmap *grf;
void __iomem *regs;
+ int trim_base;
+ int trim_base_frac;
+ int trim;
+
int tshut_temp;
+ int trim_temp;
enum tshut_mode tshut_mode;
enum tshut_polarity tshut_polarity;
};
@@ -249,6 +270,9 @@ struct rockchip_thermal_data {
#define GRF_CON_TSADC_CH_INV (0x10001 << 1)
+
+#define RK_MAX_TEMP (180000)
+
/**
* struct tsadc_table - code to temperature conversion table
* @code: the value of adc channel
@@ -1045,7 +1069,7 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
writel_relaxed(val, regs + TSADCV2_INT_EN);
}
-static void rk_tsadcv3_tshut_mode(int chn, void __iomem *regs,
+static void rk_tsadcv4_tshut_mode(int chn, void __iomem *regs,
enum tshut_mode mode)
{
u32 val_gpio, val_cru;
@@ -1061,6 +1085,15 @@ static void rk_tsadcv3_tshut_mode(int chn, void __iomem *regs,
writel_relaxed(val_cru, regs + TSADCV3_HSHUT_CRU_INT_EN);
}
+static int rk_tsadcv2_get_trim_code(const struct chip_tsadc_table *table,
+ int code, int trim_base, int trim_base_frac)
+{
+ int temp = trim_base * 1000 + trim_base_frac * 100;
+ u32 base_code = rk_tsadcv2_temp_to_code(table, temp);
+
+ return code - base_code;
+}
+
static const struct rockchip_tsadc_chip px30_tsadc_data = {
/* cpu, gpu */
.chn_offset = 0,
@@ -1284,6 +1317,30 @@ static const struct rockchip_tsadc_chip rk3568_tsadc_data = {
},
};
+static const struct rockchip_tsadc_chip rk3576_tsadc_data = {
+ /* top, big_core, little_core, ddr, npu, gpu */
+ .chn_offset = 0,
+ .chn_num = 6, /* six channels for tsadc */
+ .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO to PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+ .initialize = rk_tsadcv8_initialize,
+ .irq_ack = rk_tsadcv4_irq_ack,
+ .control = rk_tsadcv4_control,
+ .get_temp = rk_tsadcv4_get_temp,
+ .set_alarm_temp = rk_tsadcv3_alarm_temp,
+ .set_tshut_temp = rk_tsadcv3_tshut_temp,
+ .set_tshut_mode = rk_tsadcv4_tshut_mode,
+ .get_trim_code = rk_tsadcv2_get_trim_code,
+ .trim_slope = 923,
+ .table = {
+ .id = rk3588_code_table,
+ .length = ARRAY_SIZE(rk3588_code_table),
+ .data_mask = TSADCV4_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rk3588_tsadc_data = {
/* top, big_core0, big_core1, little_core, center, gpu, npu */
.chn_offset = 0,
@@ -1297,7 +1354,7 @@ static const struct rockchip_tsadc_chip rk3588_tsadc_data = {
.get_temp = rk_tsadcv4_get_temp,
.set_alarm_temp = rk_tsadcv3_alarm_temp,
.set_tshut_temp = rk_tsadcv3_tshut_temp,
- .set_tshut_mode = rk_tsadcv3_tshut_mode,
+ .set_tshut_mode = rk_tsadcv4_tshut_mode,
.table = {
.id = rk3588_code_table,
.length = ARRAY_SIZE(rk3588_code_table),
@@ -1343,6 +1400,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
.data = (void *)&rk3568_tsadc_data,
},
{
+ .compatible = "rockchip,rk3576-tsadc",
+ .data = (void *)&rk3576_tsadc_data,
+ },
+ {
.compatible = "rockchip,rk3588-tsadc",
.data = (void *)&rk3588_tsadc_data,
},
@@ -1387,7 +1448,7 @@ static int rockchip_thermal_set_trips(struct thermal_zone_device *tz, int low, i
__func__, sensor->id, low, high);
return tsadc->set_alarm_temp(&tsadc->table,
- sensor->id, thermal->regs, high);
+ sensor->id, thermal->regs, high + sensor->trim_temp);
}
static int rockchip_thermal_get_temp(struct thermal_zone_device *tz, int *out_temp)
@@ -1399,6 +1460,8 @@ static int rockchip_thermal_get_temp(struct thermal_zone_device *tz, int *out_te
retval = tsadc->get_temp(&tsadc->table,
sensor->id, thermal->regs, out_temp);
+ *out_temp -= sensor->trim_temp;
+
return retval;
}
@@ -1407,6 +1470,104 @@ static const struct thermal_zone_device_ops rockchip_of_thermal_ops = {
.set_trips = rockchip_thermal_set_trips,
};
+/**
+ * rockchip_get_efuse_value - read an OTP cell from a device node
+ * @np: pointer to the device node with the nvmem-cells property
+ * @cell_name: name of cell that should be read
+ * @value: pointer to where the read value will be placed
+ *
+ * Return: Negative errno on failure, in which case *value is left untouched,
+ * or 0 on success.
+ */
+static int rockchip_get_efuse_value(struct device_node *np, const char *cell_name,
+ int *value)
+{
+ struct nvmem_cell *cell;
+ int ret = 0;
+ size_t len;
+ u8 *buf;
+ int i;
+
+ cell = of_nvmem_cell_get(np, cell_name);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ if (len > sizeof(*value)) {
+ ret = -ERANGE;
+ goto exit;
+ }
+
+ /* Assemble the little-endian cell bytes into a host-order value */
+ *value = 0;
+ for (i = 0; i < len; i++)
+ *value |= (int) buf[i] << (8 * i);
+
+exit:
+ kfree(buf);
+ return ret;
+}
+
+static int rockchip_get_trim_configuration(struct device *dev, struct device_node *np,
+ struct rockchip_thermal_data *thermal)
+{
+ const struct rockchip_tsadc_chip *tsadc = thermal->chip;
+ int trim_base = 0, trim_base_frac = 0, trim = 0;
+ int trim_code;
+ int ret;
+
+ thermal->trim_base = 0;
+ thermal->trim_base_frac = 0;
+ thermal->trim = 0;
+
+ if (!tsadc->get_trim_code)
+ return 0;
+
+ ret = rockchip_get_efuse_value(np, "trim_base", &trim_base);
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+ trim_base = 30;
+ dev_dbg(dev, "trim_base is absent, defaulting to 30\n");
+ } else {
+ dev_err(dev, "failed reading nvmem value of trim_base: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+ ret = rockchip_get_efuse_value(np, "trim_base_frac", &trim_base_frac);
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+ dev_dbg(dev, "trim_base_frac is absent, defaulting to 0\n");
+ } else {
+ dev_err(dev, "failed reading nvmem value of trim_base_frac: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+ thermal->trim_base = trim_base;
+ thermal->trim_base_frac = trim_base_frac;
+
+ /*
+ * If the tsadc node contains the trim property, then it is used in the
+ * absence of per-channel trim values
+ */
+ if (!rockchip_get_efuse_value(np, "trim", &trim))
+ thermal->trim = trim;
+ if (trim) {
+ trim_code = tsadc->get_trim_code(&tsadc->table, trim,
+ trim_base, trim_base_frac);
+ thermal->trim_temp = thermal->chip->trim_slope * trim_code;
+ }
+
+ return 0;
+}
+
static int rockchip_configure_from_dt(struct device *dev,
struct device_node *np,
struct rockchip_thermal_data *thermal)
@@ -1467,6 +1628,8 @@ static int rockchip_configure_from_dt(struct device *dev,
if (IS_ERR(thermal->grf))
dev_warn(dev, "Missing rockchip,grf property\n");
+ rockchip_get_trim_configuration(dev, np, thermal);
+
return 0;
}
@@ -1477,23 +1640,50 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
int id)
{
const struct rockchip_tsadc_chip *tsadc = thermal->chip;
+ struct device *dev = &pdev->dev;
+ int trim = thermal->trim;
+ int trim_code, tshut_temp;
+ int trim_temp = 0;
int error;
+ if (thermal->trim_temp)
+ trim_temp = thermal->trim_temp;
+
+ if (tsadc->get_trim_code && sensor->of_node) {
+ error = rockchip_get_efuse_value(sensor->of_node, "trim", &trim);
+ if (error < 0 && error != -ENOENT) {
+ dev_err(dev, "failed reading trim of sensor %d: %pe\n",
+ id, ERR_PTR(error));
+ return error;
+ }
+ if (trim) {
+ trim_code = tsadc->get_trim_code(&tsadc->table, trim,
+ thermal->trim_base,
+ thermal->trim_base_frac);
+ trim_temp = thermal->chip->trim_slope * trim_code;
+ }
+ }
+
+ sensor->trim_temp = trim_temp;
+
+ dev_dbg(dev, "trim of sensor %d is %d\n", id, sensor->trim_temp);
+
+ tshut_temp = min(thermal->tshut_temp + sensor->trim_temp, RK_MAX_TEMP);
+
tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
- error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
- thermal->tshut_temp);
+ error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs, tshut_temp);
if (error)
- dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
- __func__, thermal->tshut_temp, error);
+ dev_err(dev, "%s: invalid tshut=%d, error=%d\n",
+ __func__, tshut_temp, error);
sensor->thermal = thermal;
sensor->id = id;
- sensor->tzd = devm_thermal_of_zone_register(&pdev->dev, id, sensor,
+ sensor->tzd = devm_thermal_of_zone_register(dev, id, sensor,
&rockchip_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
error = PTR_ERR(sensor->tzd);
- dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
+ dev_err(dev, "failed to register sensor %d: %d\n",
id, error);
return error;
}
@@ -1516,9 +1706,11 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct rockchip_thermal_data *thermal;
+ struct device_node *child;
int irq;
int i;
int error;
+ u32 chn;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -1569,6 +1761,18 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
thermal->chip->initialize(thermal->grf, thermal->regs,
thermal->tshut_polarity);
+ for_each_available_child_of_node(np, child) {
+ if (!of_property_read_u32(child, "reg", &chn)) {
+ if (chn < thermal->chip->chn_num)
+ thermal->sensors[chn].of_node = child;
+ else
+ dev_warn(&pdev->dev,
+ "sensor address (%d) too large, ignoring its trim\n",
+ chn);
+ }
+ }
+
for (i = 0; i < thermal->chip->chn_num; i++) {
error = rockchip_thermal_register_sensor(pdev, thermal,
&thermal->sensors[i],
@@ -1638,8 +1842,11 @@ static int __maybe_unused rockchip_thermal_suspend(struct device *dev)
static int __maybe_unused rockchip_thermal_resume(struct device *dev)
{
struct rockchip_thermal_data *thermal = dev_get_drvdata(dev);
- int i;
+ const struct rockchip_tsadc_chip *tsadc = thermal->chip;
+ struct rockchip_thermal_sensor *sensor;
+ int tshut_temp;
int error;
+ int i;
error = clk_enable(thermal->clk);
if (error)
@@ -1653,21 +1860,23 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
rockchip_thermal_reset_controller(thermal->reset);
- thermal->chip->initialize(thermal->grf, thermal->regs,
- thermal->tshut_polarity);
+ tsadc->initialize(thermal->grf, thermal->regs, thermal->tshut_polarity);
for (i = 0; i < thermal->chip->chn_num; i++) {
- int id = thermal->sensors[i].id;
+ sensor = &thermal->sensors[i];
+
+ tshut_temp = min(thermal->tshut_temp + sensor->trim_temp,
+ RK_MAX_TEMP);
- thermal->chip->set_tshut_mode(id, thermal->regs,
+ tsadc->set_tshut_mode(sensor->id, thermal->regs,
thermal->tshut_mode);
- error = thermal->chip->set_tshut_temp(&thermal->chip->table,
- id, thermal->regs,
- thermal->tshut_temp);
+ error = tsadc->set_tshut_temp(&thermal->chip->table,
+ sensor->id, thermal->regs,
+ tshut_temp);
if (error)
dev_err(dev, "%s: invalid tshut=%d, error=%d\n",
- __func__, thermal->tshut_temp, error);
+ __func__, tshut_temp, error);
}
thermal->chip->control(thermal->regs, true);
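A worked sketch of the trim arithmetic used above (helper and parameter names are
illustrative): the trim base temperature is trim_base * 1000 + trim_base_frac * 100
millicelsius, the trim code is the eFuse code minus the conversion-table code for
that base temperature, and the per-sensor offset is trim_slope * trim_code. On
rk3576, with trim_slope = 923, a trim code of 3 gives a 2769 millicelsius offset,
which is added to the tshut/alarm thresholds and subtracted from raw readings.

/* base_code stands in for rk_tsadcv2_temp_to_code(table, base_temp) */
static int rockchip_trim_offset(int efuse_code, int base_code, int trim_slope)
{
	int trim_code = efuse_code - base_code;

	/* offset in millicelsius, e.g. 923 * trim_code on rk3576 */
	return trim_slope * trim_code;
}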
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index bb96be947521..603dadcd3df5 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -41,7 +41,7 @@ static inline int thermal_get_temp(struct thermal_zone_device *thermal,
return 0;
}
-static struct thermal_zone_device_ops ops = {
+static const struct thermal_zone_device_ops ops = {
.get_temp = thermal_get_temp,
};
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index a14a37d54698..1470ca519def 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -132,7 +132,7 @@ static int st_thermal_get_temp(struct thermal_zone_device *th, int *temperature)
return 0;
}
-static struct thermal_zone_device_ops st_tz_ops = {
+static const struct thermal_zone_device_ops st_tz_ops = {
.get_temp = st_thermal_get_temp,
};
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 926f1052e6de..53a5c649f4b1 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -1206,7 +1206,7 @@ static const struct irq_domain_ops soctherm_oc_domain_ops = {
/**
* soctherm_oc_int_init() - Initial enabling of the over
* current interrupts
- * @np: The devicetree node for soctherm
+ * @fwnode: The devicetree node for soctherm
* @num_irqs: The number of new interrupt requests
*
* Sets the over current interrupt request chip data
@@ -1215,7 +1215,7 @@ static const struct irq_domain_ops soctherm_oc_domain_ops = {
* -ENOMEM (out of memory), or irq_base if the function failed to
* allocate the irqs
*/
-static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
+static int soctherm_oc_int_init(struct fwnode_handle *fwnode, int num_irqs)
{
if (!num_irqs) {
pr_info("%s(): OC interrupts are not enabled\n", __func__);
@@ -1234,10 +1234,8 @@ static int soctherm_oc_int_init(struct device_node *np, int num_irqs)
soc_irq_cdata.irq_chip.irq_set_type = soctherm_oc_irq_set_type;
soc_irq_cdata.irq_chip.irq_set_wake = NULL;
- soc_irq_cdata.domain = irq_domain_create_linear(of_fwnode_handle(np), num_irqs,
- &soctherm_oc_domain_ops,
- &soc_irq_cdata);
-
+ soc_irq_cdata.domain = irq_domain_create_linear(fwnode, num_irqs, &soctherm_oc_domain_ops,
+ &soc_irq_cdata);
if (!soc_irq_cdata.domain) {
pr_err("%s: Failed to create IRQ domain\n", __func__);
return -ENOMEM;
@@ -1968,10 +1966,9 @@ static void tegra_soctherm_throttle(struct device *dev)
static int soctherm_interrupts_init(struct platform_device *pdev,
struct tegra_soctherm *tegra)
{
- struct device_node *np = pdev->dev.of_node;
int ret;
- ret = soctherm_oc_int_init(np, TEGRA_SOC_OC_IRQ_MAX);
+ ret = soctherm_oc_int_init(dev_fwnode(&pdev->dev), TEGRA_SOC_OC_IRQ_MAX);
if (ret < 0) {
dev_err(&pdev->dev, "soctherm_oc_int_init failed\n");
return ret;
diff --git a/drivers/thermal/testing/zone.c b/drivers/thermal/testing/zone.c
index 1f4e450100e2..4257d813d572 100644
--- a/drivers/thermal/testing/zone.c
+++ b/drivers/thermal/testing/zone.c
@@ -381,7 +381,7 @@ static int tt_zone_get_temp(struct thermal_zone_device *tz, int *temp)
return 0;
}
-static struct thermal_zone_device_ops tt_zone_ops = {
+static const struct thermal_zone_device_ops tt_zone_ops = {
.get_temp = tt_zone_get_temp,
};
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 24b9055a0b6c..d80612506a33 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -40,10 +40,13 @@ temp_show(struct device *dev, struct device_attribute *attr, char *buf)
ret = thermal_zone_get_temp(tz, &temperature);
- if (ret)
- return ret;
+ if (!ret)
+ return sprintf(buf, "%d\n", temperature);
- return sprintf(buf, "%d\n", temperature);
+ if (ret == -EAGAIN)
+ return -ENODATA;
+
+ return ret;
}
static ssize_t
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 28febb95f8fa..b6c58a7e7b6a 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1450,7 +1450,7 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
return ret;
data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
- data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
+ data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
@@ -3437,7 +3437,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
}
}
-static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
if (flags)
tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
@@ -3445,7 +3445,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
tb_sw_dbg(sw, "disabling wakeup\n");
if (tb_switch_is_usb4(sw))
- return usb4_switch_set_wake(sw, flags);
+ return usb4_switch_set_wake(sw, flags, runtime);
return tb_lc_set_wake(sw, flags);
}
@@ -3521,7 +3521,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime)
tb_switch_check_wakes(sw);
/* Disable wakes */
- tb_switch_set_wake(sw, 0);
+ tb_switch_set_wake(sw, 0, true);
err = tb_switch_tmu_init(sw);
if (err)
@@ -3603,7 +3603,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
}
- tb_switch_set_wake(sw, flags);
+ tb_switch_set_wake(sw, flags, runtime);
if (tb_switch_is_usb4(sw))
usb4_switch_set_sleep(sw);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 87afd5a7c504..f503bad86413 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1317,7 +1317,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size);
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime);
int usb4_switch_set_sleep(struct tb_switch *sw);
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index fce3c0f2354a..fdae76c8f728 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -403,12 +403,12 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
* usb4_switch_set_wake() - Enabled/disable wake
* @sw: USB4 router
* @flags: Wakeup flags (%0 to disable)
+ * @runtime: Wake is being programmed during system runtime
*
* Enables/disables router to wake up from sleep.
*/
-int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
- struct usb4_port *usb4;
struct tb_port *port;
u64 route = tb_route(sw);
u32 val;
@@ -438,13 +438,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
val |= PORT_CS_19_WOU4;
} else {
bool configured = val & PORT_CS_19_PC;
- usb4 = port->usb4;
+ bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);
- if (((flags & TB_WAKE_ON_CONNECT) &&
- device_may_wakeup(&usb4->dev)) && !configured)
+ if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
val |= PORT_CS_19_WOC;
- if (((flags & TB_WAKE_ON_DISCONNECT) &&
- device_may_wakeup(&usb4->dev)) && configured)
+ if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
val |= PORT_CS_19_WOD;
if ((flags & TB_WAKE_ON_USB4) && configured)
val |= PORT_CS_19_WOU4;
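A minimal sketch of the downstream-port wake selection after this change (the
helper name is illustrative; PORT_CS_19_* and TB_WAKE_ON_* are the driver's
existing definitions): during runtime suspend the connect/disconnect wakes are
armed regardless of the port's device_may_wakeup() setting, while for system
sleep they still depend on it.

static u32 usb4_port_wake_bits(unsigned int flags, bool runtime,
			       bool may_wakeup, bool configured)
{
	bool wakeup = runtime || may_wakeup;
	u32 val = 0;

	if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
		val |= PORT_CS_19_WOC;
	if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
		val |= PORT_CS_19_WOD;
	if ((flags & TB_WAKE_ON_USB4) && configured)
		val |= PORT_CS_19_WOU4;

	return val;
}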
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 0213381fa358..d16c207a1a9b 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -399,7 +399,7 @@ static int serdev_drv_probe(struct device *dev)
const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
int ret;
- ret = dev_pm_domain_attach(dev, true);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
if (ret)
return ret;
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 508e8c6f01d4..884fefbfd5a1 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -954,7 +954,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
__func__);
return 0;
}
- dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE);
+ dma_sync_sg_for_device(port->dev, priv->sg_tx_p, num, DMA_TO_DEVICE);
priv->desc_tx = desc;
desc->callback = pch_dma_tx_complete;
desc->callback_param = priv;
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index cb3b127b06b6..22749ab0428a 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -93,6 +94,7 @@ static void serial_base_ctrl_release(struct device *dev)
{
struct serial_ctrl_device *ctrl_dev = to_serial_base_ctrl_device(dev);
+ of_node_put(dev->of_node);
kfree(ctrl_dev);
}
@@ -140,6 +142,7 @@ static void serial_base_port_release(struct device *dev)
{
struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+ of_node_put(dev->of_node);
kfree(port_dev);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 3e1215f7a9a0..256fe8c86828 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5751,6 +5751,7 @@ static void port_event(struct usb_hub *hub, int port1)
struct usb_device *hdev = hub->hdev;
u16 portstatus, portchange;
int i = 0;
+ int err;
connect_change = test_bit(port1, hub->change_bits);
clear_bit(port1, hub->event_bits);
@@ -5847,8 +5848,11 @@ static void port_event(struct usb_hub *hub, int port1)
} else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
|| udev->state == USB_STATE_NOTATTACHED) {
dev_dbg(&port_dev->dev, "do warm reset, port only\n");
- if (hub_port_reset(hub, port1, NULL,
- HUB_BH_RESET_TIME, true) < 0)
+ err = hub_port_reset(hub, port1, NULL,
+ HUB_BH_RESET_TIME, true);
+ if (!udev && err == -ENOTCONN)
+ connect_change = 0;
+ else if (err < 0)
hub_port_disable(hub, port1, 1);
} else {
dev_dbg(&port_dev->dev, "do warm reset, full device\n");
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index d5b622f78cf3..0637bfbc054e 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -5389,20 +5389,34 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg)
if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) {
/* ULPI interface */
gpwrdn |= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY;
- }
- dwc2_writel(hsotg, gpwrdn, GPWRDN);
- udelay(10);
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
- /* Suspend the Phy Clock */
- pcgcctl = dwc2_readl(hsotg, PCGCTL);
- pcgcctl |= PCGCTL_STOPPCLK;
- dwc2_writel(hsotg, pcgcctl, PCGCTL);
- udelay(10);
+ /* Suspend the Phy Clock */
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
- gpwrdn = dwc2_readl(hsotg, GPWRDN);
- gpwrdn |= GPWRDN_PMUACTV;
- dwc2_writel(hsotg, gpwrdn, GPWRDN);
- udelay(10);
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ } else {
+ /* UTMI+ Interface */
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn |= GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+
+ pcgcctl = dwc2_readl(hsotg, PCGCTL);
+ pcgcctl |= PCGCTL_STOPPCLK;
+ dwc2_writel(hsotg, pcgcctl, PCGCTL);
+ udelay(10);
+ }
/* Set flag to indicate that we are in hibernation */
hsotg->hibernated = 1;
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 7334de85ad10..ca7e1c02773a 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -680,12 +680,12 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
ret = reset_control_deassert(qcom->resets);
if (ret) {
dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret);
- goto reset_assert;
+ return ret;
}
ret = clk_bulk_prepare_enable(qcom->num_clocks, qcom->clks);
if (ret < 0)
- goto reset_assert;
+ return ret;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
@@ -755,8 +755,6 @@ remove_core:
dwc3_core_remove(&qcom->dwc);
clk_disable:
clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);
-reset_assert:
- reset_control_assert(qcom->resets);
return ret;
}
@@ -771,7 +769,6 @@ static void dwc3_qcom_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(qcom->num_clocks, qcom->clks);
dwc3_qcom_interconnect_exit(qcom);
- reset_control_assert(qcom->resets);
}
static int dwc3_qcom_pm_suspend(struct device *dev)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index fba2a56dae97..f94ea196ce54 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1065,6 +1065,8 @@ static ssize_t webusb_landingPage_store(struct config_item *item, const char *pa
unsigned int bytes_to_strip = 0;
int l = len;
+ if (!len)
+ return len;
if (page[l - 1] == '\n') {
--l;
++bytes_to_strip;
@@ -1188,6 +1190,8 @@ static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
struct gadget_info *gi = os_desc_item_to_gadget_info(item);
int res, l;
+ if (!len)
+ return len;
l = min_t(int, len, OS_STRING_QW_SIGN_LEN >> 1);
if (page[l - 1] == '\n')
--l;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 2dea9e42a0f8..ea5f0af1e8d2 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2369,8 +2369,7 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
for (; count; --count, ++epfile) {
BUG_ON(mutex_is_locked(&epfile->mutex));
if (epfile->dentry) {
- d_delete(epfile->dentry);
- dput(epfile->dentry);
+ simple_recursive_removal(epfile->dentry, NULL);
epfile->dentry = NULL;
}
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index fcce84a726f2..b51e132b0cd2 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1561,7 +1561,6 @@ static void destroy_ep_files (struct dev_data *dev)
spin_lock_irq (&dev->lock);
while (!list_empty(&dev->epfiles)) {
struct ep_data *ep;
- struct inode *parent;
struct dentry *dentry;
/* break link to FS */
@@ -1571,7 +1570,6 @@ static void destroy_ep_files (struct dev_data *dev)
dentry = ep->dentry;
ep->dentry = NULL;
- parent = d_inode(dentry->d_parent);
/* break link to controller */
mutex_lock(&ep->lock);
@@ -1586,10 +1584,7 @@ static void destroy_ep_files (struct dev_data *dev)
put_ep (ep);
/* break link to dcache */
- inode_lock(parent);
- d_delete (dentry);
- dput (dentry);
- inode_unlock(parent);
+ simple_recursive_removal(dentry, NULL);
spin_lock_irq (&dev->lock);
}
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index f5d09a91e554..b97fb7b0cb2c 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2348,15 +2348,14 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
dev->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (gpio_is_valid(dev->mach->gpio_pullup)) {
- retval = devm_gpio_request(&pdev->dev, dev->mach->gpio_pullup,
- "pca25x_udc GPIO PULLUP");
+ retval = devm_gpio_request_one(&pdev->dev, dev->mach->gpio_pullup,
+ GPIOF_OUT_INIT_LOW, "pca25x_udc GPIO PULLUP");
if (retval) {
dev_dbg(&pdev->dev,
"can't get pullup gpio %d, err: %d\n",
dev->mach->gpio_pullup, retval);
goto err;
}
- gpio_direction_output(dev->mach->gpio_pullup, 0);
}
timer_setup(&dev->timer, udc_watchdog, 0);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 6869c58367f2..caf4d4cd4b75 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1913,6 +1913,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
* gadget driver here and have everything work;
* that currently misbehaves.
*/
+ usb_gadget_set_state(g, USB_STATE_NOTATTACHED);
/* Force check of devctl register for PM runtime */
pm_runtime_mark_last_busy(musb->controller);
@@ -2019,6 +2020,7 @@ void musb_g_disconnect(struct musb *musb)
case OTG_STATE_B_PERIPHERAL:
case OTG_STATE_B_IDLE:
musb_set_state(musb, OTG_STATE_B_IDLE);
+ usb_gadget_set_state(&musb->g, USB_STATE_NOTATTACHED);
break;
case OTG_STATE_B_SRP_INIT:
break;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6ac7a0a5cf07..abfcfca3f971 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -803,6 +803,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ { USB_DEVICE(FTDI_NDI_VID, FTDI_NDI_EMGUIDE_GEMINI_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
{ USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 9acb6f837327..4cc1fae8acb9 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -204,6 +204,9 @@
#define FTDI_NDI_FUTURE_3_PID 0xDA73 /* NDI future device #3 */
#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
+#define FTDI_NDI_VID 0x23F2
+#define FTDI_NDI_EMGUIDE_GEMINI_PID 0x0003 /* NDI Emguide Gemini */
+
/*
* ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
*/
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 27879cc57536..147ca50c94be 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1415,6 +1415,9 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x30), /* Telit FE910C04 (ECM) */
+ .driver_info = NCTRL(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */
.driver_info = NCTRL(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
@@ -2343,6 +2346,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */
.driver_info = RSVD(5) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff), /* Foxconn T99W640 MBIM */
+ .driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
diff --git a/drivers/virt/coco/efi_secret/efi_secret.c b/drivers/virt/coco/efi_secret/efi_secret.c
index 1864f9f80617..5946c5abeae8 100644
--- a/drivers/virt/coco/efi_secret/efi_secret.c
+++ b/drivers/virt/coco/efi_secret/efi_secret.c
@@ -31,8 +31,6 @@
struct efi_secret {
struct dentry *secrets_dir;
- struct dentry *fs_dir;
- struct dentry *fs_files[EFI_SECRET_NUM_FILES];
void __iomem *secret_data;
u64 secret_data_len;
};
@@ -119,10 +117,8 @@ static void wipe_memory(void *addr, size_t size)
static int efi_secret_unlink(struct inode *dir, struct dentry *dentry)
{
- struct efi_secret *s = efi_secret_get();
struct inode *inode = d_inode(dentry);
struct secret_entry *e = (struct secret_entry *)inode->i_private;
- int i;
if (e) {
/* Zero out the secret data */
@@ -132,19 +128,7 @@ static int efi_secret_unlink(struct inode *dir, struct dentry *dentry)
inode->i_private = NULL;
- for (i = 0; i < EFI_SECRET_NUM_FILES; i++)
- if (s->fs_files[i] == dentry)
- s->fs_files[i] = NULL;
-
- /*
- * securityfs_remove tries to lock the directory's inode, but we reach
- * the unlink callback when it's already locked
- */
- inode_unlock(dir);
- securityfs_remove(dentry);
- inode_lock(dir);
-
- return 0;
+ return simple_unlink(inode, dentry);
}
static const struct inode_operations efi_secret_dir_inode_operations = {
@@ -194,15 +178,6 @@ unmap:
static void efi_secret_securityfs_teardown(struct platform_device *dev)
{
struct efi_secret *s = efi_secret_get();
- int i;
-
- for (i = (EFI_SECRET_NUM_FILES - 1); i >= 0; i--) {
- securityfs_remove(s->fs_files[i]);
- s->fs_files[i] = NULL;
- }
-
- securityfs_remove(s->fs_dir);
- s->fs_dir = NULL;
securityfs_remove(s->secrets_dir);
s->secrets_dir = NULL;
@@ -217,7 +192,7 @@ static int efi_secret_securityfs_setup(struct platform_device *dev)
unsigned char *ptr;
struct secret_header *h;
struct secret_entry *e;
- struct dentry *dent;
+ struct dentry *dent, *dir;
char guid_str[EFI_VARIABLE_GUID_LEN + 1];
ptr = (void __force *)s->secret_data;
@@ -240,8 +215,6 @@ static int efi_secret_securityfs_setup(struct platform_device *dev)
}
s->secrets_dir = NULL;
- s->fs_dir = NULL;
- memset(s->fs_files, 0, sizeof(s->fs_files));
dent = securityfs_create_dir("secrets", NULL);
if (IS_ERR(dent)) {
@@ -251,14 +224,13 @@ static int efi_secret_securityfs_setup(struct platform_device *dev)
}
s->secrets_dir = dent;
- dent = securityfs_create_dir("coco", s->secrets_dir);
- if (IS_ERR(dent)) {
+ dir = securityfs_create_dir("coco", s->secrets_dir);
+ if (IS_ERR(dir)) {
dev_err(&dev->dev, "Error creating coco securityfs directory entry err=%ld\n",
- PTR_ERR(dent));
- return PTR_ERR(dent);
+ PTR_ERR(dir));
+ return PTR_ERR(dir);
}
- d_inode(dent)->i_op = &efi_secret_dir_inode_operations;
- s->fs_dir = dent;
+ d_inode(dir)->i_op = &efi_secret_dir_inode_operations;
bytes_left = h->len - sizeof(*h);
ptr += sizeof(*h);
@@ -274,15 +246,14 @@ static int efi_secret_securityfs_setup(struct platform_device *dev)
if (efi_guidcmp(e->guid, NULL_GUID)) {
efi_guid_to_str(&e->guid, guid_str);
- dent = securityfs_create_file(guid_str, 0440, s->fs_dir, (void *)e,
+ dent = securityfs_create_file(guid_str, 0440, dir, (void *)e,
&efi_secret_bin_file_fops);
if (IS_ERR(dent)) {
dev_err(&dev->dev, "Error creating efi_secret securityfs entry\n");
ret = PTR_ERR(dent);
goto err_cleanup;
}
-
- s->fs_files[i++] = dent;
+ i++;
}
ptr += e->len;
bytes_left -= e->len;
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 1f60c9d5cb18..a7b297dae489 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -329,20 +329,21 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
unsigned int this_vecs = affd->set_size[i];
+ unsigned int nr_masks;
int j;
- struct cpumask *result = group_cpus_evenly(this_vecs);
+ struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
if (!result) {
kfree(masks);
return NULL;
}
- for (j = 0; j < this_vecs; j++)
+ for (j = 0; j < nr_masks; j++)
cpumask_copy(&masks[curvec + j], &result[j]);
kfree(result);
- curvec += this_vecs;
- usedvecs += this_vecs;
+ curvec += nr_masks;
+ usedvecs += nr_masks;
}
/* Fill out vectors at the end that don't need affinity */