Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 1
-rw-r--r--  drivers/clk/renesas/clk-mstp.c | 20
-rw-r--r--  drivers/clk/sunxi-ng/ccu_mp.c | 2
-rw-r--r--  drivers/cpufreq/cpufreq.c | 20
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r--  drivers/firewire/core-cdev.c | 2
-rw-r--r--  drivers/firmware/tegra/bpmp-tegra186.c | 5
-rw-r--r--  drivers/gpio/gpiolib-acpi-core.c | 11
-rw-r--r--  drivers/gpio/gpiolib-acpi-quirks.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 39
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 6
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_actions_abi.h | 1
-rw-r--r--  drivers/gpu/drm/xe/abi/guc_klvs_abi.h | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_device_sysfs.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 22
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue_types.h | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 25
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_guc.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_exec_queue_types.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 118
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.c | 35
-rw-r--r--  drivers/gpu/drm/xe/xe_nvm.c | 5
-rw-r--r--  drivers/gpu/drm/xe/xe_tile_sysfs.c | 12
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 4
-rw-r--r--  drivers/hid/Kconfig | 2
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_client.c | 12
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_common.h | 3
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_pcie.c | 4
-rw-r--r--  drivers/hid/hid-asus.c | 3
-rw-r--r--  drivers/hid/hid-cp2112.c | 10
-rw-r--r--  drivers/hid/hid-lenovo.c | 4
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c | 2
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h | 2
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c | 2
-rw-r--r--  drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 1
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h | 1
-rw-r--r--  drivers/iommu/amd/init.c | 9
-rw-r--r--  drivers/iommu/amd/io_pgtable.c | 25
-rw-r--r--  drivers/iommu/intel/iommu.c | 7
-rw-r--r--  drivers/iommu/iommufd/device.c | 3
-rw-r--r--  drivers/iommu/iommufd/eventq.c | 9
-rw-r--r--  drivers/iommu/iommufd/iommufd_private.h | 3
-rw-r--r--  drivers/iommu/iommufd/main.c | 59
-rw-r--r--  drivers/iommu/s390-iommu.c | 29
-rw-r--r--  drivers/md/md-linear.c | 1
-rw-r--r--  drivers/md/raid0.c | 1
-rw-r--r--  drivers/md/raid1.c | 1
-rw-r--r--  drivers/md/raid10.c | 1
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/mmc/host/mvsdio.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 68
-rw-r--r--  drivers/mmc/host/sdhci-uhs2.c | 3
-rw-r--r--  drivers/mmc/host/sdhci.c | 34
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-airoha.c | 31
-rw-r--r--  drivers/pmdomain/core.c | 20
-rw-r--r--  drivers/pmdomain/renesas/rcar-gen4-sysc.c | 1
-rw-r--r--  drivers/pmdomain/renesas/rcar-sysc.c | 3
-rw-r--r--  drivers/pmdomain/renesas/rmobile-sysc.c | 3
-rw-r--r--  drivers/pmdomain/rockchip/pm-domains.c | 2
-rw-r--r--  drivers/reset/reset-eyeq.c | 11
-rw-r--r--  drivers/ufs/core/ufs-mcq.c | 4
-rw-r--r--  drivers/vhost/net.c | 40
-rw-r--r--  drivers/vhost/scsi.c | 2
77 files changed, 704 insertions(+), 233 deletions(-)
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e09930c2b226..91f3b8afb63c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5bc473c2adb3..2f65fe2c6bdf 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static struct device_node *cpg_mstp_pd_np __initdata = NULL;
+static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata = NULL;
+
void __init cpg_mstp_add_clk_domain(struct device_node *np)
{
struct generic_pm_domain *pd;
@@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
- of_genpd_add_provider_simple(np, pd);
+ cpg_mstp_pd_np = of_node_get(np);
+ cpg_mstp_pd_genpd = pd;
+}
+
+static int __init cpg_mstp_pd_init_provider(void)
+{
+ int error;
+
+ if (!cpg_mstp_pd_np)
+ return -ENODEV;
+
+ error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
+
+ of_node_put(cpg_mstp_pd_np);
+ return error;
}
+postcore_initcall(cpg_mstp_pd_init_provider);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 354c981943b6..4221b1888b38 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p &= (1 << cmp->p.width) - 1;
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
- rate = (parent_rate / p) / m;
+ rate = (parent_rate / (p + cmp->p.offset)) / m;
else
rate = (parent_rate >> p) / m;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fc7eace8b65b..58e3839a2140 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2953,6 +2953,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("cpufreq: supports frequency invariance\n");
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@@ -2974,21 +2983,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
+ if (!cpufreq_driver->setpolicy)
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
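As a minimal sketch of the enable/disable pairing these cpufreq hunks rearrange: the static key is now enabled before later registration steps, so any error path taken afterwards must disable it again to leave the key balanced. The names and the failing step below are illustrative, not the driver's actual calls.

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_freq_invariance);

static int example_later_step(void)
{
	return 0;	/* stand-in for a later registration step that may fail */
}

static int example_register(bool has_setpolicy)
{
	int ret;

	if (!has_setpolicy)
		static_branch_enable(&example_freq_invariance);

	ret = example_later_step();
	if (ret && !has_setpolicy)
		static_branch_disable(&example_freq_invariance);	/* keep the key balanced */

	return ret;
}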
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e058ba027792..9f5ccc1720cb 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -2430,7 +2430,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
- __sev_platform_shutdown_locked(NULL);
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 78b10c6ef7fe..2e93189d7142 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -41,7 +41,7 @@
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
-#define FW_CDEV_KERNEL_VERSION 5
+#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index 7cfc5fdfa49d..64863db7a715 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -198,7 +198,10 @@ static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
if (err < 0) {
- dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ if (err != -ENODEV)
+ dev_warn(bpmp->dev,
+ "failed to parse memory region: %d\n", err);
+
return err;
}
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 12b24a717e43..284e762d92c4 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -942,8 +942,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
+ int ret;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
if (IS_ERR(desc))
@@ -957,6 +958,12 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
+
+ /* ACPI uses hundredths of milliseconds units */
+ ret = gpio_set_debounce_timeout(desc, info.debounce * 10);
+ if (ret)
+ return ERR_PTR(ret);
+
return desc;
}
@@ -992,7 +999,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int ret;
for (i = 0, idx = 0; idx <= index; i++) {
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
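For reference, a minimal sketch of the unit conversion behind the new gpio_set_debounce_timeout() call: ACPI expresses DebounceTimeout in hundredths of a millisecond (10 us units), while the gpiolib debounce API takes microseconds. The helper name is illustrative, not part of the patch.

/* Convert an ACPI DebounceTimeout value (10 us units) to microseconds. */
static unsigned int acpi_debounce_to_us(unsigned int acpi_debounce)
{
	/* e.g. an ACPI value of 250 -> 2500 us (2.5 ms) */
	return acpi_debounce * 10;
}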
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index bfb04e67c4bc..7b95d1b03361 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -319,6 +319,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
},
{
/*
+ * Same as G1619-04. New model.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
* Spurious wakeups from GPIO 11
* Found in BIOS 1.04
* https://gitlab.freedesktop.org/drm/amd/-/issues/3954
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fbe7616555c8..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
- if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
- if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 33eb4826b58b..aa88bad7416b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -426,7 +426,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@@ -516,11 +518,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 01d234cf8156..c8459337fcb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5136,7 +5136,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
- if (!adev->in_s0ix && !adev->in_runpm)
+ if (!adev->in_runpm)
amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
@@ -5156,10 +5156,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
amdgpu_device_ip_suspend_phase1(adev);
- if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- amdgpu_userq_suspend(adev);
- }
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ amdgpu_userq_suspend(adev);
r = amdgpu_device_evict_resources(adev);
if (r)
@@ -5254,15 +5252,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- if (r)
- goto exit;
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
- r = amdgpu_userq_resume(adev);
- if (r)
- goto exit;
- }
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -5275,7 +5271,7 @@ exit:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
- if (!adev->in_s0ix && !r && !adev->in_runpm)
+ if (!r && !adev->in_runpm)
r = amdgpu_amdkfd_resume_process(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c85de8c8f6f5..c37527704d43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1654,6 +1654,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.pfp_fw_version >= 102 &&
+ adev->gfx.mec_fw_version >= 66 &&
+ adev->mes.fw_version[0] >= 128) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7e749f9b6d69..349c351e242b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1550,6 +1550,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.unhalt(node->dqm);
+ if (r) {
+ dev_err(kfd_device, "Error in starting scheduler\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1567,6 +1586,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.halt(node->dqm);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4e86370ae705..97d9eba17963 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -8717,7 +8717,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
+{ /*
+ * We cannot be sure that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ * So we have to go through the CRTC to find the right IRQ.
+ */
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+ struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8770,7 +8779,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+ }
+
} else {
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b47cb4a5f488..408f05dfab90 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2236,7 +2236,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
return ret;
}
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) {
ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index c0ad8f59e483..8b3304dedcd9 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2677,7 +2677,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2746,8 +2746,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
- if (platform->pdata.intp_irq)
+ if (platform->pdata.intp_irq) {
+ enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
+ }
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index a614d1384f71..38726ae1bf15 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
- if (!mhdp_state->current_mode)
- return;
+ if (!mhdp_state->current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
drm_mode_set_name(mhdp_state->current_mode);
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index db9b089ef62c..86853535fb7b 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2432,8 +2432,6 @@ static const struct drm_gpuvm_ops lock_ops = {
*
* The expected usage is::
*
- * .. code-block:: c
- *
* vm_bind {
* struct drm_exec exec;
*
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 41228478b21c..0a3a3f6a5f9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -546,7 +546,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
luminance_range->max_luminance,
panel->vbt.backlight.pwm_freq_hz,
intel_dp->edp_dpcd, &current_level, &current_mode,
- false);
+ panel->backlight.edp.vesa.luminance_control_support);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 81eb046aeebf..b9f67d7a00d8 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -117,6 +117,7 @@ enum xe_guc_action {
XE_GUC_ACTION_ENTER_S_STATE = 0x501,
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 0366a9da5977..d7719d0e36ca 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -17,6 +17,7 @@
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
* | | | - `GuC Opt In Feature KLVs`_ |
+ * | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -153,6 +154,30 @@ enum {
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
/**
+ * DOC: GuC Scheduling Policies KLVs
+ *
+ * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
+ *
+ * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
+ * Some platforms do not allow concurrent execution of RCS and CCS
+ * workloads from different address spaces. By default, the GuC prioritizes
+ * RCS submissions over CCS ones, which can lead to CCS workloads being
+ * significantly (or completely) starved of execution time. This KLV allows
+ * the driver to specify a quantum (in ms) and a ratio (percentage value
+ * between 0 and 100), and the GuC will prioritize the CCS for that
+ * percentage of each quantum. For example, specifying 100ms and 30% will
+ * make the GuC prioritize the CCS for 30ms of every 100ms.
+ * Note that this does not necessarly mean that RCS and CCS engines will
+ * only be active for their percentage of the quantum, as the restriction
+ * only kicks in if both classes are fully busy with non-compatible address
+ * spaces; i.e., if one engine is idle or running the same address space,
+ * a pending job on the other engine will still be submitted to the HW no
+ * matter what the ratio is
+ */
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
+
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index bd9015761aa0..3e3b2d9033a7 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -311,12 +311,16 @@ int xe_device_sysfs_init(struct xe_device *xe)
if (xe->info.platform == XE_BATTLEMAGE) {
ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
if (ret)
- return ret;
+ goto cleanup;
ret = late_bind_create_files(dev);
if (ret)
- return ret;
+ goto cleanup;
}
return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
+
+cleanup:
+ xe_device_sysfs_fini(xe);
+ return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 8991b4aed440..c07edcda99c5 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -151,6 +151,16 @@ err_lrc:
return err;
}
+static void __xe_exec_queue_fini(struct xe_exec_queue *q)
+{
+ int i;
+
+ q->ops->fini(q);
+
+ for (i = 0; i < q->width; ++i)
+ xe_lrc_put(q->lrc[i]);
+}
+
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hwe, u32 flags,
@@ -181,11 +191,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (xe_exec_queue_uses_pxp(q)) {
err = xe_pxp_exec_queue_add(xe->pxp, q);
if (err)
- goto err_post_alloc;
+ goto err_post_init;
}
return q;
+err_post_init:
+ __xe_exec_queue_fini(q);
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
@@ -283,13 +295,11 @@ void xe_exec_queue_destroy(struct kref *ref)
xe_exec_queue_put(eq);
}
- q->ops->fini(q);
+ q->ops->destroy(q);
}
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
- int i;
-
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
* and wakeup any waiters.
@@ -298,9 +308,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
wake_up_var(&q->xef->exec_queue.pending_removal);
- for (i = 0; i < q->width; ++i)
- xe_lrc_put(q->lrc[i]);
-
+ __xe_exec_queue_fini(q);
__xe_exec_queue_free(q);
}
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index cc1cffb5c87f..1c9d03f2a3e5 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -166,8 +166,14 @@ struct xe_exec_queue_ops {
int (*init)(struct xe_exec_queue *q);
/** @kill: Kill inflight submissions for backend */
void (*kill)(struct xe_exec_queue *q);
- /** @fini: Fini exec queue for submission backend */
+ /** @fini: Undoes the init() for submission backend */
void (*fini)(struct xe_exec_queue *q);
+ /**
+ * @destroy: Destroy exec queue for submission backend. The backend
+ * function must call xe_exec_queue_fini() (which will in turn call the
+ * fini() backend function) to ensure the queue is properly cleaned up.
+ */
+ void (*destroy)(struct xe_exec_queue *q);
/** @set_priority: Set priority for exec queue */
int (*set_priority)(struct xe_exec_queue *q,
enum xe_exec_queue_priority priority);
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 788f56b066b6..f83d421ac9d3 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -385,10 +385,20 @@ err_free:
return err;
}
-static void execlist_exec_queue_fini_async(struct work_struct *w)
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_execlist_exec_queue *exl = q->execlist;
+
+ drm_sched_entity_fini(&exl->entity);
+ drm_sched_fini(&exl->sched);
+
+ kfree(exl);
+}
+
+static void execlist_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_execlist_exec_queue *ee =
- container_of(w, struct xe_execlist_exec_queue, fini_async);
+ container_of(w, struct xe_execlist_exec_queue, destroy_async);
struct xe_exec_queue *q = ee->q;
struct xe_execlist_exec_queue *exl = q->execlist;
struct xe_device *xe = gt_to_xe(q->gt);
@@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- drm_sched_entity_fini(&exl->entity);
- drm_sched_fini(&exl->sched);
- kfree(exl);
-
xe_exec_queue_fini(q);
}
@@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
/* NIY */
}
-static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
{
- INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
- queue_work(system_unbound_wq, &q->execlist->fini_async);
+ INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
+ queue_work(system_unbound_wq, &q->execlist->destroy_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.init = execlist_exec_queue_init,
.kill = execlist_exec_queue_kill,
.fini = execlist_exec_queue_fini,
+ .destroy = execlist_exec_queue_destroy,
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index 415140936f11..92c4ba52db0c 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
bool has_run;
- struct work_struct fini_async;
+ struct work_struct destroy_async;
enum xe_exec_queue_priority active_priority;
struct list_head active_link;
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index c8eda36546d3..17634195cdc2 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -41,6 +41,7 @@
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
+#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
@@ -97,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
* reload
*/
- gt->uc.guc.submission_state.enabled = false;
+ xe_guc_submit_disable(&gt->uc.guc);
}
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 494909f74eb2..d84831a03610 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1632,7 +1632,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
- fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index b1d1d6da3758..270fc3792493 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -880,9 +880,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
return ret;
}
- guc->submission_state.enabled = true;
-
- return 0;
+ return xe_guc_submit_enable(guc);
}
int xe_guc_reset(struct xe_guc *guc)
@@ -1579,7 +1577,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
{
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
- guc->submission_state.enabled = false;
+ xe_guc_submit_disable(guc);
}
int xe_guc_reset_prepare(struct xe_guc *guc)
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index a3f421e2adc0..c30c0e3ccbbb 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
- /** @fini_async: do final fini async from this worker */
- struct work_struct fini_async;
+ /** @destroy_async: do final destroy async from this worker */
+ struct work_struct destroy_async;
/** @resume_time: time of last resume */
u64 resume_time;
/** @state: GuC specific state for this xe_exec_queue */
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index cafb47711e9b..0104afbc941c 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -32,6 +32,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@@ -316,6 +317,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
+/*
+ * Given that we want to guarantee enough RCS throughput to avoid missing
+ * frames, we set the yield policy to 20% of each 80ms interval.
+ */
+#define RC_YIELD_DURATION 80 /* in ms */
+#define RC_YIELD_RATIO 20 /* in percent */
+static u32 *emit_render_compute_yield_klv(u32 *emit)
+{
+ *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
+ *emit++ = RC_YIELD_DURATION;
+ *emit++ = RC_YIELD_RATIO;
+
+ return emit;
+}
+
+#define SCHEDULING_POLICY_MAX_DWORDS 16
+static int guc_init_global_schedule_policy(struct xe_guc *guc)
+{
+ u32 data[SCHEDULING_POLICY_MAX_DWORDS];
+ u32 *emit = data;
+ u32 count = 0;
+ int ret;
+
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
+ return 0;
+
+ *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
+
+ if (CCS_MASK(guc_to_gt(guc)))
+ emit = emit_render_compute_yield_klv(emit);
+
+ count = emit - data;
+ if (count > 1) {
+ xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
+
+ ret = xe_guc_ct_send_block(&guc->ct, data, count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC sheduling policies: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int xe_guc_submit_enable(struct xe_guc *guc)
+{
+ int ret;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ return ret;
+
+ guc->submission_state.enabled = true;
+
+ return 0;
+}
+
+void xe_guc_submit_disable(struct xe_guc *guc)
+{
+ guc->submission_state.enabled = false;
+}
+
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
@@ -1277,48 +1343,57 @@ rearm:
return DRM_GPU_SCHED_STAT_NO_HANG;
}
-static void __guc_exec_queue_fini_async(struct work_struct *w)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_guc_exec_queue *ge = q->guc;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ release_guc_id(guc, q);
+ xe_sched_entity_fini(&ge->entity);
+ xe_sched_fini(&ge->sched);
+
+ /*
+ * RCU free due sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(ge, rcu);
+}
+
+static void __guc_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_guc_exec_queue *ge =
- container_of(w, struct xe_guc_exec_queue, fini_async);
+ container_of(w, struct xe_guc_exec_queue, destroy_async);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
- release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- xe_sched_entity_fini(&ge->entity);
- xe_sched_fini(&ge->sched);
- /*
- * RCU free due sched being exported via DRM scheduler fences
- * (timeline name).
- */
- kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
+
xe_pm_runtime_put(guc_to_xe(guc));
}
-static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
- INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+ INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
/* We must block on kernel engines so slabs are empty on driver unload */
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
- __guc_exec_queue_fini_async(&q->guc->fini_async);
+ __guc_exec_queue_destroy_async(&q->guc->destroy_async);
else
- queue_work(xe->destroy_wq, &q->guc->fini_async);
+ queue_work(xe->destroy_wq, &q->guc->destroy_async);
}
-static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
{
/*
* Might be done from within the GPU scheduler, need to do async as we
@@ -1327,7 +1402,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
* this we and don't really care when everything is fini'd, just that it
* is.
*/
- guc_exec_queue_fini_async(q);
+ guc_exec_queue_destroy_async(q);
}
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
@@ -1341,7 +1416,7 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
if (exec_queue_registered(q))
disable_scheduling_deregister(guc, q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
@@ -1574,14 +1649,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
#define STATIC_MSG_CLEANUP 0
#define STATIC_MSG_SUSPEND 1
#define STATIC_MSG_RESUME 2
-static void guc_exec_queue_fini(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
guc_exec_queue_add_msg(q, msg, CLEANUP);
else
- __guc_exec_queue_fini(exec_queue_to_guc(q), q);
+ __guc_exec_queue_destroy(exec_queue_to_guc(q), q);
}
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -1711,6 +1786,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.init = guc_exec_queue_init,
.kill = guc_exec_queue_kill,
.fini = guc_exec_queue_fini,
+ .destroy = guc_exec_queue_destroy,
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
@@ -1732,7 +1808,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else if (exec_queue_destroyed(q))
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
if (q->guc->suspend_pending) {
set_exec_queue_suspended(q);
@@ -1989,7 +2065,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 9b71a986c6ca..0d126b807c10 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -13,6 +13,8 @@ struct xe_exec_queue;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
+int xe_guc_submit_enable(struct xe_guc *guc);
+void xe_guc_submit_disable(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index c17ed1ae8649..c5b63e10bb91 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -286,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val = 0, min, max;
+ u32 reg_val = 0;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -294,7 +294,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -304,19 +304,21 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
/* Check if PL limits are disabled. */
if (!(reg_val & PWR_LIM_EN)) {
*value = PL_DISABLE;
- drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
+ drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
- *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+ *value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power;
/* For platforms with mailbox power limit support clamping would be done by pcode. */
if (!hwmon->xe->info.has_mbx_power_limits) {
- reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
- min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
- max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ u64 pkg_pwr, min, max;
+
+ pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
+ max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
if (min && max)
@@ -493,8 +495,8 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- u32 x, y, x_w = 2; /* 2 bits */
- u64 r, tau4, out;
+ u32 reg_val, x, y, x_w = 2; /* 2 bits */
+ u64 tau4, out;
int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
@@ -505,23 +507,24 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &reg_val);
if (ret) {
drm_err(&hwmon->xe->drm,
- "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
- channel, power_attr, r, ret);
- r = 0;
+ "power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
+ channel, power_attr, reg_val, ret);
+ reg_val = 0;
}
} else {
- r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel));
}
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
- x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
- y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
+ x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
+ y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);
/*
* tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
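A minimal sketch of the time-window decode described by the comment above, tau = (1 + x/4) * 2^y with x in bits 23:22 and y in bits 21:17 of the limit register. The helper returns 4*tau in raw register time units to stay in integer math, mirroring the driver's tau4 intermediate; the name and open-coded masks are illustrative only.

static unsigned long long decode_tau4(unsigned int reg_val)
{
	unsigned int x = (reg_val >> 22) & 0x3;		/* time window X, bits 23:22 */
	unsigned int y = (reg_val >> 17) & 0x1f;	/* time window Y, bits 21:17 */

	/* 4 * tau = (4 + x) * 2^y, i.e. tau = (1 + x/4) * 2^y */
	return (4ULL + x) << y;
}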
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
index 61b0a1531a53..2cfe9eb67391 100644
--- a/drivers/gpu/drm/xe/xe_nvm.c
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -35,6 +35,10 @@ static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
static void xe_nvm_release_dev(struct device *dev)
{
+ struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+ struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);
+
+ kfree(nvm);
}
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
@@ -162,6 +166,5 @@ void xe_nvm_fini(struct xe_device *xe)
auxiliary_device_delete(&nvm->aux_dev);
auxiliary_device_uninit(&nvm->aux_dev);
- kfree(nvm);
xe->nvm = NULL;
}
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index b804234a6551..9e1236a9ec67 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
kt->tile = tile;
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
- if (err) {
- kobject_put(&kt->base);
- return err;
- }
+ if (err)
+ goto err_object;
tile->sysfs = &kt->base;
err = xe_vram_freq_sysfs_init(tile);
if (err)
- return err;
+ goto err_object;
return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
+
+err_object:
+ kobject_put(&kt->base);
+ return err;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index dc4f61e56579..5146999d27fa 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -240,8 +240,8 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
pfence = xe_preempt_fence_create(q, q->lr.context,
++q->lr.seqno);
- if (!pfence) {
- err = -ENOMEM;
+ if (IS_ERR(pfence)) {
+ err = PTR_ERR(pfence);
goto out_fini;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 79997553d8f9..b934523593d9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -597,8 +597,6 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
- depends on ACPI
- select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 0f2cbae39b2b..7017bfa59093 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
struct amdtp_hid_data *hid_data = hid->driver_data;
struct amdtp_cl_data *cli_data = hid_data->cli_data;
struct request_list *req_list = &cli_data->req_list;
+ struct amd_input_data *in_data = cli_data->in_data;
+ struct amd_mp2_dev *mp2;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->hid_sensor_hubs[i] == hid) {
struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work)
u8 report_id, node_type;
u8 report_size = 0;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
req_node = list_last_entry(&req_list->list, struct request_list, list);
list_del(&req_node->list);
current_index = req_node->current_index;
@@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work)
node_type = req_node->report_type;
kfree(req_node);
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
mp2_ops = mp2->mp2_ops;
if (node_type == HID_FEATURE_REPORT) {
report_size = mp2_ops->get_feat_rep(sensor_index, report_id,
@@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work)
cli_data->cur_hid_dev = current_index;
cli_data->sensor_requested_cnt[current_index] = 0;
amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
+ if (!list_empty(&req_list->list))
+ schedule_delayed_work(&cli_data->work, 0);
}
void amd_sfh_work_buffer(struct work_struct *work)
@@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work)
u8 report_size;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
cli_data->report_id[i], in_data);
hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
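For context, a minimal sketch of the guard(mutex)() pattern the hunks above switch to: the scoped guard from <linux/cleanup.h> acquires the mutex and releases it automatically when the enclosing scope exits, including on early returns, which is why no explicit unlock appears in the new code. Names below are illustrative.

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_counter;

static int example_update(int delta)
{
	guard(mutex)(&example_lock);	/* dropped automatically at scope exit */

	if (example_counter + delta < 0)
		return -EINVAL;		/* early return still releases the lock */

	example_counter += delta;
	return 0;
}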
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index f44a3bb2fbd4..78f830c133e5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -10,6 +10,7 @@
#ifndef AMD_SFH_COMMON_H
#define AMD_SFH_COMMON_H
+#include <linux/mutex.h>
#include <linux/pci.h>
#include "amd_sfh_hid.h"
@@ -59,6 +60,8 @@ struct amd_mp2_dev {
u32 mp2_acs;
struct sfh_dev_status dev_en;
struct work_struct work;
+ /* mp2 to protect data */
+ struct mutex lock;
u8 init_done;
u8 rver;
};
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2983af969579..1d9f955573aa 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -466,6 +466,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
+ rc = devm_mutex_init(&pdev->dev, &privdata->lock);
+ if (rc)
+ return rc;
+
privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
if (privdata->sfh1_1_ops) {
if (boot_cpu_data.x86 >= 0x1A)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index d27dcfb2b9e4..8db9d4e7c3b0 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -974,7 +974,10 @@ static int asus_input_mapping(struct hid_device *hdev,
case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break;
case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break;
case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break;
+ case 0x4e: asus_map_key_clear(KEY_FN_ESC); break;
+ case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER); break;
+ case 0x8b: asus_map_key_clear(KEY_PROG1); break; /* ProArt Creator Hub key */
case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */
case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */
case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 482f62a78c41..5a95ea3bec98 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -229,10 +229,12 @@ static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
- if (ret < 0)
+ if (ret != CP2112_GPIO_SET_LENGTH) {
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
+ }
- return ret;
+ return 0;
}
static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
@@ -309,9 +311,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set_unlocked(dev, offset, value);
-
- return 0;
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index b3121fa7a72d..654879814f97 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,8 +32,6 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
-#include <linux/platform_profile.h>
-
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
@@ -734,7 +732,7 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
report_key_event(input, KEY_RFKILL);
return 1;
}
- platform_profile_cycle();
+ report_key_event(input, KEY_PERFORMANCE);
return 1;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index 854926b3cfd4..a2643ae790d6 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -997,6 +997,8 @@ static const struct pci_device_id quicki2c_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ }
};
MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
index d412eafcf9ea..4e60a7de4727 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -13,6 +13,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT1 0x4D48
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT2 0x4D4A
/* Packet size value, the unit is 16 bytes */
#define MAX_PACKET_SIZE_VALUE_LNL 256
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 5e5f179dd113..84314989dc53 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -976,6 +976,8 @@ static const struct pci_device_id quickspi_pci_tbl[] = {
{PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), },
{}
};
MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
index 6fdf674b21c5..f3532d866749 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -19,6 +19,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT1 0x4D49
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B
/* HIDSPI special ACPI parameters DSM methods */
#define ACPI_QUICKSPI_REVISION_NUM 2
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 028d9f031dde..8b506417ad2f 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -233,6 +233,7 @@ static u16 get_legacy_obj_type(u16 opcode)
{
switch (opcode) {
case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
return MLX5_EVENT_QUEUE_TYPE_RQ;
case MLX5_CMD_OP_CREATE_QP:
return MLX5_EVENT_QUEUE_TYPE_QP;
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5219d7ddfdaa..95f63c5f6159 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -555,6 +555,7 @@ struct gcr3_tbl_info {
};
struct amd_io_pgtable {
+ seqcount_t seqcount; /* Protects root/mode update */
struct io_pgtable pgtbl;
int mode;
u64 *root;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 8de689b2c5ed..ba9e582a8bbe 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1455,12 +1455,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(e->devid));
devid = e->devid;
- for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias)
+ if (alias) {
+ for (dev_i = devid_start; dev_i <= devid; ++dev_i)
pci_seg->alias_table[dev_i] = devid_to;
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
}
set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
- set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -3067,7 +3067,8 @@ static int __init early_amd_iommu_init(void)
if (!boot_cpu_has(X86_FEATURE_CX16)) {
pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index a91e71f981ef..70c2f5b1631b 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
+#include <linux/seqlock.h>
#include <asm/barrier.h>
@@ -130,8 +131,11 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
+ write_seqcount_begin(&pgtable->seqcount);
pgtable->root = pte;
pgtable->mode += 1;
+ write_seqcount_end(&pgtable->seqcount);
+
amd_iommu_update_and_flush_device_table(domain);
pte = NULL;
@@ -153,6 +157,7 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
{
unsigned long last_addr = address + (page_size - 1);
struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
+ unsigned int seqcount;
int level, end_lvl;
u64 *pte, *page;
@@ -170,8 +175,14 @@ static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
}
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
+
address = PAGE_SIZE_ALIGN(address, page_size);
end_lvl = PAGE_SIZE_LEVEL(page_size);
@@ -249,6 +260,7 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
unsigned long *page_size)
{
int level;
+ unsigned int seqcount;
u64 *pte;
*page_size = 0;
@@ -256,8 +268,12 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
if (address > PM_LEVEL_SIZE(pgtable->mode))
return NULL;
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ do {
+ seqcount = read_seqcount_begin(&pgtable->seqcount);
+ level = pgtable->mode - 1;
+ pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
+ } while (read_seqcount_retry(&pgtable->seqcount, seqcount));
+
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@@ -541,6 +557,7 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *coo
if (!pgtable->root)
return NULL;
pgtable->mode = PAGE_MODE_3_LEVEL;
+ seqcount_init(&pgtable->seqcount);
cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9c3ab9d9f69a..dff2d895b8ab 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1575,6 +1575,10 @@ static void switch_to_super_page(struct dmar_domain *domain,
unsigned long lvl_pages = lvl_to_nr_pages(level);
struct dma_pte *pte = NULL;
+ if (WARN_ON(!IS_ALIGNED(start_pfn, lvl_pages) ||
+ !IS_ALIGNED(end_pfn + 1, lvl_pages)))
+ return;
+
while (start_pfn <= end_pfn) {
if (!pte)
pte = pfn_to_dma_pte(domain, start_pfn, &level,
@@ -1650,7 +1654,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
unsigned long pages_to_remove;
pteval |= DMA_PTE_LARGE_PAGE;
- pages_to_remove = min_t(unsigned long, nr_pages,
+ pages_to_remove = min_t(unsigned long,
+ round_down(nr_pages, lvl_pages),
nr_pte_to_next_page(pte) * lvl_pages);
end_pfn = iov_pfn + pages_to_remove - 1;
switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 65fbd098f9e9..4c842368289f 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -711,6 +711,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
mutex_unlock(&igroup->lock);
+ iommufd_hw_pagetable_put(idev->ictx, hwpt);
+
/* Caller must destroy hwpt */
return hwpt;
}
@@ -1057,7 +1059,6 @@ void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid)
hwpt = iommufd_hw_pagetable_detach(idev, pasid);
if (!hwpt)
return;
- iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, "IOMMUFD");
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index fc4de63b0bce..e23d9ee4fe38 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -393,12 +393,12 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
const struct file_operations *fops)
{
struct file *filep;
- int fdno;
spin_lock_init(&eventq->lock);
INIT_LIST_HEAD(&eventq->deliver);
init_waitqueue_head(&eventq->wait_queue);
+ /* The filep is fput() by the core code during failure */
filep = anon_inode_getfile(name, fops, eventq, O_RDWR);
if (IS_ERR(filep))
return PTR_ERR(filep);
@@ -408,10 +408,7 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
eventq->filep = filep;
refcount_inc(&eventq->obj.users);
- fdno = get_unused_fd_flags(O_CLOEXEC);
- if (fdno < 0)
- fput(filep);
- return fdno;
+ return get_unused_fd_flags(O_CLOEXEC);
}
static const struct file_operations iommufd_fault_fops =
@@ -452,7 +449,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
return 0;
out_put_fdno:
put_unused_fd(fdno);
- fput(fault->common.filep);
return rc;
}
@@ -536,7 +532,6 @@ int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)
out_put_fdno:
put_unused_fd(fdno);
- fput(veventq->common.filep);
out_abort:
iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj);
out_unlock_veventqs:
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0da2a81eedfa..627f9b78483a 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -454,9 +454,8 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);
- lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
-
if (hwpt_paging->auto_domain) {
+ lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
return;
}
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 15af7ced0501..ce775fbbae94 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -23,6 +23,7 @@
#include "iommufd_test.h"
struct iommufd_object_ops {
+ size_t file_offset;
void (*pre_destroy)(struct iommufd_object *obj);
void (*destroy)(struct iommufd_object *obj);
void (*abort)(struct iommufd_object *obj);
@@ -121,6 +122,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
old = xas_store(&xas, NULL);
xa_unlock(&ictx->objects);
WARN_ON(old != XA_ZERO_ENTRY);
+
+ if (WARN_ON(!refcount_dec_and_test(&obj->users)))
+ return;
+
kfree(obj);
}
@@ -131,10 +136,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
- if (iommufd_object_ops[obj->type].abort)
- iommufd_object_ops[obj->type].abort(obj);
+ const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type];
+
+ if (ops->file_offset) {
+ struct file **filep = ((void *)obj) + ops->file_offset;
+
+ /*
+ * A file should hold a users refcount while the file is open
+ * and put it back in its release. The file should hold a
+ * pointer to obj in its private data. Normal fput() is
+ * deferred to a workqueue and can get out of order with the
+ * following kfree(obj). Using the sync version ensures the
+ * release happens immediately. During abort we require the file
+ * refcount to be one at this point - meaning the object alloc
+ * function cannot do anything to allow another thread to take a
+ * refcount prior to a guaranteed success.
+ */
+ if (*filep)
+ __fput_sync(*filep);
+ }
+
+ if (ops->abort)
+ ops->abort(obj);
else
- iommufd_object_ops[obj->type].destroy(obj);
+ ops->destroy(obj);
iommufd_object_abort(ictx, obj);
}
@@ -550,16 +575,23 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_flags & VM_EXEC)
return -EPERM;
+ mtree_lock(&ictx->mt_mmap);
/* vma->vm_pgoff carries a page-shifted start position to an immap */
immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
- if (!immap)
+ if (!immap || !refcount_inc_not_zero(&immap->owner->users)) {
+ mtree_unlock(&ictx->mt_mmap);
return -ENXIO;
+ }
+ mtree_unlock(&ictx->mt_mmap);
+
/*
* mtree_load() returns the immap for any contained mmio_addr, so only
* allow the exact immap thing to be mapped
*/
- if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
- return -ENXIO;
+ if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) {
+ rc = -ENXIO;
+ goto err_refcount;
+ }
vma->vm_pgoff = 0;
vma->vm_private_data = immap;
@@ -570,10 +602,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
immap->mmio_addr >> PAGE_SHIFT, length,
vma->vm_page_prot);
if (rc)
- return rc;
+ goto err_refcount;
+ return 0;
- /* vm_ops.open won't be called for mmap itself. */
- refcount_inc(&immap->owner->users);
+err_refcount:
+ refcount_dec(&immap->owner->users);
return rc;
}
@@ -651,6 +684,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, "IOMMUFD");
+#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \
+ .file_offset = (offsetof(_struct, _filep) + \
+ BUILD_BUG_ON_ZERO(!__same_type( \
+ struct file *, ((_struct *)NULL)->_filep)) + \
+ BUILD_BUG_ON_ZERO(offsetof(_struct, _obj)))
+
static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_ACCESS] = {
.destroy = iommufd_access_destroy_object,
@@ -661,6 +700,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
},
[IOMMUFD_OBJ_FAULT] = {
.destroy = iommufd_fault_destroy,
+ IOMMUFD_FILE_OFFSET(struct iommufd_fault, common.filep, common.obj),
},
[IOMMUFD_OBJ_HW_QUEUE] = {
.destroy = iommufd_hw_queue_destroy,
@@ -683,6 +723,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_VEVENTQ] = {
.destroy = iommufd_veventq_destroy,
.abort = iommufd_veventq_abort,
+ IOMMUFD_FILE_OFFSET(struct iommufd_veventq, common.filep, common.obj),
},
[IOMMUFD_OBJ_VIOMMU] = {
.destroy = iommufd_viommu_destroy,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 9c80d61deb2c..aa576736d60b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -612,6 +612,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain)
}
}
+static bool reg_ioat_propagate_error(int cc, u8 status)
+{
+ /*
+ * If the device is in the error state, the reset routine
+ * will register the IOAT of the newly set domain on re-enable.
+ */
+ if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return false;
+ /*
+ * If the device was removed, treat registration as success
+ * and let the subsequent error event trigger tear down.
+ */
+ if (cc == ZPCI_CC_INVAL_HANDLE)
+ return false;
+ return cc != ZPCI_CC_OK;
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -696,7 +713,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev->dma_table = s390_domain->dma_table;
zdev_s390_domain_update(zdev, domain);
@@ -1032,7 +1049,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
lockdep_assert_held(&zdev->dom_lock);
- if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
return NULL;
s390_domain = to_s390_domain(zdev->s390_domain);
@@ -1123,12 +1141,7 @@ static int s390_attach_dev_identity(struct iommu_domain *domain,
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
-
- /*
- * If the device is undergoing error recovery the reset code
- * will re-establish the new domain.
- */
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev_s390_domain_update(zdev, domain);
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 5d9b08115375..3e1f165c2d20 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -73,6 +73,7 @@ static int linear_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f1d8811a542a..419139ad7663 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -382,6 +382,7 @@ static int raid0_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.chunk_sectors = mddev->chunk_sectors;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index bf44878ec640..d30b82beeb92 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3211,6 +3211,7 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b60c30bfb6c7..9832eefb2f15 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4008,6 +4008,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
lim.io_min = mddev->chunk_sectors << 9;
lim.chunk_sectors = mddev->chunk_sectors;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 023649fe2476..e385ef1355e8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7732,6 +7732,7 @@ static int raid5_set_limits(struct mddev *mddev)
lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
lim.discard_granularity = stripe;
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
mddev_stack_rdev_limits(mddev, &lim, 0);
rdev_for_each(rdev, mddev)
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index a9e6277789ba..79df2fa89a3f 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -292,7 +292,7 @@ static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
host->pio_ptr = NULL;
host->pio_size = 0;
} else {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 3a1de477e9af..b0f91cc9e40e 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -283,6 +283,8 @@
#define PCIE_GLI_9767_UHS2_CTL2_ZC_VALUE 0xb
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL BIT(6)
#define PCIE_GLI_9767_UHS2_CTL2_ZC_CTL_VALUE 0x1
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN BIT(13)
+#define PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE BIT(14)
#define GLI_MAX_TUNING_LOOP 40
@@ -1179,6 +1181,65 @@ static void gl9767_set_low_power_negotiation(struct pci_dev *pdev, bool enable)
gl9767_vhs_read(pdev);
}
+static void sdhci_gl9767_uhs2_phy_reset(struct sdhci_host *host, bool assert)
+{
+ struct sdhci_pci_slot *slot = sdhci_priv(host);
+ struct pci_dev *pdev = slot->chip->pdev;
+ u32 value, set, clr;
+
+ if (assert) {
+ /* Assert reset, set RESETN and clear RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ } else {
+ /* De-assert reset, clear RESETN and set RESETN_VALUE */
+ set = PCIE_GLI_9767_UHS2_CTL2_FORCE_RESETN_VALUE;
+ clr = PCIE_GLI_9767_UHS2_CTL2_FORCE_PHY_RESETN;
+ }
+
+ gl9767_vhs_write(pdev);
+ pci_read_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, &value);
+ value |= set;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ value &= ~clr;
+ pci_write_config_dword(pdev, PCIE_GLI_9767_UHS2_CTL2, value);
+ gl9767_vhs_read(pdev);
+}
+
+static void __gl9767_uhs2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd)
+{
+ u8 pwr = 0;
+
+ if (mode != MMC_POWER_OFF) {
+ pwr = sdhci_get_vdd_value(vdd);
+ if (!pwr)
+ WARN(1, "%s: Invalid vdd %#x\n",
+ mmc_hostname(host->mmc), vdd);
+ pwr |= SDHCI_VDD2_POWER_180;
+ }
+
+ if (host->pwr == pwr)
+ return;
+
+ host->pwr = pwr;
+
+ if (pwr == 0) {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ } else {
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
+ pwr |= SDHCI_POWER_ON;
+ sdhci_writeb(host, pwr & 0xf, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+
+ /* Assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, true);
+ pwr |= SDHCI_VDD2_POWER_ON;
+ sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ usleep_range(5000, 6250);
+ }
+}
+
static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -1205,6 +1266,11 @@ static void sdhci_gl9767_set_clock(struct sdhci_host *host, unsigned int clock)
}
sdhci_enable_clk(host, clk);
+
+ if (mmc_card_uhs2(host->mmc))
+ /* De-assert reset */
+ sdhci_gl9767_uhs2_phy_reset(host, false);
+
gl9767_set_low_power_negotiation(pdev, true);
}
@@ -1476,7 +1542,7 @@ static void sdhci_gl9767_set_power(struct sdhci_host *host, unsigned char mode,
gl9767_vhs_read(pdev);
sdhci_gli_overcurrent_event_enable(host, false);
- sdhci_uhs2_set_power(host, mode, vdd);
+ __gl9767_uhs2_set_power(host, mode, vdd);
sdhci_gli_overcurrent_event_enable(host, true);
} else {
gl9767_vhs_write(pdev);
diff --git a/drivers/mmc/host/sdhci-uhs2.c b/drivers/mmc/host/sdhci-uhs2.c
index 0efeb9d0c376..c459a08d01da 100644
--- a/drivers/mmc/host/sdhci-uhs2.c
+++ b/drivers/mmc/host/sdhci-uhs2.c
@@ -295,7 +295,8 @@ static void __sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
else
sdhci_uhs2_set_power(host, ios->power_mode, ios->vdd);
- sdhci_set_clock(host, host->clock);
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
}
static int sdhci_uhs2_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3a17821efa5c..ac7e11f37af7 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2367,23 +2367,6 @@ void sdhci_set_ios_common(struct mmc_host *mmc, struct mmc_ios *ios)
(ios->power_mode == MMC_POWER_UP) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
-
- if (!ios->clock || ios->clock != host->clock) {
- host->ops->set_clock(host, ios->clock);
- host->clock = ios->clock;
-
- if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
- host->clock) {
- host->timeout_clk = mmc->actual_clock ?
- mmc->actual_clock / 1000 :
- host->clock / 1000;
- mmc->max_busy_timeout =
- host->ops->get_max_timeout_count ?
- host->ops->get_max_timeout_count(host) :
- 1 << 27;
- mmc->max_busy_timeout /= host->timeout_clk;
- }
- }
}
EXPORT_SYMBOL_GPL(sdhci_set_ios_common);
@@ -2410,6 +2393,23 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sdhci_set_ios_common(mmc, ios);
+ if (!ios->clock || ios->clock != host->clock) {
+ host->ops->set_clock(host, ios->clock);
+ host->clock = ios->clock;
+
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
+ host->clock) {
+ host->timeout_clk = mmc->actual_clock ?
+ mmc->actual_clock / 1000 :
+ host->clock / 1000;
+ mmc->max_busy_timeout =
+ host->ops->get_max_timeout_count ?
+ host->ops->get_max_timeout_count(host) :
+ 1 << 27;
+ mmc->max_busy_timeout /= host->timeout_clk;
+ }
+ }
+
if (host->ops->set_power)
host->ops->set_power(host, ios->power_mode, ios->vdd);
else
diff --git a/drivers/pinctrl/mediatek/pinctrl-airoha.c b/drivers/pinctrl/mediatek/pinctrl-airoha.c
index 1b2f132d76f0..b405dfa20891 100644
--- a/drivers/pinctrl/mediatek/pinctrl-airoha.c
+++ b/drivers/pinctrl/mediatek/pinctrl-airoha.c
@@ -108,6 +108,9 @@
#define JTAG_UDI_EN_MASK BIT(4)
#define JTAG_DFD_EN_MASK BIT(3)
+#define REG_FORCE_GPIO_EN 0x0228
+#define FORCE_GPIO_EN(n) BIT(n)
+
/* LED MAP */
#define REG_LAN_LED0_MAPPING 0x027c
#define REG_LAN_LED1_MAPPING 0x0280
@@ -719,16 +722,16 @@ static const struct airoha_pinctrl_func_group mdio_func_group[] = {
.name = "mdio",
.regmap[0] = {
AIROHA_FUNC_MUX,
- REG_GPIO_PON_MODE,
- GPIO_SGMII_MDIO_MODE_MASK,
- GPIO_SGMII_MDIO_MODE_MASK
- },
- .regmap[1] = {
- AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
GPIO_MDC_IO_MASTER_MODE_MODE,
GPIO_MDC_IO_MASTER_MODE_MODE
},
+ .regmap[1] = {
+ AIROHA_FUNC_MUX,
+ REG_FORCE_GPIO_EN,
+ FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2),
+ FORCE_GPIO_EN(1) | FORCE_GPIO_EN(2)
+ },
.regmap_size = 2,
},
};
@@ -1752,8 +1755,8 @@ static const struct airoha_pinctrl_func_group phy1_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1816,8 +1819,8 @@ static const struct airoha_pinctrl_func_group phy2_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1880,8 +1883,8 @@ static const struct airoha_pinctrl_func_group phy3_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
@@ -1944,8 +1947,8 @@ static const struct airoha_pinctrl_func_group phy4_led1_func_group[] = {
.regmap[0] = {
AIROHA_FUNC_MUX,
REG_GPIO_2ND_I2C_MODE,
- GPIO_LAN3_LED0_MODE_MASK,
- GPIO_LAN3_LED0_MODE_MASK
+ GPIO_LAN3_LED1_MODE_MASK,
+ GPIO_LAN3_LED1_MODE_MASK
},
.regmap[1] = {
AIROHA_FUNC_MUX,
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 0006ab3d0789..61c2277c9ce3 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -187,6 +187,7 @@ static const struct genpd_lock_ops genpd_raw_spin_ops = {
#define genpd_is_opp_table_fw(genpd) (genpd->flags & GENPD_FLAG_OPP_TABLE_FW)
#define genpd_is_dev_name_fw(genpd) (genpd->flags & GENPD_FLAG_DEV_NAME_FW)
#define genpd_is_no_sync_state(genpd) (genpd->flags & GENPD_FLAG_NO_SYNC_STATE)
+#define genpd_is_no_stay_on(genpd) (genpd->flags & GENPD_FLAG_NO_STAY_ON)
static inline bool irq_safe_dev_in_sleep_domain(struct device *dev,
const struct generic_pm_domain *genpd)
@@ -1357,7 +1358,6 @@ err_poweroff:
return ret;
}
-#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
@@ -1382,9 +1382,6 @@ static int __init genpd_power_off_unused(void)
mutex_lock(&gpd_list_lock);
list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
- genpd_lock(genpd);
- genpd->stay_on = false;
- genpd_unlock(genpd);
genpd_queue_power_off_work(genpd);
}
@@ -1393,7 +1390,6 @@ static int __init genpd_power_off_unused(void)
return 0;
}
late_initcall_sync(genpd_power_off_unused);
-#endif
#ifdef CONFIG_PM_SLEEP
@@ -2367,6 +2363,18 @@ static void genpd_lock_init(struct generic_pm_domain *genpd)
}
}
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
+{
+ genpd->stay_on = !genpd_is_no_stay_on(genpd) && !is_off;
+}
+#else
+static void genpd_set_stay_on(struct generic_pm_domain *genpd, bool is_off)
+{
+ genpd->stay_on = false;
+}
+#endif
+
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -2392,7 +2400,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GENPD_STATE_OFF : GENPD_STATE_ON;
- genpd->stay_on = !is_off;
+ genpd_set_stay_on(genpd, is_off);
genpd->sync_state = GENPD_SYNC_STATE_OFF;
genpd->device_count = 0;
genpd->provider = NULL;
diff --git a/drivers/pmdomain/renesas/rcar-gen4-sysc.c b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
index 5aa7fa1df8fe..7434bf42d215 100644
--- a/drivers/pmdomain/renesas/rcar-gen4-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-gen4-sysc.c
@@ -251,6 +251,7 @@ static int __init rcar_gen4_sysc_pd_setup(struct rcar_gen4_sysc_pd *pd)
genpd->detach_dev = cpg_mssr_detach_dev;
}
+ genpd->flags |= GENPD_FLAG_NO_STAY_ON;
genpd->power_off = rcar_gen4_sysc_pd_power_off;
genpd->power_on = rcar_gen4_sysc_pd_power_on;
diff --git a/drivers/pmdomain/renesas/rcar-sysc.c b/drivers/pmdomain/renesas/rcar-sysc.c
index 4b310c1d35fa..d8a8ffcde38d 100644
--- a/drivers/pmdomain/renesas/rcar-sysc.c
+++ b/drivers/pmdomain/renesas/rcar-sysc.c
@@ -241,6 +241,7 @@ static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
}
}
+ genpd->flags |= GENPD_FLAG_NO_STAY_ON;
genpd->power_off = rcar_sysc_pd_power_off;
genpd->power_on = rcar_sysc_pd_power_on;
@@ -342,7 +343,7 @@ struct rcar_pm_domains {
};
static struct genpd_onecell_data *rcar_sysc_onecell_data;
-static struct device_node *rcar_sysc_onecell_np;
+static struct device_node *rcar_sysc_onecell_np __initdata = NULL;
static int __init rcar_sysc_pd_init(void)
{
diff --git a/drivers/pmdomain/renesas/rmobile-sysc.c b/drivers/pmdomain/renesas/rmobile-sysc.c
index 8eedc9a1d825..a6bf7295e909 100644
--- a/drivers/pmdomain/renesas/rmobile-sysc.c
+++ b/drivers/pmdomain/renesas/rmobile-sysc.c
@@ -100,7 +100,8 @@ static void rmobile_init_pm_domain(struct rmobile_pm_domain *rmobile_pd)
struct generic_pm_domain *genpd = &rmobile_pd->genpd;
struct dev_power_governor *gov = rmobile_pd->gov;
- genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
+ genpd->flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP |
+ GENPD_FLAG_NO_STAY_ON;
genpd->attach_dev = cpg_mstp_attach_dev;
genpd->detach_dev = cpg_mstp_detach_dev;
diff --git a/drivers/pmdomain/rockchip/pm-domains.c b/drivers/pmdomain/rockchip/pm-domains.c
index 242570c505fb..1955c6d453e4 100644
--- a/drivers/pmdomain/rockchip/pm-domains.c
+++ b/drivers/pmdomain/rockchip/pm-domains.c
@@ -865,7 +865,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
pd->genpd.power_on = rockchip_pd_power_on;
pd->genpd.attach_dev = rockchip_pd_attach_dev;
pd->genpd.detach_dev = rockchip_pd_detach_dev;
- pd->genpd.flags = GENPD_FLAG_PM_CLK;
+ pd->genpd.flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_NO_STAY_ON;
if (pd_info->active_wakeup)
pd->genpd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
pm_genpd_init(&pd->genpd, NULL,
diff --git a/drivers/reset/reset-eyeq.c b/drivers/reset/reset-eyeq.c
index 02d50041048b..2d3998368a1c 100644
--- a/drivers/reset/reset-eyeq.c
+++ b/drivers/reset/reset-eyeq.c
@@ -410,6 +410,13 @@ static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev,
return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]);
}
+static void eqr_of_node_put(void *_dev)
+{
+ struct device *dev = _dev;
+
+ of_node_put(dev->of_node);
+}
+
static int eqr_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -428,6 +435,10 @@ static int eqr_probe(struct auxiliary_device *adev,
if (!dev->of_node)
return -ENODEV;
+ ret = devm_add_action_or_reset(dev, eqr_of_node_put, dev);
+ if (ret)
+ return ret;
+
/*
* Using our newfound OF node, we can get match data. We cannot use
* device_get_match_data() because it does not match reused OF nodes.
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 1e50675772fe..cc88aaa106da 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -243,7 +243,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
&hwq->sqe_dma_addr,
GFP_KERNEL);
- if (!hwq->sqe_dma_addr) {
+ if (!hwq->sqe_base_addr) {
dev_err(hba->dev, "SQE allocation failed\n");
return -ENOMEM;
}
@@ -252,7 +252,7 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
&hwq->cqe_dma_addr,
GFP_KERNEL);
- if (!hwq->cqe_dma_addr) {
+ if (!hwq->cqe_base_addr) {
dev_err(hba->dev, "CQE allocation failed\n");
return -ENOMEM;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c6508fe0d5c8..35ded4330431 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -765,11 +765,11 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int err;
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
- bool busyloop_intr;
bool in_order = vhost_has_feature(vq, VIRTIO_F_IN_ORDER);
do {
- busyloop_intr = false;
+ bool busyloop_intr = false;
+
if (nvq->done_idx == VHOST_NET_BATCH)
vhost_tx_batch(net, nvq, sock, &msg);
@@ -780,10 +780,18 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- /* Kicks are disabled at this point, break loop and
- * process any remaining batched packets. Queue will
- * be re-enabled afterwards.
+ /* Flush batched packets to handle pending RX
+ * work (if busyloop_intr is set) and to avoid
+ * unnecessary virtqueue kicks.
*/
+ vhost_tx_batch(net, nvq, sock, &msg);
+ if (unlikely(busyloop_intr)) {
+ vhost_poll_queue(&vq->poll);
+ } else if (unlikely(vhost_enable_notify(&net->dev,
+ vq))) {
+ vhost_disable_notify(&net->dev, vq);
+ continue;
+ }
break;
}
@@ -839,22 +847,7 @@ done:
++nvq->done_idx;
} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
- /* Kicks are still disabled, dispatch any remaining batched msgs. */
vhost_tx_batch(net, nvq, sock, &msg);
-
- if (unlikely(busyloop_intr))
- /* If interrupted while doing busy polling, requeue the
- * handler to be fair handle_rx as well as other tasks
- * waiting on cpu.
- */
- vhost_poll_queue(&vq->poll);
- else
- /* All of our work has been completed; however, before
- * leaving the TX handler, do one last check for work,
- * and requeue handler if necessary. If there is no work,
- * queue will be reenabled.
- */
- vhost_net_busy_poll_try_queue(net, vq);
}
static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
@@ -1014,7 +1007,7 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
}
static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
- bool *busyloop_intr, unsigned int count)
+ bool *busyloop_intr, unsigned int *count)
{
struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
@@ -1024,7 +1017,8 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
if (!len && rvq->busyloop_timeout) {
/* Flush batched heads first */
- vhost_net_signal_used(rnvq, count);
+ vhost_net_signal_used(rnvq, *count);
+ *count = 0;
/* Both tx vq and rx socket were polled here */
vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);
@@ -1180,7 +1174,7 @@ static void handle_rx(struct vhost_net *net)
do {
sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
- &busyloop_intr, count);
+ &busyloop_intr, &count);
if (!sock_len)
break;
sock_len += sock_hlen;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index abf51332a5c5..98e4f68f4e3c 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -2884,7 +2884,7 @@ vhost_scsi_make_tport(struct target_fabric_configfs *tf,
check_len:
if (strlen(name) >= VHOST_SCSI_NAMELEN) {
pr_err("Emulated %s Address: %s, exceeds"
- " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+ " max: %d\n", vhost_scsi_dump_proto_id(tport), name,
VHOST_SCSI_NAMELEN);
kfree(tport);
return ERR_PTR(-EINVAL);