Diffstat (limited to 'drivers')
 drivers/misc/vmw_balloon.c                     |  8
 drivers/platform/mellanox/mlxbf-pmc.c          |  1
 drivers/platform/x86/dell/alienware-wmi-wmax.c | 12
 drivers/scsi/libfc/fc_fcp.c                    |  2
 drivers/scsi/qla4xxx/ql4_os.c                  |  8
 drivers/scsi/storvsc_drv.c                     | 96
 6 files changed, 64 insertions(+), 63 deletions(-)
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6df51ee8db62..cc1d18b3df5c 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1737,7 +1737,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 {
 	unsigned long status, flags;
 	struct vmballoon *b;
-	int ret;
+	int ret = 0;
 
 	b = container_of(b_dev_info, struct vmballoon, b_dev_info);
 
@@ -1796,17 +1796,15 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 		 * A failure happened. While we can deflate the page we just
 		 * inflated, this deflation can also encounter an error. Instead
 		 * we will decrease the size of the balloon to reflect the
-		 * change and report failure.
+		 * change.
 		 */
 		atomic64_dec(&b->size);
-		ret = -EBUSY;
 	} else {
 		/*
 		 * Success. Take a reference for the page, and we will add it to
 		 * the list after acquiring the lock.
 		 */
 		get_page(newpage);
-		ret = 0;
 	}
 
 	/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1815,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 	 * If we succeed just insert it to the list and update the statistics
 	 * under the lock.
 	 */
-	if (!ret) {
+	if (status == VMW_BALLOON_SUCCESS) {
 		balloon_page_insert(&b->b_dev_info, newpage);
 		__count_vm_event(BALLOON_MIGRATE);
 	}
diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c
index 4776013e0764..16a2fd9fdd9b 100644
--- a/drivers/platform/mellanox/mlxbf-pmc.c
+++ b/drivers/platform/mellanox/mlxbf-pmc.c
@@ -2015,6 +2015,7 @@ static int mlxbf_pmc_init_perftype_counter(struct device *dev, unsigned int blk_
 	if (pmc->block[blk_num].type == MLXBF_PMC_TYPE_CRSPACE) {
 		/* Program crspace counters to count clock cycles using "count_clock" sysfs */
 		attr = &pmc->block[blk_num].attr_count_clock;
+		sysfs_attr_init(&attr->dev_attr.attr);
 		attr->dev_attr.attr.mode = 0644;
 		attr->dev_attr.show = mlxbf_pmc_count_clock_show;
 		attr->dev_attr.store = mlxbf_pmc_count_clock_store;
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index 31f9643a6a3b..f417dcc9af35 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -210,6 +210,14 @@ static const struct dmi_system_id awcc_dmi_table[] __initconst = {
 		.driver_data = &g_series_quirks,
 	},
 	{
+		.ident = "Dell Inc. G15 5530",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5530"),
+		},
+		.driver_data = &g_series_quirks,
+	},
+	{
 		.ident = "Dell Inc. G16 7630",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1639,7 +1647,7 @@ static int wmax_wmi_probe(struct wmi_device *wdev, const void *context)
 
 static int wmax_wmi_suspend(struct device *dev)
 {
-	if (awcc->hwmon)
+	if (awcc && awcc->hwmon)
 		awcc_hwmon_suspend(dev);
 
 	return 0;
@@ -1647,7 +1655,7 @@ static int wmax_wmi_suspend(struct device *dev)
 
 static int wmax_wmi_resume(struct device *dev)
 {
-	if (awcc->hwmon)
+	if (awcc && awcc->hwmon)
 		awcc_hwmon_resume(dev);
 
 	return 0;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 16d0f02af1e4..31d08c115521 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -503,7 +503,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		host_bcode = FC_ERROR;
 		goto err;
 	}
-	if (offset + len > fsp->data_len) {
+	if (size_add(offset, len) > fsp->data_len) {
 		/* this should never happen */
 		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
 		    fc_frame_crc_check(fp))
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index a761c0aa5127..83ff66f954e6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4104,7 +4104,7 @@ void qla4xxx_srb_compl(struct kref *ref)
  * The mid-level driver tries to ensure that queuecommand never gets
  * invoked concurrently with itself or the interrupt handler (although
  * the interrupt handler may call this routine as part of request-
- * completion handling). Unfortunely, it sometimes calls the scheduler
+ * completion handling). Unfortunately, it sometimes calls the scheduler
  * in interrupt context which is a big NO! NO!.
  **/
 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -4647,7 +4647,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
 		cmd = scsi_host_find_tag(ha->host, index);
 		/*
 		 * We cannot just check if the index is valid,
-		 * becase if we are run from the scsi eh, then
+		 * because if we are run from the scsi eh, then
 		 * the scsi/block layer is going to prevent
 		 * the tag from being released.
 		 */
@@ -4952,7 +4952,7 @@ recover_ha_init_adapter:
 	/* Upon successful firmware/chip reset, re-initialize the adapter */
 	if (status == QLA_SUCCESS) {
 		/* For ISP-4xxx, force function 1 to always initialize
-		 * before function 3 to prevent both funcions from
+		 * before function 3 to prevent both functions from
 		 * stepping on top of the other */
 		if (is_qla40XX(ha) && (ha->mac_index == 3))
 			ssleep(6);
@@ -6914,7 +6914,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
 	struct ddb_entry *ddb_entry = NULL;
 
 	/* Create session object, with INVALID_ENTRY,
-	 * the targer_id would get set when we issue the login
+	 * the target_id would get set when we issue the login
 	 */
 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
 				       cmds_max, sizeof(struct ddb_entry),
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 567f9cd29102..6e4112143c76 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1406,14 +1406,19 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
 	}
 
 	/*
-	 * Our channel array is sparsley populated and we
+	 * Our channel array could be sparsley populated and we
 	 * initiated I/O on a processor/hw-q that does not
 	 * currently have a designated channel. Fix this.
 	 * The strategy is simple:
-	 * I. Ensure NUMA locality
-	 * II. Distribute evenly (best effort)
+	 * I. Prefer the channel associated with the current CPU
+	 * II. Ensure NUMA locality
+	 * III. Distribute evenly (best effort)
 	 */
 
+	/* Prefer the channel on the I/O issuing processor/hw-q */
+	if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
+		return stor_device->stor_chns[q_num];
+
 	node_mask = cpumask_of_node(cpu_to_node(q_num));
 
 	num_channels = 0;
@@ -1469,59 +1474,48 @@ static int storvsc_do_io(struct hv_device *device,
 	/* See storvsc_change_target_cpu(). */
 	outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
 	if (outgoing_channel != NULL) {
-		if (outgoing_channel->target_cpu == q_num) {
-			/*
-			 * Ideally, we want to pick a different channel if
-			 * available on the same NUMA node.
-			 */
-			node_mask = cpumask_of_node(cpu_to_node(q_num));
-			for_each_cpu_wrap(tgt_cpu,
-				 &stor_device->alloced_cpus, q_num + 1) {
-				if (!cpumask_test_cpu(tgt_cpu, node_mask))
-					continue;
-				if (tgt_cpu == q_num)
-					continue;
-				channel = READ_ONCE(
-					stor_device->stor_chns[tgt_cpu]);
-				if (channel == NULL)
-					continue;
-				if (hv_get_avail_to_write_percent(
-							&channel->outbound)
-						> ring_avail_percent_lowater) {
-					outgoing_channel = channel;
-					goto found_channel;
-				}
-			}
+		if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
+		    > ring_avail_percent_lowater)
 			goto found_channel;
 
-			/*
-			 * All the other channels on the same NUMA node are
-			 * busy. Try to use the channel on the current CPU
-			 */
-			if (hv_get_avail_to_write_percent(
-						&outgoing_channel->outbound)
-					> ring_avail_percent_lowater)
+		/*
+		 * Channel is busy, try to find a channel on the same NUMA node
+		 */
+		node_mask = cpumask_of_node(cpu_to_node(q_num));
+		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+				  q_num + 1) {
+			if (!cpumask_test_cpu(tgt_cpu, node_mask))
+				continue;
+			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+			if (!channel)
+				continue;
+			if (hv_get_avail_to_write_percent(&channel->outbound)
+			    > ring_avail_percent_lowater) {
+				outgoing_channel = channel;
+				goto found_channel;
+			}
+		}
 
-			/*
-			 * If we reach here, all the channels on the current
-			 * NUMA node are busy. Try to find a channel in
-			 * other NUMA nodes
-			 */
-			for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
-				if (cpumask_test_cpu(tgt_cpu, node_mask))
-					continue;
-				channel = READ_ONCE(
-					stor_device->stor_chns[tgt_cpu]);
-				if (channel == NULL)
-					continue;
-				if (hv_get_avail_to_write_percent(
-							&channel->outbound)
-						> ring_avail_percent_lowater) {
-					outgoing_channel = channel;
-					goto found_channel;
-				}
+		/*
+		 * If we reach here, all the channels on the current
+		 * NUMA node are busy. Try to find a channel in
+		 * all NUMA nodes
+		 */
+		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+				  q_num + 1) {
+			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+			if (!channel)
+				continue;
+			if (hv_get_avail_to_write_percent(&channel->outbound)
+			    > ring_avail_percent_lowater) {
+				outgoing_channel = channel;
+				goto found_channel;
 			}
 		}
+		/*
+		 * If we reach here, all the channels are busy. Use the
+		 * original channel found.
+		 */
 	} else {
 		spin_lock_irqsave(&stor_device->lock, flags);
 		outgoing_channel = stor_device->stor_chns[q_num];
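
Editor's note: the fc_fcp.c hunk replaces the open-coded "offset + len" bounds check with size_add() from the kernel's <linux/overflow.h>, which saturates at SIZE_MAX instead of wrapping, so a hostile or corrupt frame offset cannot overflow past the length check. Below is a minimal userspace sketch of that saturating semantics; sat_size_add() is an illustrative stand-in, not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/* Saturate to SIZE_MAX instead of wrapping on overflow. */
static size_t sat_size_add(size_t a, size_t b)
{
	size_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;	/* clamp: the bounds check below stays meaningful */
	return sum;
}

int main(void)
{
	size_t offset = SIZE_MAX - 4;	/* hostile/corrupt frame offset */
	size_t len = 16;
	size_t data_len = 512;

	/* Wrapping add: SIZE_MAX - 4 + 16 wraps to 11, so the check falsely passes. */
	printf("wrapping:   %zu > %zu ? %d\n",
	       offset + len, data_len, offset + len > data_len);

	/* Saturating add: clamps to SIZE_MAX, so the bounds check correctly trips. */
	printf("saturating: %zu > %zu ? %d\n",
	       sat_size_add(offset, len), data_len,
	       sat_size_add(offset, len) > data_len);
	return 0;
}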
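Editor's note: the storvsc_drv.c hunks reorder channel selection into the three-tier strategy spelled out in the updated comment: prefer the issuing CPU's own channel when its ring buffer has room, then a non-busy channel on the same NUMA node, then any non-busy channel, and fall back to the original channel when everything is busy. Below is a minimal userspace sketch of that selection order only; all types and helpers are illustrative stand-ins for the driver's vmbus/cpumask machinery, with a fixed 8-CPU, 2-node topology assumed.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct channel {
	int target_cpu;
	int avail_pct;	/* free ring-buffer space, percent */
};

static struct channel *chans[NR_CPUS];	/* sparse: NULL where no channel exists */
static const int lowater = 10;		/* stand-in for ring_avail_percent_lowater */

static bool usable(const struct channel *c)
{
	return c && c->avail_pct > lowater;
}

/* Stand-in for NUMA topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static int node_of(int cpu) { return cpu / 4; }

static struct channel *pick_channel(int q_num)
{
	struct channel *home = chans[q_num];
	int cpu, i;

	/* 1. Prefer the channel of the issuing CPU if it has room. */
	if (usable(home))
		return home;

	/* 2. Fall back to a non-busy channel on the same NUMA node,
	 *    scanning from q_num + 1 so load spreads round-robin,
	 *    as for_each_cpu_wrap() does in the driver.
	 */
	for (i = 1; i < NR_CPUS; i++) {
		cpu = (q_num + i) % NR_CPUS;
		if (node_of(cpu) == node_of(q_num) && usable(chans[cpu]))
			return chans[cpu];
	}

	/* 3. Then any non-busy channel on any node. */
	for (i = 1; i < NR_CPUS; i++) {
		cpu = (q_num + i) % NR_CPUS;
		if (usable(chans[cpu]))
			return chans[cpu];
	}

	/* 4. Everything is busy: use the original channel anyway. */
	return home;
}

int main(void)
{
	struct channel c0 = { .target_cpu = 0, .avail_pct = 5 };
	struct channel c2 = { .target_cpu = 2, .avail_pct = 50 };

	chans[0] = &c0;
	chans[2] = &c2;

	/* Home channel (CPU 0) is below the low-water mark, so the
	 * NUMA-local channel on CPU 2 is chosen instead.
	 */
	printf("picked channel on CPU %d\n", pick_channel(0)->target_cpu);
	return 0;
}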