mirror of https://github.com/torvalds/linux.git
Merge branch '6.18/scsi-queue' into 6.18/scsi-fixes
Pull in outstanding SCSI fixes for 6.18.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 4827790660
@@ -24,6 +24,10 @@ properties:
           - enum:
               - qcom,qcs8300-qmp-ufs-phy
           - const: qcom,sa8775p-qmp-ufs-phy
+      - items:
+          - enum:
+              - qcom,kaanapali-qmp-ufs-phy
+          - const: qcom,sm8750-qmp-ufs-phy
       - enum:
           - qcom,msm8996-qmp-ufs-phy
           - qcom,msm8998-qmp-ufs-phy
@@ -15,6 +15,7 @@ select:
     compatible:
       contains:
         enum:
+          - qcom,kaanapali-ufshc
           - qcom,sm8650-ufshc
           - qcom,sm8750-ufshc
   required:
@@ -24,6 +25,7 @@ properties:
   compatible:
     items:
       - enum:
+          - qcom,kaanapali-ufshc
           - qcom,sm8650-ufshc
           - qcom,sm8750-ufshc
       - const: qcom,ufshc
@@ -503,7 +503,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 		host_bcode = FC_ERROR;
 		goto err;
 	}
-	if (offset + len > fsp->data_len) {
+	if (size_add(offset, len) > fsp->data_len) {
 		/* this should never happen */
 		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
 		    fc_frame_crc_check(fp))
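The change above replaces a raw `offset + len` comparison with size_add() from include/linux/overflow.h, which saturates to SIZE_MAX instead of wrapping, so an oversized offset/len pair from a bad frame can no longer wrap past the fsp->data_len bound. A minimal userspace sketch of the idea (size_add_sketch() is an illustrative stand-in, not the kernel's implementation):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's size_add() from
 * include/linux/overflow.h: saturate to SIZE_MAX on overflow
 * rather than silently wrapping around. */
static size_t size_add_sketch(size_t a, size_t b)
{
	size_t sum;

	if (__builtin_add_overflow(a, b, &sum))
		return SIZE_MAX;
	return sum;
}

int main(void)
{
	size_t data_len = 4096;
	size_t offset = SIZE_MAX - 8, len = 64;	/* naive sum wraps to 55 */

	/* Wrapped check passes the bound and would accept bogus data. */
	printf("naive:      %s\n",
	       offset + len > data_len ? "rejected" : "ACCEPTED (bug)");
	/* Saturated check: SIZE_MAX > 4096, correctly rejected. */
	printf("saturating: %s\n",
	       size_add_sketch(offset, len) > data_len ? "rejected" : "accepted");
	return 0;
}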
@@ -4104,7 +4104,7 @@ void qla4xxx_srb_compl(struct kref *ref)
  * The mid-level driver tries to ensure that queuecommand never gets
  * invoked concurrently with itself or the interrupt handler (although
  * the interrupt handler may call this routine as part of request-
- * completion handling). Unfortunely, it sometimes calls the scheduler
+ * completion handling). Unfortunately, it sometimes calls the scheduler
  * in interrupt context which is a big NO! NO!.
  **/
 static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
@@ -4647,7 +4647,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
 		cmd = scsi_host_find_tag(ha->host, index);
 		/*
 		 * We cannot just check if the index is valid,
-		 * becase if we are run from the scsi eh, then
+		 * because if we are run from the scsi eh, then
 		 * the scsi/block layer is going to prevent
 		 * the tag from being released.
 		 */
@@ -4952,7 +4952,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
 	/* Upon successful firmware/chip reset, re-initialize the adapter */
 	if (status == QLA_SUCCESS) {
 		/* For ISP-4xxx, force function 1 to always initialize
-		 * before function 3 to prevent both funcions from
+		 * before function 3 to prevent both functions from
 		 * stepping on top of the other */
 		if (is_qla40XX(ha) && (ha->mac_index == 3))
 			ssleep(6);
@@ -6914,7 +6914,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
 	struct ddb_entry *ddb_entry = NULL;
 
 	/* Create session object, with INVALID_ENTRY,
-	 * the targer_id would get set when we issue the login
+	 * the target_id would get set when we issue the login
 	 */
 	cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
 				       cmds_max, sizeof(struct ddb_entry),
@@ -1406,14 +1406,19 @@ static struct vmbus_channel *get_og_chn(struct storvsc_device *stor_device,
 	}
 
 	/*
-	 * Our channel array is sparsley populated and we
+	 * Our channel array could be sparsley populated and we
 	 * initiated I/O on a processor/hw-q that does not
 	 * currently have a designated channel. Fix this.
 	 * The strategy is simple:
-	 * I. Ensure NUMA locality
-	 * II. Distribute evenly (best effort)
+	 * I. Prefer the channel associated with the current CPU
+	 * II. Ensure NUMA locality
+	 * III. Distribute evenly (best effort)
 	 */
 
+	/* Prefer the channel on the I/O issuing processor/hw-q */
+	if (cpumask_test_cpu(q_num, &stor_device->alloced_cpus))
+		return stor_device->stor_chns[q_num];
+
 	node_mask = cpumask_of_node(cpu_to_node(q_num));
 
 	num_channels = 0;
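Tier I above is just a cpumask membership test taken before the more expensive NUMA search. A hedged miniature of that fast path, using a plain 64-bit mask in place of the kernel's struct cpumask (the mini_stor type and its field names are hypothetical):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical miniature of get_og_chn()'s tier-I fast path; the real
 * driver keeps a struct cpumask (alloced_cpus) and an array of
 * vmbus_channel pointers (stor_chns). */
struct mini_stor {
	uint64_t alloced;	/* bit n set: CPU n owns a channel */
	void *chns[64];		/* per-CPU channel pointers */
};

void *pick_local_channel(const struct mini_stor *s, unsigned int q_num)
{
	/* Tier I: the issuing CPU/hw-q already has a designated channel. */
	if (q_num < 64 && (s->alloced & (UINT64_C(1) << q_num)))
		return s->chns[q_num];
	return NULL;	/* caller falls through to the NUMA-local search */
}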
@@ -1469,59 +1474,48 @@ static int storvsc_do_io(struct hv_device *device,
 	/* See storvsc_change_target_cpu(). */
 	outgoing_channel = READ_ONCE(stor_device->stor_chns[q_num]);
 	if (outgoing_channel != NULL) {
-		if (outgoing_channel->target_cpu == q_num) {
-			/*
-			 * Ideally, we want to pick a different channel if
-			 * available on the same NUMA node.
-			 */
-			node_mask = cpumask_of_node(cpu_to_node(q_num));
-			for_each_cpu_wrap(tgt_cpu,
-				 &stor_device->alloced_cpus, q_num + 1) {
-				if (!cpumask_test_cpu(tgt_cpu, node_mask))
-					continue;
-				if (tgt_cpu == q_num)
-					continue;
-				channel = READ_ONCE(
-					stor_device->stor_chns[tgt_cpu]);
-				if (channel == NULL)
-					continue;
-				if (hv_get_avail_to_write_percent(
-							&channel->outbound)
-						> ring_avail_percent_lowater) {
-					outgoing_channel = channel;
-					goto found_channel;
-				}
-			}
-
-			/*
-			 * All the other channels on the same NUMA node are
-			 * busy. Try to use the channel on the current CPU
-			 */
-			if (hv_get_avail_to_write_percent(
-						&outgoing_channel->outbound)
-					> ring_avail_percent_lowater)
-				goto found_channel;
-
-			/*
-			 * If we reach here, all the channels on the current
-			 * NUMA node are busy. Try to find a channel in
-			 * other NUMA nodes
-			 */
-			for_each_cpu(tgt_cpu, &stor_device->alloced_cpus) {
-				if (cpumask_test_cpu(tgt_cpu, node_mask))
-					continue;
-				channel = READ_ONCE(
-					stor_device->stor_chns[tgt_cpu]);
-				if (channel == NULL)
-					continue;
-				if (hv_get_avail_to_write_percent(
-							&channel->outbound)
-						> ring_avail_percent_lowater) {
-					outgoing_channel = channel;
-					goto found_channel;
-				}
-			}
-		}
+		if (hv_get_avail_to_write_percent(&outgoing_channel->outbound)
+				> ring_avail_percent_lowater)
+			goto found_channel;
+
+		/*
+		 * Channel is busy, try to find a channel on the same NUMA node
+		 */
+		node_mask = cpumask_of_node(cpu_to_node(q_num));
+		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+				  q_num + 1) {
+			if (!cpumask_test_cpu(tgt_cpu, node_mask))
+				continue;
+			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+			if (!channel)
+				continue;
+			if (hv_get_avail_to_write_percent(&channel->outbound)
+					> ring_avail_percent_lowater) {
+				outgoing_channel = channel;
+				goto found_channel;
+			}
+		}
+
+		/*
+		 * If we reach here, all the channels on the current
+		 * NUMA node are busy. Try to find a channel in
+		 * all NUMA nodes
+		 */
+		for_each_cpu_wrap(tgt_cpu, &stor_device->alloced_cpus,
+				  q_num + 1) {
+			channel = READ_ONCE(stor_device->stor_chns[tgt_cpu]);
+			if (!channel)
+				continue;
+			if (hv_get_avail_to_write_percent(&channel->outbound)
+					> ring_avail_percent_lowater) {
+				outgoing_channel = channel;
+				goto found_channel;
+			}
+		}
+		/*
+		 * If we reach here, all the channels are busy. Use the
+		 * original channel found.
+		 */
 	} else {
 		spin_lock_irqsave(&stor_device->lock, flags);
 		outgoing_channel = stor_device->stor_chns[q_num];
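Net effect of the rework: storvsc_do_io() now checks the already-designated channel's ring headroom first, then scans NUMA-local channels, then all channels, and finally reuses the original channel even if busy. A self-contained sketch of that fallback ladder, under stated assumptions: generic types, a fixed NR_CPUS, and a has_headroom() predicate standing in for the driver's hv_get_avail_to_write_percent() > ring_avail_percent_lowater test:

#include <stdbool.h>
#include <stddef.h>

#define NR_CPUS 8

struct channel {
	int numa_node;		/* NUMA node of the channel's target CPU */
	int headroom_pct;	/* free space in the outbound ring, percent */
};

static struct channel *chans[NR_CPUS];	/* per-CPU channel table (sketch) */
static const int lowater_pct = 10;	/* stand-in for ring_avail_percent_lowater */

static bool has_headroom(const struct channel *ch)
{
	return ch && ch->headroom_pct > lowater_pct;
}

/* Fallback ladder mirroring the reworked storvsc_do_io() flow:
 * 1) the channel designated for this hw queue, if it has headroom;
 * 2) a non-busy channel on the same NUMA node, scanning from q_num + 1
 *    (the wrap mimics for_each_cpu_wrap() to spread the load);
 * 3) any non-busy channel at all;
 * 4) otherwise the original (busy) channel. */
struct channel *pick_channel(unsigned int q_num, int node)
{
	struct channel *orig = chans[q_num];
	unsigned int i, cpu;

	if (has_headroom(orig))
		return orig;

	for (i = 1; i <= NR_CPUS; i++) {	/* pass 2: same NUMA node */
		cpu = (q_num + i) % NR_CPUS;
		if (chans[cpu] && chans[cpu]->numa_node == node &&
		    has_headroom(chans[cpu]))
			return chans[cpu];
	}
	for (i = 1; i <= NR_CPUS; i++) {	/* pass 3: any node */
		cpu = (q_num + i) % NR_CPUS;
		if (has_headroom(chans[cpu]))
			return chans[cpu];
	}
	return orig;	/* pass 4: everything busy, keep the original */
}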