| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /drivers/scsi/scsi_lib.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'drivers/scsi/scsi_lib.c')
| -rw-r--r-- | drivers/scsi/scsi_lib.c | 32 | 
1 file changed, 8 insertions, 24 deletions
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e9b4f279d29c..41e9ac9fc138 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -265,7 +265,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	struct scsi_request *rq;
 	int ret = DRIVER_ERROR << 24;
 
-	req = blk_get_request_flags(sdev->request_queue,
+	req = blk_get_request(sdev->request_queue,
 			data_direction == DMA_TO_DEVICE ?
 			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
 	if (IS_ERR(req))
@@ -273,7 +273,7 @@
 	rq = scsi_req(req);
 
 	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,
-					buffer, bufflen, __GFP_RECLAIM))
+					buffer, bufflen, GFP_NOIO))
 		goto out;
 
 	rq->cmd_len = COMMAND_SIZE(cmd[0]);
@@ -985,6 +985,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				case 0x08: /* Long write in progress */
 				case 0x09: /* self test in progress */
 				case 0x14: /* space allocation in progress */
+				case 0x1a: /* start stop unit in progress */
+				case 0x1b: /* sanitize in progress */
+				case 0x1d: /* configuration in progress */
+				case 0x24: /* depopulation in progress */
 					action = ACTION_DELAYED_RETRY;
 					break;
 				default:
@@ -2149,27 +2153,6 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
 	return blk_mq_map_queues(set);
 }
 
-static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
-{
-	struct device *host_dev;
-	u64 bounce_limit = 0xffffffff;
-
-	if (shost->unchecked_isa_dma)
-		return BLK_BOUNCE_ISA;
-	/*
-	 * Platforms with virtual-DMA translation
-	 * hardware have no practical limit.
-	 */
-	if (!PCI_DMA_BUS_IS_PHYS)
-		return BLK_BOUNCE_ANY;
-
-	host_dev = scsi_get_device(shost);
-	if (host_dev && host_dev->dma_mask)
-		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
-
-	return bounce_limit;
-}
-
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 {
 	struct device *dev = shost->dma_dev;
@@ -2189,7 +2172,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
 	}
 
 	blk_queue_max_hw_sectors(q, shost->max_sectors);
-	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
+	if (shost->unchecked_isa_dma)
+		blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
 
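For context on the scsi_io_completion() hunk above: it widens the set of ASC 0x04 ("LUN not ready, <reason> in progress") additional sense code qualifiers that are treated as transient and retried after a delay rather than failed outright. Below is a minimal user-space sketch of that mapping, assuming (as in mainline) that this switch runs on the ASCQ under the NOT READY / ASC 0x04 path; the enum, helper name, and test harness are illustrative stand-ins, not kernel API, and only the ASCQ values visible in the hunk are listed.

```c
#include <stdio.h>

/* Illustrative stand-in for the kernel's completion action; not kernel API. */
enum action { ACTION_FAIL, ACTION_DELAYED_RETRY };

/*
 * Map a NOT READY sense (ASC 0x04, "LUN not ready, <reason> in progress")
 * to a completion action.  Only the ASCQ values shown in the hunk are
 * listed; the kernel switch carries additional cases elided here.
 */
static enum action not_ready_action(unsigned char asc, unsigned char ascq)
{
	if (asc != 0x04)
		return ACTION_FAIL;

	switch (ascq) {
	case 0x08: /* Long write in progress */
	case 0x09: /* self test in progress */
	case 0x14: /* space allocation in progress */
	case 0x1a: /* start stop unit in progress */
	case 0x1b: /* sanitize in progress */
	case 0x1d: /* configuration in progress */
	case 0x24: /* depopulation in progress */
		return ACTION_DELAYED_RETRY;
	default:
		return ACTION_FAIL;
	}
}

int main(void)
{
	/* 0x04/0x1b (sanitize in progress) is now retried instead of failed. */
	printf("sanitize in progress -> %s\n",
	       not_ready_action(0x04, 0x1b) == ACTION_DELAYED_RETRY ?
	       "ACTION_DELAYED_RETRY" : "ACTION_FAIL");
	return 0;
}
```

The practical effect is that a device reporting, for example, "sanitize in progress" now has the command re-queued after a delay instead of being failed up the stack.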
