summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/ide/Config.help29
-rw-r--r--drivers/ide/Config.in5
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/ide-cd.c41
-rw-r--r--drivers/ide/ide-disk.c373
-rw-r--r--drivers/ide/ide-dma.c225
-rw-r--r--drivers/ide/ide-probe.c19
-rw-r--r--drivers/ide/ide-taskfile.c82
-rw-r--r--drivers/ide/ide-tcq.c689
-rw-r--r--drivers/ide/ide.c431
-rw-r--r--drivers/ide/pdc202xx.c6
11 files changed, 314 insertions, 1587 deletions
diff --git a/drivers/ide/Config.help b/drivers/ide/Config.help
index 4896b4470321..4863d212895c 100644
--- a/drivers/ide/Config.help
+++ b/drivers/ide/Config.help
@@ -749,35 +749,6 @@ CONFIG_IDEDMA_ONLYDISK
Generally say N here.
-CONFIG_BLK_DEV_IDE_TCQ
- Support for tagged command queueing on ATA disk drives. This enables
- the IDE layer to have multiple in-flight requests on hardware that
- supports it. For now this includes the IBM Deskstar series drives,
- such as the 22GXP, 75GXP, 40GV, 60GXP, and 120GXP (ie any Deskstar made
- in the last couple of years), and at least some of the Western
- Digital drives in the Expert series.
-
- If you have such a drive, say Y here.
-
-CONFIG_BLK_DEV_IDE_TCQ_DEPTH
- Maximum size of commands to enable per-drive. Any value between 1
- and 32 is valid, with 32 being the maxium that the hardware supports.
-
- You probably just want the default of 32 here. If you enter an invalid
- number, the default value will be used.
-
-CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
- Enabled tagged command queueing unconditionally on drives that report
- support for it. Regardless of the chosen value here, tagging can be
- controlled at run time:
-
- echo "using_tcq:32" > /proc/ide/hdX/settings
-
- where any value between 1-32 selects chosen queue depth and enables
- TCQ, and 0 disables it.
-
- Generally say Y here.
-
CONFIG_BLK_DEV_IT8172
Say Y here to support the on-board IDE controller on the Integrated
Technology Express, Inc. ITE8172 SBC. Vendor page at
diff --git a/drivers/ide/Config.in b/drivers/ide/Config.in
index 9547b3d9cb19..342d76d0073d 100644
--- a/drivers/ide/Config.in
+++ b/drivers/ide/Config.in
@@ -47,11 +47,6 @@ if [ "$CONFIG_BLK_DEV_IDE" != "n" ]; then
dep_bool ' Use PCI DMA by default when available' CONFIG_IDEDMA_PCI_AUTO $CONFIG_BLK_DEV_IDEDMA_PCI
dep_bool ' Enable DMA only for disks ' CONFIG_IDEDMA_ONLYDISK $CONFIG_IDEDMA_PCI_AUTO
define_bool CONFIG_BLK_DEV_IDEDMA $CONFIG_BLK_DEV_IDEDMA_PCI
- dep_bool ' ATA tagged command queueing' CONFIG_BLK_DEV_IDE_TCQ $CONFIG_BLK_DEV_IDEDMA_PCI
- dep_bool ' TCQ on by default' CONFIG_BLK_DEV_IDE_TCQ_DEFAULT $CONFIG_BLK_DEV_IDE_TCQ
- if [ "$CONFIG_BLK_DEV_IDE_TCQ" != "n" ]; then
- int ' Default queue depth' CONFIG_BLK_DEV_IDE_TCQ_DEPTH 32
- fi
dep_bool ' ATA Work(s) In Progress (EXPERIMENTAL)' CONFIG_IDEDMA_PCI_WIP $CONFIG_BLK_DEV_IDEDMA_PCI $CONFIG_EXPERIMENTAL
dep_bool ' Good-Bad DMA Model-Firmware (WIP)' CONFIG_IDEDMA_NEW_DRIVE_LISTINGS $CONFIG_IDEDMA_PCI_WIP
dep_bool ' AEC62XX chipset support' CONFIG_BLK_DEV_AEC62XX $CONFIG_BLK_DEV_IDEDMA_PCI
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 7bf39a68e7c2..3b127d14423a 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -44,7 +44,6 @@ ide-obj-$(CONFIG_BLK_DEV_HPT366) += hpt366.o
ide-obj-$(CONFIG_BLK_DEV_HT6560B) += ht6560b.o
ide-obj-$(CONFIG_BLK_DEV_IDE_ICSIDE) += icside.o
ide-obj-$(CONFIG_BLK_DEV_IDEDMA_PCI) += ide-dma.o
-ide-obj-$(CONFIG_BLK_DEV_IDE_TCQ) += ide-tcq.o
ide-obj-$(CONFIG_BLK_DEV_IDEPCI) += ide-pci.o
ide-obj-$(CONFIG_BLK_DEV_ISAPNP) += ide-pnp.o
ide-obj-$(CONFIG_BLK_DEV_IDE_PMAC) += ide-pmac.o
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 29e0be7635c0..a4eca366dcd1 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -558,10 +558,6 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
if ((rq->flags & REQ_CMD) && !rq->current_nr_sectors)
uptodate = 1;
-#if 0
- /* FIXME --mdcki */
- HWGROUP(drive)->rq->special = NULL;
-#endif
ide_end_request(drive, uptodate);
}
@@ -1217,22 +1213,13 @@ static void restore_request (struct request *rq)
/*
* Start a read request from the CD-ROM.
*/
-static ide_startstop_t cdrom_start_read(struct ata_device *drive, struct ata_request *ar, unsigned int block)
+static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block)
{
struct cdrom_info *info = drive->driver_data;
- struct request *rq = ar->ar_rq;
-
- if (ar->ar_flags & ATA_AR_QUEUED) {
-// spin_lock_irqsave(DRIVE_LOCK(drive), flags);
- blkdev_dequeue_request(rq);
-// spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
- }
-
+ struct request *rq = HWGROUP(drive)->rq;
restore_request(rq);
- rq->special = ar;
-
/* Satisfy whatever we can of this request from our cached sector. */
if (cdrom_read_from_buffer(drive))
return ide_stopped;
@@ -1665,30 +1652,8 @@ ide_cdrom_do_request(struct ata_device *drive, struct request *rq, sector_t bloc
if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap)
action = cdrom_start_seek (drive, block);
else {
- unsigned long flags;
- struct ata_request *ar;
-
- /*
- * get a new command (push ar further down to avoid grabbing lock here
- */
- spin_lock_irqsave(DRIVE_LOCK(drive), flags);
-
- ar = ata_ar_get(drive);
-
- /*
- * we've reached maximum queue depth, bail
- */
- if (!ar) {
- spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
-
- return ide_started;
- }
-
- ar->ar_rq = rq;
- spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
-
if (rq_data_dir(rq) == READ)
- action = cdrom_start_read(drive, ar, block);
+ action = cdrom_start_read(drive, block);
else
action = cdrom_start_write(drive, rq);
}
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 4269d14d3453..182fac966e53 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -88,162 +88,135 @@ static int lba_capacity_is_ok(struct hd_driveid *id)
return 0; /* lba_capacity value may be bad */
}
-/*
- * Determine the apriopriate hardware command correspnding to the action in
- * question, depending upon the device capabilities and setup.
- */
static u8 get_command(ide_drive_t *drive, int cmd)
{
int lba48bit = (drive->id->cfs_enable_2 & 0x0400) ? 1 : 0;
- /* Well, calculating the command in this variable may be an
- * overoptimization. */
- u8 command = WIN_NOP;
#if 1
lba48bit = drive->addressing;
#endif
- /*
- * 48-bit commands are pretty sanely laid out
- */
if (lba48bit) {
- command = cmd == READ ? WIN_READ_EXT : WIN_WRITE_EXT;
-
- if (drive->using_dma) {
- command++; /* WIN_*DMA_EXT */
- if (drive->using_tcq)
- command++; /* WIN_*DMA_QUEUED_EXT */
- } else if (drive->mult_count)
- command += 5; /* WIN_MULT*_EXT */
-
- return command;
- }
-
- /*
- * 28-bit commands seem not to be, though...
- */
- if (cmd == READ) {
- if (drive->using_dma) {
- if (drive->using_tcq)
- command = WIN_READDMA_QUEUED;
+ if (cmd == READ) {
+ if (drive->using_dma)
+ return WIN_READDMA_EXT;
+ else if (drive->mult_count)
+ return WIN_MULTREAD_EXT;
else
- command = WIN_READDMA;
- } else if (drive->mult_count)
- command = WIN_MULTREAD;
- else
- command = WIN_READ;
+ return WIN_READ_EXT;
+ } else if (cmd == WRITE) {
+ if (drive->using_dma)
+ return WIN_WRITEDMA_EXT;
+ else if (drive->mult_count)
+ return WIN_MULTWRITE_EXT;
+ else
+ return WIN_WRITE_EXT;
+ }
} else {
- if (drive->using_dma) {
- if (drive->using_tcq)
- command = WIN_WRITEDMA_QUEUED;
+ if (cmd == READ) {
+ if (drive->using_dma)
+ return WIN_READDMA;
+ else if (drive->mult_count)
+ return WIN_MULTREAD;
+ else
+ return WIN_READ;
+ } else if (cmd == WRITE) {
+ if (drive->using_dma)
+ return WIN_WRITEDMA;
+ else if (drive->mult_count)
+ return WIN_MULTWRITE;
else
- command = WIN_WRITEDMA;
- } else if (drive->mult_count)
- command = WIN_MULTWRITE;
- else
- command = WIN_WRITE;
+ return WIN_WRITE;
+ }
}
-
- return command;
+ return WIN_NOP;
}
-static ide_startstop_t chs_do_request(ide_drive_t *drive, struct ata_request *ar, sector_t block)
+static ide_startstop_t chs_do_request(ide_drive_t *drive, struct request *rq, unsigned long block)
{
- struct ata_taskfile *args = &ar->ar_task;
- struct request *rq = ar->ar_rq;
- int sectors = rq->nr_sectors;
+ struct hd_drive_task_hdr taskfile;
+ struct hd_drive_hob_hdr hobfile;
+ struct ata_taskfile args;
+ int sectors;
- unsigned int track = (block / drive->sect);
- unsigned int sect = (block % drive->sect) + 1;
- unsigned int head = (track % drive->head);
- unsigned int cyl = (track / drive->head);
+ unsigned int track = (block / drive->sect);
+ unsigned int sect = (block % drive->sect) + 1;
+ unsigned int head = (track % drive->head);
+ unsigned int cyl = (track / drive->head);
- memset(&args->taskfile, 0, sizeof(struct hd_drive_task_hdr));
- memset(&args->hobfile, 0, sizeof(struct hd_drive_hob_hdr));
+ memset(&taskfile, 0, sizeof(struct hd_drive_task_hdr));
+ memset(&hobfile, 0, sizeof(struct hd_drive_hob_hdr));
+ sectors = rq->nr_sectors;
if (sectors == 256)
sectors = 0;
- if (ar->ar_flags & ATA_AR_QUEUED) {
- unsigned long flags;
+ taskfile.sector_count = sectors;
- args->taskfile.feature = sectors;
- args->taskfile.sector_count = ar->ar_tag << 3;
+ taskfile.sector_number = sect;
+ taskfile.low_cylinder = cyl;
+ taskfile.high_cylinder = (cyl>>8);
- spin_lock_irqsave(DRIVE_LOCK(drive), flags);
- blkdev_dequeue_request(rq);
- spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
- } else
- args->taskfile.sector_count = sectors;
-
- args->taskfile.sector_number = sect;
- args->taskfile.low_cylinder = cyl;
- args->taskfile.high_cylinder = (cyl>>8);
-
- args->taskfile.device_head = head;
- args->taskfile.device_head |= drive->select.all;
- args->taskfile.command = get_command(drive, rq_data_dir(rq));
+ taskfile.device_head = head;
+ taskfile.device_head |= drive->select.all;
+ taskfile.command = get_command(drive, rq_data_dir(rq));
#ifdef DEBUG
printk("%s: %sing: ", drive->name,
(rq_data_dir(rq)==READ) ? "read" : "writ");
+ if (lba) printk("LBAsect=%lld, ", block);
+ else printk("CHS=%d/%d/%d, ", cyl, head, sect);
printk("sectors=%ld, ", rq->nr_sectors);
- printk("CHS=%d/%d/%d, ", cyl, head, sect);
printk("buffer=0x%08lx\n", (unsigned long) rq->buffer);
#endif
- ide_cmd_type_parser(args);
- args->ar = ar;
- rq->special = ar;
+ args.taskfile = taskfile;
+ args.hobfile = hobfile;
+ ide_cmd_type_parser(&args);
+ rq->special = &args;
- return ata_taskfile(drive, args, rq);
+ return ata_taskfile(drive, &args, rq);
}
-static ide_startstop_t lba28_do_request(ide_drive_t *drive, struct ata_request *ar, sector_t block)
+static ide_startstop_t lba28_do_request(ide_drive_t *drive, struct request *rq, unsigned long block)
{
- struct ata_taskfile *args = &ar->ar_task;
- struct request *rq = ar->ar_rq;
- int sectors = rq->nr_sectors;
+ struct hd_drive_task_hdr taskfile;
+ struct hd_drive_hob_hdr hobfile;
+ struct ata_taskfile args;
+ int sectors;
+ sectors = rq->nr_sectors;
if (sectors == 256)
sectors = 0;
- memset(&args->taskfile, 0, sizeof(struct hd_drive_task_hdr));
- memset(&args->hobfile, 0, sizeof(struct hd_drive_hob_hdr));
-
- if (ar->ar_flags & ATA_AR_QUEUED) {
- unsigned long flags;
+ memset(&taskfile, 0, sizeof(struct hd_drive_task_hdr));
+ memset(&hobfile, 0, sizeof(struct hd_drive_hob_hdr));
- args->taskfile.feature = sectors;
- args->taskfile.sector_count = ar->ar_tag << 3;
+ taskfile.sector_count = sectors;
+ taskfile.sector_number = block;
+ taskfile.low_cylinder = (block >>= 8);
- spin_lock_irqsave(DRIVE_LOCK(drive), flags);
- blkdev_dequeue_request(rq);
- spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
- } else
- args->taskfile.sector_count = sectors;
+ taskfile.high_cylinder = (block >>= 8);
- args->taskfile.sector_number = block;
- args->taskfile.low_cylinder = (block >>= 8);
-
- args->taskfile.high_cylinder = (block >>= 8);
-
- args->taskfile.device_head = ((block >> 8) & 0x0f);
- args->taskfile.device_head |= drive->select.all;
- args->taskfile.command = get_command(drive, rq_data_dir(rq));
+ taskfile.device_head = ((block >> 8) & 0x0f);
+ taskfile.device_head |= drive->select.all;
+ taskfile.command = get_command(drive, rq_data_dir(rq));
#ifdef DEBUG
printk("%s: %sing: ", drive->name,
(rq_data_dir(rq)==READ) ? "read" : "writ");
- printk("sector=%lx, sectors=%ld, ", block, rq->nr_sectors);
+ if (lba) printk("LBAsect=%lld, ", block);
+ else printk("CHS=%d/%d/%d, ", cyl, head, sect);
+ printk("sectors=%ld, ", rq->nr_sectors);
printk("buffer=0x%08lx\n", (unsigned long) rq->buffer);
#endif
- ide_cmd_type_parser(args);
- args->ar = ar;
- rq->special = ar;
+ args.taskfile = taskfile;
+ args.hobfile = hobfile;
+ ide_cmd_type_parser(&args);
+ rq->special = &args;
- return ata_taskfile(drive, args, rq);
+ return ata_taskfile(drive, &args, rq);
}
/*
@@ -251,58 +224,57 @@ static ide_startstop_t lba28_do_request(ide_drive_t *drive, struct ata_request *
* 320173056 == 163929 MB or 48bit addressing
* 1073741822 == 549756 MB or 48bit addressing fake drive
*/
-static ide_startstop_t lba48_do_request(ide_drive_t *drive, struct ata_request *ar, sector_t block)
+
+static ide_startstop_t lba48_do_request(ide_drive_t *drive, struct request *rq, unsigned long long block)
{
- struct ata_taskfile *args = &ar->ar_task;
- struct request *rq = ar->ar_rq;
- int sectors = rq->nr_sectors;
+ struct hd_drive_task_hdr taskfile;
+ struct hd_drive_hob_hdr hobfile;
+ struct ata_taskfile args;
+ int sectors;
- memset(&args->taskfile, 0, sizeof(struct hd_drive_task_hdr));
- memset(&args->hobfile, 0, sizeof(struct hd_drive_hob_hdr));
+ memset(&taskfile, 0, sizeof(struct hd_drive_task_hdr));
+ memset(&hobfile, 0, sizeof(struct hd_drive_hob_hdr));
+ sectors = rq->nr_sectors;
if (sectors == 65536)
sectors = 0;
- if (ar->ar_flags & ATA_AR_QUEUED) {
- unsigned long flags;
+ taskfile.sector_count = sectors;
+ hobfile.sector_count = sectors >> 8;
- args->taskfile.feature = sectors;
- args->hobfile.feature = sectors >> 8;
- args->taskfile.sector_count = ar->ar_tag << 3;
-
- spin_lock_irqsave(DRIVE_LOCK(drive), flags);
- blkdev_dequeue_request(rq);
- spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
- } else {
- args->taskfile.sector_count = sectors;
- args->hobfile.sector_count = sectors >> 8;
+ if (rq->nr_sectors == 65536) {
+ taskfile.sector_count = 0x00;
+ hobfile.sector_count = 0x00;
}
- args->taskfile.sector_number = block;
- args->taskfile.low_cylinder = (block >>= 8);
- args->taskfile.high_cylinder = (block >>= 8);
+ taskfile.sector_number = block; /* low lba */
+ taskfile.low_cylinder = (block >>= 8); /* mid lba */
+ taskfile.high_cylinder = (block >>= 8); /* hi lba */
- args->hobfile.sector_number = (block >>= 8);
- args->hobfile.low_cylinder = (block >>= 8);
- args->hobfile.high_cylinder = (block >>= 8);
+ hobfile.sector_number = (block >>= 8); /* low lba */
+ hobfile.low_cylinder = (block >>= 8); /* mid lba */
+ hobfile.high_cylinder = (block >>= 8); /* hi lba */
- args->taskfile.device_head = drive->select.all;
- args->hobfile.device_head = args->taskfile.device_head;
- args->hobfile.control = (drive->ctl|0x80);
- args->taskfile.command = get_command(drive, rq_data_dir(rq));
+ taskfile.device_head = drive->select.all;
+ hobfile.device_head = taskfile.device_head;
+ hobfile.control = (drive->ctl|0x80);
+ taskfile.command = get_command(drive, rq_data_dir(rq));
#ifdef DEBUG
printk("%s: %sing: ", drive->name,
(rq_data_dir(rq)==READ) ? "read" : "writ");
- printk("sector=%lx, sectors=%ld, ", block, rq->nr_sectors);
+ if (lba) printk("LBAsect=%lld, ", block);
+ else printk("CHS=%d/%d/%d, ", cyl, head, sect);
+ printk("sectors=%ld, ", rq->nr_sectors);
printk("buffer=0x%08lx\n", (unsigned long) rq->buffer);
#endif
- ide_cmd_type_parser(args);
- args->ar = ar;
- rq->special = ar;
+ args.taskfile = taskfile;
+ args.hobfile = hobfile;
+ ide_cmd_type_parser(&args);
+ rq->special = &args;
- return ata_taskfile(drive, args, rq);
+ return ata_taskfile(drive, &args, rq);
}
/*
@@ -310,11 +282,8 @@ static ide_startstop_t lba48_do_request(ide_drive_t *drive, struct ata_request *
* otherwise, to address sectors. It also takes care of issuing special
* DRIVE_CMDs.
*/
-static ide_startstop_t idedisk_do_request(ide_drive_t *drive, struct request *rq, sector_t block)
+static ide_startstop_t idedisk_do_request(ide_drive_t *drive, struct request *rq, unsigned long block)
{
- unsigned long flags;
- struct ata_request *ar;
-
/*
* Wait until all request have bin finished.
*/
@@ -336,49 +305,16 @@ static ide_startstop_t idedisk_do_request(ide_drive_t *drive, struct request *rq
return promise_rw_disk(drive, rq, block);
}
- /*
- * get a new command (push ar further down to avoid grabbing lock here
- */
- spin_lock_irqsave(DRIVE_LOCK(drive), flags);
-
- ar = ata_ar_get(drive);
-
- /*
- * we've reached maximum queue depth, bail
- */
- if (!ar) {
- spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
- return ide_started;
- }
-
- ar->ar_rq = rq;
-
- if (drive->using_tcq) {
- int tag = ide_get_tag(drive);
-
- BUG_ON(drive->tcq->active_tag != -1);
-
- /* Set the tag: */
- ar->ar_flags |= ATA_AR_QUEUED;
- ar->ar_tag = tag;
- drive->tcq->ar[tag] = ar;
- drive->tcq->active_tag = tag;
- ar->ar_time = jiffies;
- drive->tcq->queued++;
- }
-
- spin_unlock_irqrestore(DRIVE_LOCK(drive), flags);
-
/* 48-bit LBA */
if ((drive->id->cfs_enable_2 & 0x0400) && (drive->addressing))
- return lba48_do_request(drive, ar, block);
+ return lba48_do_request(drive, rq, block);
/* 28-bit LBA */
if (drive->select.b.lba)
- return lba28_do_request(drive, ar, block);
+ return lba28_do_request(drive, rq, block);
/* 28-bit CHS */
- return chs_do_request(drive, ar, block);
+ return chs_do_request(drive, rq, block);
}
static int idedisk_open (struct inode *inode, struct file *filp, ide_drive_t *drive)
@@ -861,71 +797,11 @@ static int proc_idedisk_read_smart_values
PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
}
-#ifdef CONFIG_BLK_DEV_IDE_TCQ
-static int proc_idedisk_read_tcq
- (char *page, char **start, off_t off, int count, int *eof, void *data)
-{
- ide_drive_t *drive = (ide_drive_t *) data;
- char *out = page;
- int len, cmds, i;
- unsigned long tag_mask = 0, flags, cur_jif = jiffies, max_jif;
-
- if (!drive->tcq) {
- len = sprintf(out, "not configured\n");
- PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
- }
-
- spin_lock_irqsave(&ide_lock, flags);
-
- len = sprintf(out, "TCQ currently on:\t%s\n", drive->using_tcq ? "yes" : "no");
- len += sprintf(out+len, "Max queue depth:\t%d\n",drive->queue_depth);
- len += sprintf(out+len, "Max achieved depth:\t%d\n",drive->tcq->max_depth);
- len += sprintf(out+len, "Max depth since last:\t%d\n",drive->tcq->max_last_depth);
- len += sprintf(out+len, "Current depth:\t\t%d\n", drive->tcq->queued);
- max_jif = 0;
- len += sprintf(out+len, "Active tags:\t\t[ ");
- for (i = 0, cmds = 0; i < drive->queue_depth; i++) {
- struct ata_request *ar = IDE_GET_AR(drive, i);
-
- if (!ar)
- continue;
-
- __set_bit(i, &tag_mask);
- len += sprintf(out+len, "%d, ", i);
- if (cur_jif - ar->ar_time > max_jif)
- max_jif = cur_jif - ar->ar_time;
- cmds++;
- }
- len += sprintf(out+len, "]\n");
-
- len += sprintf(out+len, "Queue:\t\t\treleased [ %d ] - started [ %d ]\n", drive->tcq->immed_rel, drive->tcq->immed_comp);
-
- if (drive->tcq->queued != cmds)
- len += sprintf(out+len, "pending request and queue count mismatch (counted: %d)\n", cmds);
-
- if (tag_mask != drive->tcq->tag_mask)
- len += sprintf(out+len, "tag masks differ (counted %lx != %lx\n", tag_mask, drive->tcq->tag_mask);
-
- len += sprintf(out+len, "DMA status:\t\t%srunning\n", test_bit(IDE_DMA, &HWGROUP(drive)->flags) ? "" : "not ");
-
- len += sprintf(out+len, "Oldest command:\t\t%lu jiffies\n", max_jif);
- len += sprintf(out+len, "Oldest command ever:\t%lu\n", drive->tcq->oldest_command);
-
- drive->tcq->max_last_depth = 0;
-
- spin_unlock_irqrestore(&ide_lock, flags);
- PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
-}
-#endif
-
static ide_proc_entry_t idedisk_proc[] = {
{ "cache", S_IFREG|S_IRUGO, proc_idedisk_read_cache, NULL },
{ "geometry", S_IFREG|S_IRUGO, proc_ide_read_geometry, NULL },
{ "smart_values", S_IFREG|S_IRUSR, proc_idedisk_read_smart_values, NULL },
{ "smart_thresholds", S_IFREG|S_IRUSR, proc_idedisk_read_smart_thresholds, NULL },
-#ifdef CONFIG_BLK_DEV_IDE_TCQ
- { "tcq", S_IFREG|S_IRUSR, proc_idedisk_read_tcq, NULL },
-#endif
{ NULL, 0, NULL, NULL }
};
@@ -1007,24 +883,6 @@ static int set_acoustic(ide_drive_t *drive, int arg)
return 0;
}
-#ifdef CONFIG_BLK_DEV_IDE_TCQ
-static int set_using_tcq(ide_drive_t *drive, int arg)
-{
- if (!drive->driver)
- return -EPERM;
- if (!drive->channel->dmaproc)
- return -EPERM;
- if (arg == drive->queue_depth && drive->using_tcq)
- return 0;
-
- drive->queue_depth = arg ? arg : 1;
- if (drive->channel->dmaproc(arg ? ide_dma_queued_on : ide_dma_queued_off, drive))
- return -EIO;
-
- return 0;
-}
-#endif
-
static int probe_lba_addressing (ide_drive_t *drive, int arg)
{
drive->addressing = 0;
@@ -1056,9 +914,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
ide_add_setting(drive, "acoustic", SETTING_RW, HDIO_GET_ACOUSTIC, HDIO_SET_ACOUSTIC, TYPE_BYTE, 0, 254, 1, 1, &drive->acoustic, set_acoustic);
ide_add_setting(drive, "failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->failures, NULL);
ide_add_setting(drive, "max_failures", SETTING_RW, -1, -1, TYPE_INT, 0, 65535, 1, 1, &drive->max_failures, NULL);
-#ifdef CONFIG_BLK_DEV_IDE_TCQ
- ide_add_setting(drive, "using_tcq", SETTING_RW, HDIO_GET_QDMA, HDIO_SET_QDMA, TYPE_BYTE, 0, IDE_MAX_TAG, 1, 1, &drive->using_tcq, set_using_tcq);
-#endif
}
static int idedisk_suspend(struct device *dev, u32 state, u32 level)
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index 8f25d626f274..a9fbc59407a7 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -209,36 +209,29 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
__ide_end_request(drive, 1, rq->nr_sectors);
return ide_stopped;
}
- printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
+ printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
drive->name, dma_stat);
}
return ide_error(drive, "dma_intr", stat);
}
-int ide_build_sglist(struct ata_channel *hwif, struct request *rq)
+static int ide_build_sglist(struct ata_channel *hwif, struct request *rq)
{
request_queue_t *q = &hwif->drives[DEVICE_NR(rq->rq_dev) & 1].queue;
- struct ata_request *ar = rq->special;
+ struct scatterlist *sg = hwif->sg_table;
+ int nents;
- if (!(ar->ar_flags & ATA_AR_SETUP)) {
- ar->ar_flags |= ATA_AR_SETUP;
- ar->ar_sg_nents = blk_rq_map_sg(q, rq, ar->ar_sg_table);
- }
+ nents = blk_rq_map_sg(q, rq, hwif->sg_table);
- if (rq->q && ar->ar_sg_nents > rq->nr_phys_segments) {
- printk("%s: received %d phys segments, build %d\n", __FILE__, rq->nr_phys_segments, ar->ar_sg_nents);
- return 0;
- } else if (!ar->ar_sg_nents) {
- printk("%s: zero segments in request\n", __FILE__);
- return 0;
- }
+ if (rq->q && nents > rq->nr_phys_segments)
+ printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
if (rq_data_dir(rq) == READ)
- ar->ar_sg_ddir = PCI_DMA_FROMDEVICE;
+ hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
else
- ar->ar_sg_ddir = PCI_DMA_TODEVICE;
+ hwif->sg_dma_direction = PCI_DMA_TODEVICE;
- return pci_map_sg(hwif->pci_dev, ar->ar_sg_table, ar->ar_sg_nents, ar->ar_sg_ddir);
+ return pci_map_sg(hwif->pci_dev, sg, nents, hwif->sg_dma_direction);
}
/*
@@ -247,17 +240,23 @@ int ide_build_sglist(struct ata_channel *hwif, struct request *rq)
*/
static int raw_build_sglist(struct ata_channel *ch, struct request *rq)
{
- struct ata_request *ar = rq->special;
- struct scatterlist *sg = ar->ar_sg_table;
- struct ata_taskfile *args = &ar->ar_task;
+ struct scatterlist *sg = ch->sg_table;
+ int nents = 0;
+ struct ata_taskfile *args = rq->special;
+#if 1
unsigned char *virt_addr = rq->buffer;
int sector_count = rq->nr_sectors;
- int nents = 0;
+#else
+ nents = blk_rq_map_sg(rq->q, rq, ch->sg_table);
+
+ if (nents > rq->nr_segments)
+ printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
+#endif
if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
- ar->ar_sg_ddir = PCI_DMA_TODEVICE;
+ ch->sg_dma_direction = PCI_DMA_TODEVICE;
else
- ar->ar_sg_ddir = PCI_DMA_FROMDEVICE;
+ ch->sg_dma_direction = PCI_DMA_FROMDEVICE;
if (sector_count > 128) {
memset(&sg[nents], 0, sizeof(*sg));
@@ -275,18 +274,18 @@ static int raw_build_sglist(struct ata_channel *ch, struct request *rq)
sg[nents].length = sector_count * SECTOR_SIZE;
nents++;
- return pci_map_sg(ch->pci_dev, sg, nents, ar->ar_sg_ddir);
+ return pci_map_sg(ch->pci_dev, sg, nents, ch->sg_dma_direction);
}
/*
- * Prepare a dma request.
+ * ide_build_dmatable() prepares a dma request.
* Returns 0 if all went okay, returns 1 otherwise.
- * This may also be invoked from trm290.c
+ * May also be invoked from trm290.c
*/
-int ide_build_dmatable(ide_drive_t *drive, struct request *rq,
- ide_dma_action_t func)
+int ide_build_dmatable (ide_drive_t *drive, ide_dma_action_t func)
{
struct ata_channel *hwif = drive->channel;
+ unsigned int *table = hwif->dmatable_cpu;
#ifdef CONFIG_BLK_DEV_TRM290
unsigned int is_trm290_chipset = (hwif->chipset == ide_trm290);
#else
@@ -295,19 +294,16 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq,
unsigned int count = 0;
int i;
struct scatterlist *sg;
- struct ata_request *ar = rq->special;
- unsigned int *table = ar->ar_dmatable_cpu;
-
- if (rq->flags & REQ_DRIVE_TASKFILE)
- ar->ar_sg_nents = raw_build_sglist(hwif, rq);
- else
- ar->ar_sg_nents = ide_build_sglist(hwif, rq);
- if (!ar->ar_sg_nents)
+ if (HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE) {
+ hwif->sg_nents = i = raw_build_sglist(hwif, HWGROUP(drive)->rq);
+ } else {
+ hwif->sg_nents = i = ide_build_sglist(hwif, HWGROUP(drive)->rq);
+ }
+ if (!i)
return 0;
- sg = ar->ar_sg_table;
- i = ar->ar_sg_nents;
+ sg = hwif->sg_table;
while (i) {
u32 cur_addr;
u32 cur_len;
@@ -326,7 +322,7 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq,
if (count++ >= PRD_ENTRIES) {
printk("ide-dma: req %p\n", HWGROUP(drive)->rq);
- printk("count %d, sg_nents %d, cur_len %d, cur_addr %u\n", count, ar->ar_sg_nents, cur_len, cur_addr);
+ printk("count %d, sg_nents %d, cur_len %d, cur_addr %u\n", count, hwif->sg_nents, cur_len, cur_addr);
BUG();
}
@@ -337,7 +333,7 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq,
if (is_trm290_chipset)
xcount = ((xcount >> 2) - 1) << 16;
if (xcount == 0x0000) {
- /*
+ /*
* Most chipsets correctly interpret a length of
* 0x0000 as 64KB, but at least one (e.g. CS5530)
* misinterprets it as zero (!). So here we break
@@ -345,8 +341,8 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq,
*/
if (count++ >= PRD_ENTRIES) {
pci_unmap_sg(hwif->pci_dev, sg,
- ar->ar_sg_nents,
- ar->ar_sg_ddir);
+ hwif->sg_nents,
+ hwif->sg_dma_direction);
return 0;
}
@@ -372,12 +368,13 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq,
}
/* Teardown mappings after DMA has completed. */
-void ide_destroy_dmatable(struct ata_device *d)
+void ide_destroy_dmatable (ide_drive_t *drive)
{
- struct pci_dev *dev = d->channel->pci_dev;
- struct ata_request *ar = IDE_CUR_AR(d);
+ struct pci_dev *dev = drive->channel->pci_dev;
+ struct scatterlist *sg = drive->channel->sg_table;
+ int nents = drive->channel->sg_nents;
- pci_unmap_sg(dev, ar->ar_sg_table, ar->ar_sg_nents, ar->ar_sg_ddir);
+ pci_unmap_sg(dev, sg, nents, drive->channel->sg_dma_direction);
}
/*
@@ -435,7 +432,7 @@ int report_drive_dmaing (ide_drive_t *drive)
printk(", UDMA(133)"); /* UDMA BIOS-enabled! */
}
} else if ((id->field_valid & 4) && (eighty_ninty_three(drive)) &&
- (id->dma_ultra & (id->dma_ultra >> 11) & 7)) {
+ (id->dma_ultra & (id->dma_ultra >> 11) & 7)) {
if ((id->dma_ultra >> 13) & 1) {
printk(", UDMA(100)"); /* UDMA BIOS-enabled! */
} else if ((id->dma_ultra >> 12) & 1) {
@@ -538,41 +535,6 @@ static void ide_toggle_bounce(ide_drive_t *drive, int on)
}
/*
- * Start DMA engine.
- */
-int ide_start_dma(struct ata_channel *hwif, ide_drive_t *drive, ide_dma_action_t func)
-{
- unsigned int reading = 0, count;
- unsigned long dma_base = hwif->dma_base;
- struct ata_request *ar = IDE_CUR_AR(drive);
-
- /* This can happen with drivers abusing the special request field.
- */
-
- if (!ar) {
- printk(KERN_ERR "DMA without ATA request\n");
-
- return 1;
- }
-
- if (rq_data_dir(ar->ar_rq) == READ)
- reading = 1 << 3;
-
- if (hwif->rwproc)
- hwif->rwproc(drive, func);
-
- if (!(count = ide_build_dmatable(drive, ar->ar_rq, func)))
- return 1; /* try PIO instead of DMA */
-
- ar->ar_flags |= ATA_AR_SETUP;
- outl(ar->ar_dmatable, dma_base + 4); /* PRD table */
- outb(reading, dma_base); /* specify r/w */
- outb(inb(dma_base + 2) | 6, dma_base+2);/* clear INTR & ERROR flags */
- drive->waiting_for_dma = 1;
- return 0;
-}
-
-/*
* ide_dmaproc() initiates/aborts DMA read/write operations on a drive.
*
* The caller is assumed to have selected the drive and programmed the drive's
@@ -592,10 +554,9 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
{
struct ata_channel *hwif = drive->channel;
unsigned long dma_base = hwif->dma_base;
- u8 unit = (drive->select.b.unit & 0x01);
- unsigned int reading = 0, set_high = 1;
- struct ata_request *ar;
- u8 dma_stat;
+ byte unit = (drive->select.b.unit & 0x01);
+ unsigned int count, reading = 0, set_high = 1;
+ byte dma_stat;
switch (func) {
case ide_dma_off:
@@ -603,68 +564,54 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
case ide_dma_off_quietly:
set_high = 0;
outb(inb(dma_base+2) & ~(1<<(5+unit)), dma_base+2);
-#ifdef CONFIG_BLK_DEV_IDE_TCQ
- hwif->dmaproc(ide_dma_queued_off, drive);
-#endif
case ide_dma_on:
ide_toggle_bounce(drive, set_high);
drive->using_dma = (func == ide_dma_on);
- if (drive->using_dma) {
+ if (drive->using_dma)
outb(inb(dma_base+2)|(1<<(5+unit)), dma_base+2);
-#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
- hwif->dmaproc(ide_dma_queued_on, drive);
-#endif
- }
return 0;
case ide_dma_check:
return config_drive_for_dma (drive);
- case ide_dma_begin:
- if (test_and_set_bit(IDE_DMA, &HWGROUP(drive)->flags))
- BUG();
- /* Note that this is done *after* the cmd has
- * been issued to the drive, as per the BM-IDE spec.
- * The Promise Ultra33 doesn't work correctly when
- * we do this part before issuing the drive cmd.
- */
- outb(inb(dma_base)|1, dma_base); /* start DMA */
- return 0;
-#ifdef CONFIG_BLK_DEV_IDE_TCQ
- case ide_dma_queued_on:
- case ide_dma_queued_off:
- case ide_dma_read_queued:
- case ide_dma_write_queued:
- case ide_dma_queued_start:
- return ide_tcq_dmaproc(func, drive);
-#endif
-
case ide_dma_read:
reading = 1 << 3;
case ide_dma_write:
- ar = IDE_CUR_AR(drive);
-
- if (ide_start_dma(hwif, drive, func))
- return 1;
-
+ /* active tuning based on IO direction */
+ if (hwif->rwproc)
+ hwif->rwproc(drive, func);
+
+ if (!(count = ide_build_dmatable(drive, func)))
+ return 1; /* try PIO instead of DMA */
+ outl(hwif->dmatable_dma, dma_base + 4); /* PRD table */
+ outb(reading, dma_base); /* specify r/w */
+ outb(inb(dma_base+2)|6, dma_base+2); /* clear INTR & ERROR flags */
+ drive->waiting_for_dma = 1;
if (drive->type != ATA_DISK)
return 0;
+
BUG_ON(HWGROUP(drive)->handler);
ide_set_handler(drive, &ide_dma_intr, WAIT_CMD, dma_timer_expiry); /* issue cmd to drive */
- if ((ar->ar_rq->flags & REQ_DRIVE_TASKFILE) &&
+ if ((HWGROUP(drive)->rq->flags & REQ_DRIVE_TASKFILE) &&
(drive->addressing == 1)) {
- struct ata_taskfile *args = &ar->ar_task;
+ struct ata_taskfile *args = HWGROUP(drive)->rq->special;
OUT_BYTE(args->taskfile.command, IDE_COMMAND_REG);
} else if (drive->addressing) {
OUT_BYTE(reading ? WIN_READDMA_EXT : WIN_WRITEDMA_EXT, IDE_COMMAND_REG);
} else {
OUT_BYTE(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
}
- return hwif->dmaproc(ide_dma_begin, drive);
+ return drive->channel->dmaproc(ide_dma_begin, drive);
+ case ide_dma_begin:
+ /* Note that this is done *after* the cmd has
+ * been issued to the drive, as per the BM-IDE spec.
+ * The Promise Ultra33 doesn't work correctly when
+ * we do this part before issuing the drive cmd.
+ */
+ outb(inb(dma_base)|1, dma_base); /* start DMA */
+ return 0;
case ide_dma_end: /* returns 1 on error, 0 otherwise */
- if (!test_and_clear_bit(IDE_DMA, &HWGROUP(drive)->flags))
- BUG();
drive->waiting_for_dma = 0;
outb(inb(dma_base)&~1, dma_base); /* stop DMA */
- dma_stat = inb(dma_base+2); /* get DMA status */
+ dma_stat = inb(dma_base+2); /* get DMA status */
outb(dma_stat|6, dma_base+2); /* clear the INTR & ERROR bits */
ide_destroy_dmatable(drive); /* purge DMA mappings */
return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; /* verify good DMA status */
@@ -687,7 +634,7 @@ int ide_dmaproc (ide_dma_action_t func, ide_drive_t *drive)
return 1;
case ide_dma_retune:
case ide_dma_lostirq:
- printk(KERN_ERR "%s: chipset supported func only: %d\n", __FUNCTION__, func);
+ printk(KERN_ERR "%s: chipset supported func only: %d\n", __FUNCTION__, func);
return 1;
default:
printk(KERN_ERR "%s: unsupported func: %d\n", __FUNCTION__, func);
@@ -703,6 +650,17 @@ void ide_release_dma(struct ata_channel *hwif)
if (!hwif->dma_base)
return;
+ if (hwif->dmatable_cpu) {
+ pci_free_consistent(hwif->pci_dev,
+ PRD_ENTRIES * PRD_BYTES,
+ hwif->dmatable_cpu,
+ hwif->dmatable_dma);
+ hwif->dmatable_cpu = NULL;
+ }
+ if (hwif->sg_table) {
+ kfree(hwif->sg_table);
+ hwif->sg_table = NULL;
+ }
if ((hwif->dma_extra) && (hwif->unit == 0))
release_region((hwif->dma_base + 16), hwif->dma_extra);
release_region(hwif->dma_base, 8);
@@ -721,6 +679,20 @@ void ide_setup_dma(struct ata_channel *hwif, unsigned long dma_base, unsigned in
}
request_region(dma_base, num_ports, hwif->name);
hwif->dma_base = dma_base;
+ hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
+ PRD_ENTRIES * PRD_BYTES,
+ &hwif->dmatable_dma);
+ if (hwif->dmatable_cpu == NULL)
+ goto dma_alloc_failure;
+
+ hwif->sg_table = kmalloc(sizeof(struct scatterlist) * PRD_ENTRIES,
+ GFP_KERNEL);
+ if (hwif->sg_table == NULL) {
+ pci_free_consistent(hwif->pci_dev, PRD_ENTRIES * PRD_BYTES,
+ hwif->dmatable_cpu, hwif->dmatable_dma);
+ goto dma_alloc_failure;
+ }
+
hwif->dmaproc = &ide_dmaproc;
if (hwif->chipset != ide_trm290) {
@@ -731,4 +703,7 @@ void ide_setup_dma(struct ata_channel *hwif, unsigned long dma_base, unsigned in
}
printk("\n");
return;
+
+dma_alloc_failure:
+ printk(" -- ERROR, UNABLE TO ALLOCATE DMA TABLES\n");
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index b7dbb283cfc8..72ca6e925246 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -168,9 +168,6 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
}
printk (" drive\n");
drive->type = type;
-
- goto init_queue;
-
return;
}
@@ -201,22 +198,6 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
if (drive->channel->quirkproc)
drive->quirk_list = drive->channel->quirkproc(drive);
-init_queue:
- /*
- * it's an ata drive, build command list
- */
- drive->queue_depth = 1;
-#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEPTH
- drive->queue_depth = CONFIG_BLK_DEV_IDE_TCQ_DEPTH;
-#else
- drive->queue_depth = drive->id->queue_depth + 1;
-#endif
- if (drive->queue_depth < 1 || drive->queue_depth > IDE_MAX_TAG)
- drive->queue_depth = IDE_MAX_TAG;
-
- if (ide_init_commandlist(drive))
- goto err_misc;
-
return;
err_misc:
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index c0e11b9b285f..2f7094b29da6 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -308,8 +308,7 @@ void ata_poll_drive_ready(ide_drive_t *drive)
static ide_startstop_t pre_task_mulout_intr(ide_drive_t *drive, struct request *rq)
{
- struct ata_request *ar = rq->special;
- struct ata_taskfile *args = &ar->ar_task;
+ struct ata_taskfile *args = rq->special;
ide_startstop_t startstop;
/*
@@ -464,35 +463,11 @@ ide_startstop_t ata_taskfile(ide_drive_t *drive,
if (args->prehandler != NULL)
return args->prehandler(drive, rq);
} else {
- ide_dma_action_t dmaaction;
- u8 command;
-
- if (!drive->using_dma)
- return ide_started;
-
- command = args->taskfile.command;
-
-#ifdef CONFIG_BLK_DEV_IDE_TCQ
- if (drive->using_tcq) {
- if (command == WIN_READDMA_QUEUED
- || command == WIN_READDMA_QUEUED_EXT
- || command == WIN_WRITEDMA_QUEUED
- || command == WIN_READDMA_QUEUED_EXT)
- return ide_start_tag(ide_dma_queued_start, drive, rq->special);
- }
-#endif
-
- if (command == WIN_WRITEDMA || command == WIN_WRITEDMA_EXT)
- dmaaction = ide_dma_write;
- else if (command == WIN_READDMA || command == WIN_READDMA_EXT)
- dmaaction = ide_dma_read;
- else
- return ide_stopped;
-
- if (!drive->channel->dmaproc(dmaaction, drive))
- return ide_started;
-
- return ide_stopped;
+ /* for dma commands we don't set the handler */
+ if (drive->using_dma &&
+ !(drive->channel->dmaproc(((args->taskfile.command == WIN_WRITEDMA)
+ || (args->taskfile.command == WIN_WRITEDMA_EXT))
+ ? ide_dma_write : ide_dma_read, drive)));
}
return ide_started;
@@ -545,30 +520,12 @@ ide_startstop_t recal_intr(ide_drive_t *drive)
}
/*
- * Quiet handler for commands without a data phase -- handy instead of
- * task_no_data_intr() for commands we _know_ will fail (such as WIN_NOP)
- */
-ide_startstop_t task_no_data_quiet_intr(ide_drive_t *drive)
-{
- struct ata_request *ar = IDE_CUR_AR(drive);
- struct ata_taskfile *args = &ar->ar_task;
-
- ide__sti(); /* local CPU only */
-
- if (args)
- ide_end_drive_cmd(drive, GET_STAT(), GET_ERR());
-
- return ide_stopped;
-}
-
-/*
* Handler for commands without a data phase
*/
ide_startstop_t task_no_data_intr (ide_drive_t *drive)
{
- struct ata_request *ar = IDE_CUR_AR(drive);
- struct ata_taskfile *args = &ar->ar_task;
- u8 stat = GET_STAT();
+ struct ata_taskfile *args = HWGROUP(drive)->rq->special;
+ byte stat = GET_STAT();
ide__sti(); /* local CPU only */
@@ -628,8 +585,7 @@ static ide_startstop_t task_in_intr (ide_drive_t *drive)
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
- struct ata_request *ar = rq->special;
- struct ata_taskfile *args = &ar->ar_task;
+ struct ata_taskfile *args = rq->special;
ide_startstop_t startstop;
if (ide_wait_stat(&startstop, drive, DATA_READY, drive->bad_wstat, WAIT_DRQ)) {
@@ -909,7 +865,6 @@ void ide_cmd_type_parser(struct ata_taskfile *args)
return;
case WIN_NOP:
- args->handler = task_no_data_quiet_intr;
args->command_type = IDE_DRIVE_TASK_NO_DATA;
return;
@@ -927,7 +882,7 @@ void ide_cmd_type_parser(struct ata_taskfile *args)
/*
* This function is intended to be used prior to invoking ide_do_drive_cmd().
*/
-void init_taskfile_request(struct request *rq)
+static void init_taskfile_request(struct request *rq)
{
memset(rq, 0, sizeof(*rq));
rq->flags = REQ_DRIVE_TASKFILE;
@@ -936,29 +891,18 @@ void init_taskfile_request(struct request *rq)
int ide_raw_taskfile(ide_drive_t *drive, struct ata_taskfile *args, byte *buf)
{
struct request rq;
- struct ata_request star;
- int ret;
-
- ata_ar_init(drive, &star);
init_taskfile_request(&rq);
- rq.buffer = buf;
- memcpy(&star.ar_task, args, sizeof(*args));
+ rq.buffer = buf;
if (args->command_type != IDE_DRIVE_TASK_NO_DATA)
rq.current_nr_sectors = rq.nr_sectors
= (args->hobfile.sector_count << 8)
| args->taskfile.sector_count;
- rq.special = &star;
+ rq.special = args;
- ret = ide_do_drive_cmd(drive, &rq, ide_wait);
-
- /*
- * copy back status etc
- */
- memcpy(args, &star.ar_task, sizeof(*args));
- return ret;
+ return ide_do_drive_cmd(drive, &rq, ide_wait);
}
/*
diff --git a/drivers/ide/ide-tcq.c b/drivers/ide/ide-tcq.c
deleted file mode 100644
index 50af815d98e7..000000000000
--- a/drivers/ide/ide-tcq.c
+++ /dev/null
@@ -1,689 +0,0 @@
-/*
- * Copyright (C) 2001, 2002 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/*
- * Support for the DMA queued protocol, which enables ATA disk drives to
- * use tagged command queueing.
- */
-#include <linux/config.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ide.h>
-
-#include <asm/delay.h>
-
-/*
- * warning: it will be _very_ verbose if defined
- */
-#undef IDE_TCQ_DEBUG
-
-#ifdef IDE_TCQ_DEBUG
-#define TCQ_PRINTK printk
-#else
-#define TCQ_PRINTK(x...)
-#endif
-
-/*
- * use nIEN or not
- */
-#undef IDE_TCQ_NIEN
-
-/*
- * we are leaving the SERVICE interrupt alone, IBM drives have it
- * on per default and it can't be turned off. Doesn't matter, this
- * is the sane config.
- */
-#undef IDE_TCQ_FIDDLE_SI
-
-ide_startstop_t ide_dmaq_intr(ide_drive_t *drive);
-ide_startstop_t ide_service(ide_drive_t *drive);
-
-static inline void drive_ctl_nien(ide_drive_t *drive, int set)
-{
-#ifdef IDE_TCQ_NIEN
- if (IDE_CONTROL_REG) {
- int mask = set ? 0x02 : 0x00;
-
- OUT_BYTE(drive->ctl | mask, IDE_CONTROL_REG);
- }
-#endif
-}
-
-/*
- * if we encounter _any_ error doing I/O to one of the tags, we must
- * invalidate the pending queue. clear the software busy queue and requeue
- * on the request queue for restart. issue a WIN_NOP to clear hardware queue
- */
-static void ide_tcq_invalidate_queue(ide_drive_t *drive)
-{
- request_queue_t *q = &drive->queue;
- unsigned long flags;
- struct ata_request *ar;
- int i;
-
- printk("%s: invalidating pending queue (%d)\n", drive->name, drive->tcq->queued);
-
- spin_lock_irqsave(&ide_lock, flags);
-
- del_timer(&HWGROUP(drive)->timer);
-
- if (test_bit(IDE_DMA, &HWGROUP(drive)->flags))
- drive->channel->dmaproc(ide_dma_end, drive);
-
- /*
- * assume oldest commands have the higher tags... doesn't matter
- * much. shove requests back into request queue.
- */
- for (i = drive->queue_depth - 1; i; i--) {
- ar = drive->tcq->ar[i];
- if (!ar)
- continue;
-
- ar->ar_rq->special = NULL;
- ar->ar_rq->flags &= ~REQ_STARTED;
- _elv_add_request(q, ar->ar_rq, 0, 0);
- ata_ar_put(drive, ar);
- }
-
- drive->tcq->queued = 0;
- drive->using_tcq = 0;
- drive->queue_depth = 1;
- clear_bit(IDE_BUSY, &HWGROUP(drive)->flags);
- clear_bit(IDE_DMA, &HWGROUP(drive)->flags);
- HWGROUP(drive)->handler = NULL;
-
- /*
- * do some internal stuff -- we really need this command to be
- * executed before any new commands are started. issue a NOP
- * to clear internal queue on drive
- */
- ar = ata_ar_get(drive);
-
- memset(&ar->ar_task, 0, sizeof(ar->ar_task));
- AR_TASK_CMD(ar) = WIN_NOP;
- ide_cmd_type_parser(&ar->ar_task);
- ar->ar_rq = &HWGROUP(drive)->wrq;
- init_taskfile_request(ar->ar_rq);
- ar->ar_rq->rq_dev = mk_kdev(drive->channel->major, (drive->select.b.unit)<<PARTN_BITS);
- ar->ar_rq->special = ar;
- _elv_add_request(q, ar->ar_rq, 0, 0);
-
- /*
- * make sure that nIEN is cleared
- */
- drive_ctl_nien(drive, 0);
-
- /*
- * start doing stuff again
- */
- q->request_fn(q);
- spin_unlock_irqrestore(&ide_lock, flags);
- printk("ide_tcq_invalidate_queue: done\n");
-}
-
-void ide_tcq_intr_timeout(unsigned long data)
-{
- ide_hwgroup_t *hwgroup = (ide_hwgroup_t *) data;
- unsigned long flags;
- ide_drive_t *drive;
-
- printk("ide_tcq_intr_timeout: timeout waiting for interrupt...\n");
-
- spin_lock_irqsave(&ide_lock, flags);
-
- if (test_and_set_bit(IDE_BUSY, &hwgroup->flags))
- printk("ide_tcq_intr_timeout: hwgroup not busy\n");
- if (hwgroup->handler == NULL)
- printk("ide_tcq_intr_timeout: missing isr!\n");
- if ((drive = hwgroup->drive) == NULL)
- printk("ide_tcq_intr_timeout: missing drive!\n");
-
- spin_unlock_irqrestore(&ide_lock, flags);
-
- /*
- * if pending commands, try service before giving up
- */
- if (ide_pending_commands(drive) && (GET_STAT() & SERVICE_STAT))
- if (ide_service(drive) == ide_started)
- return;
-
- if (drive)
- ide_tcq_invalidate_queue(drive);
-}
-
-void ide_tcq_set_intr(ide_hwgroup_t *hwgroup, ide_handler_t *handler)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ide_lock, flags);
-
- /*
- * always just bump the timer for now, the timeout handling will
- * have to be changed to be per-command
- */
- hwgroup->timer.function = ide_tcq_intr_timeout;
- hwgroup->timer.data = (unsigned long) hwgroup;
- mod_timer(&hwgroup->timer, jiffies + 5 * HZ);
-
- hwgroup->handler = handler;
- spin_unlock_irqrestore(&ide_lock, flags);
-}
-
-/*
- * wait 400ns, then poll for busy_mask to clear from alt status
- */
-#define IDE_TCQ_WAIT (10000)
-int ide_tcq_wait_altstat(ide_drive_t *drive, byte *stat, byte busy_mask)
-{
- int i = 0;
-
- udelay(1);
-
- while ((*stat = GET_ALTSTAT()) & busy_mask) {
- udelay(10);
-
- if (unlikely(i++ > IDE_TCQ_WAIT))
- return 1;
- }
-
- return 0;
-}
-
-/*
- * issue SERVICE command to drive -- drive must have been selected first,
- * and it must have reported a need for service (status has SERVICE_STAT set)
- *
- * Also, nIEN must be set as not to need protection against ide_dmaq_intr
- */
-ide_startstop_t ide_service(ide_drive_t *drive)
-{
- struct ata_request *ar;
- byte feat, stat;
- int tag, ret;
-
- TCQ_PRINTK("%s: started service\n", drive->name);
-
- /*
- * could be called with IDE_DMA in-progress from invalidate
- * handler, refuse to do anything
- */
- if (test_bit(IDE_DMA, &HWGROUP(drive)->flags))
- return ide_stopped;
-
- /*
- * need to select the right drive first...
- */
- if (drive != HWGROUP(drive)->drive) {
- SELECT_DRIVE(drive->channel, drive);
- udelay(10);
- }
-
- drive_ctl_nien(drive, 1);
-
- /*
- * send SERVICE, wait 400ns, wait for BUSY_STAT to clear
- */
- OUT_BYTE(WIN_QUEUED_SERVICE, IDE_COMMAND_REG);
-
- if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
- printk("ide_service: BUSY clear took too long\n");
- ide_dump_status(drive, "ide_service", stat);
- ide_tcq_invalidate_queue(drive);
- return ide_stopped;
- }
-
- drive_ctl_nien(drive, 0);
-
- /*
- * FIXME, invalidate queue
- */
- if (stat & ERR_STAT) {
- ide_dump_status(drive, "ide_service", stat);
- ide_tcq_invalidate_queue(drive);
- return ide_stopped;
- }
-
- /*
- * should not happen, a buggy device could introduce loop
- */
- if ((feat = GET_FEAT()) & NSEC_REL) {
- printk("%s: release in service\n", drive->name);
- IDE_SET_CUR_TAG(drive, IDE_INACTIVE_TAG);
- return ide_stopped;
- }
-
- tag = feat >> 3;
- IDE_SET_CUR_TAG(drive, tag);
-
- TCQ_PRINTK("ide_service: stat %x, feat %x\n", stat, feat);
-
- if ((ar = IDE_CUR_TAG(drive)) == NULL) {
- printk("ide_service: missing request for tag %d\n", tag);
- return ide_stopped;
- }
-
- HWGROUP(drive)->rq = ar->ar_rq;
-
- /*
- * we'll start a dma read or write, device will trigger
- * interrupt to indicate end of transfer, release is not allowed
- */
- if (rq_data_dir(ar->ar_rq) == READ) {
- TCQ_PRINTK("ide_service: starting READ %x\n", stat);
- ret = drive->channel->dmaproc(ide_dma_read_queued, drive);
- } else {
- TCQ_PRINTK("ide_service: starting WRITE %x\n", stat);
- ret = drive->channel->dmaproc(ide_dma_write_queued, drive);
- }
-
- /*
- * dmaproc set intr handler
- */
- return !ret ? ide_started : ide_stopped;
-}
-
-ide_startstop_t ide_check_service(ide_drive_t *drive)
-{
- byte stat;
-
- TCQ_PRINTK("%s: ide_check_service\n", drive->name);
-
- if (!ide_pending_commands(drive))
- return ide_stopped;
-
- if ((stat = GET_STAT()) & SERVICE_STAT)
- return ide_service(drive);
-
- /*
- * we have pending commands, wait for interrupt
- */
- ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
- return ide_started;
-}
-
-ide_startstop_t ide_dmaq_complete(ide_drive_t *drive, byte stat)
-{
- struct ata_request *ar = IDE_CUR_TAG(drive);
- byte dma_stat;
-
- /*
- * transfer was in progress, stop DMA engine
- */
- dma_stat = drive->channel->dmaproc(ide_dma_end, drive);
-
- /*
- * must be end of I/O, check status and complete as necessary
- */
- if (unlikely(!OK_STAT(stat, READY_STAT, drive->bad_wstat | DRQ_STAT))) {
- printk("ide_dmaq_intr: %s: error status %x\n", drive->name, stat);
- ide_dump_status(drive, "ide_dmaq_intr", stat);
- ide_tcq_invalidate_queue(drive);
- return ide_stopped;
- }
-
- if (dma_stat)
- printk("%s: bad DMA status (dma_stat=%x)\n", drive->name, dma_stat);
-
- TCQ_PRINTK("ide_dmaq_intr: ending %p, tag %d\n", ar, ar->ar_tag);
- ide_end_queued_request(drive, !dma_stat, ar->ar_rq);
-
- /*
- * we completed this command, set tcq inactive and check if we
- * can service a new command
- */
- IDE_SET_CUR_TAG(drive, IDE_INACTIVE_TAG);
- return ide_check_service(drive);
-}
-
-/*
- * intr handler for queued dma operations. this can be entered for two
- * reasons:
- *
- * 1) device has completed dma transfer
- * 2) service request to start a command
- *
- * if the drive has an active tag, we first complete that request before
- * processing any pending SERVICE.
- */
-ide_startstop_t ide_dmaq_intr(ide_drive_t *drive)
-{
- byte stat = GET_STAT();
-
- TCQ_PRINTK("ide_dmaq_intr: stat=%x, tag %d\n", stat, drive->tcq->active_tag);
-
- /*
- * if a command completion interrupt is pending, do that first and
- * check service afterwards
- */
- if (drive->tcq->active_tag != IDE_INACTIVE_TAG)
- return ide_dmaq_complete(drive, stat);
-
- /*
- * service interrupt
- */
- if (stat & SERVICE_STAT) {
- TCQ_PRINTK("ide_dmaq_intr: SERV (stat=%x)\n", stat);
- return ide_service(drive);
- }
-
- printk("ide_dmaq_intr: stat=%x, not expected\n", stat);
- return ide_check_service(drive);
-}
-
-/*
- * check if the ata adapter this drive is attached to supports the
- * NOP auto-poll for multiple tcq enabled drives on one channel
- */
-static int ide_tcq_check_autopoll(ide_drive_t *drive)
-{
- struct ata_channel *ch = HWIF(drive);
- struct ata_taskfile args;
- ide_drive_t *next;
-
- /*
- * only need to probe if both drives on a channel support tcq
- */
- next = drive->next;
- if (next == drive || !next->using_tcq)
- return 0;
-
- memset(&args, 0, sizeof(args));
-
- args.taskfile.feature = 0x01;
- args.taskfile.command = WIN_NOP;
- ide_cmd_type_parser(&args);
-
- /*
- * do taskfile and check ABRT bit -- intelligent adapters will not
- * pass NOP with sub-code 0x01 to device, so the command will not
- * fail there
- */
- ide_raw_taskfile(drive, &args, NULL);
- if (args.taskfile.feature & ABRT_ERR)
- return 1;
-
- ch->auto_poll = 1;
- printk("%s: NOP Auto-poll enabled\n", ch->name);
- return 0;
-}
-
-/*
- * configure the drive for tcq
- */
-static int ide_tcq_configure(ide_drive_t *drive)
-{
- int tcq_mask = 1 << 1 | 1 << 14;
- int tcq_bits = tcq_mask | 1 << 15;
- struct ata_taskfile args;
-
- /*
- * bit 14 and 1 must be set in word 83 of the device id to indicate
- * support for dma queued protocol, and bit 15 must be cleared
- */
- if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask)
- return -EIO;
-
- memset(&args, 0, sizeof(args));
- args.taskfile.feature = SETFEATURES_EN_WCACHE;
- args.taskfile.command = WIN_SETFEATURES;
- ide_cmd_type_parser(&args);
-
- if (ide_raw_taskfile(drive, &args, NULL)) {
- printk("%s: failed to enable write cache\n", drive->name);
- return 1;
- }
-
- /*
- * disable RELease interrupt, it's quicker to poll this after
- * having sent the command opcode
- */
- memset(&args, 0, sizeof(args));
- args.taskfile.feature = SETFEATURES_DIS_RI;
- args.taskfile.command = WIN_SETFEATURES;
- ide_cmd_type_parser(&args);
-
- if (ide_raw_taskfile(drive, &args, NULL)) {
- printk("%s: disabling release interrupt fail\n", drive->name);
- return 1;
- }
-
-#ifdef IDE_TCQ_FIDDLE_SI
- /*
- * enable SERVICE interrupt
- */
- memset(&args, 0, sizeof(args));
- args.taskfile.feature = SETFEATURES_EN_SI;
- args.taskfile.command = WIN_SETFEATURES;
- ide_cmd_type_parser(&args);
-
- if (ide_raw_taskfile(drive, &args, NULL)) {
- printk("%s: enabling service interrupt fail\n", drive->name);
- return 1;
- }
-#endif
-
- if (!drive->tcq) {
- drive->tcq = kmalloc(sizeof(ide_tag_info_t), GFP_ATOMIC);
- if (!drive->tcq)
- return -ENOMEM;
-
- memset(drive->tcq, 0, sizeof(ide_tag_info_t));
- drive->tcq->active_tag = IDE_INACTIVE_TAG;
- }
-
- return 0;
-}
-
-/*
- * for now assume that command list is always as big as we need and don't
- * attempt to shrink it on tcq disable
- */
-static int ide_enable_queued(ide_drive_t *drive, int on)
-{
- int depth = drive->using_tcq ? drive->queue_depth : 0;
-
- /*
- * disable or adjust queue depth
- */
- if (!on) {
- if (drive->using_tcq)
- printk("%s: TCQ disabled\n", drive->name);
- drive->using_tcq = 0;
- return 0;
- }
-
- if (ide_tcq_configure(drive)) {
- drive->using_tcq = 0;
- return 1;
- }
-
- /*
- * possibly expand command list
- */
- if (ide_build_commandlist(drive))
- return 1;
-
- /*
- * check auto-poll support
- */
- ide_tcq_check_autopoll(drive);
-
- if (depth != drive->queue_depth)
- printk("%s: tagged command queueing enabled, command queue depth %d\n", drive->name, drive->queue_depth);
-
- drive->using_tcq = 1;
-
- /*
- * clear stats
- */
- drive->tcq->max_depth = 0;
- return 0;
-}
-
-int ide_tcq_wait_dataphase(ide_drive_t *drive)
-{
- ide_startstop_t foo;
-
- if (ide_wait_stat(&foo, drive, READY_STAT | DRQ_STAT, BUSY_STAT, WAIT_READY)) {
- printk("%s: timeout waiting for data phase\n", drive->name);
- return 1;
- }
-
- return 0;
-}
-
-int ide_tcq_dmaproc(ide_dma_action_t func, ide_drive_t *drive)
-{
- struct ata_channel *hwif = drive->channel;
- unsigned int reading = 0, enable_tcq = 1;
- struct ata_request *ar;
- byte stat, feat;
-
- switch (func) {
- /*
- * invoked from a SERVICE interrupt, command etc already known.
- * just need to start the dma engine for this tag
- */
- case ide_dma_read_queued:
- reading = 1 << 3;
- case ide_dma_write_queued:
- TCQ_PRINTK("ide_dma: setting up queued %d\n", drive->tcq->active_tag);
- BUG_ON(drive->tcq->active_tag == IDE_INACTIVE_TAG);
-
- if (!test_bit(IDE_BUSY, &HWGROUP(drive)->flags))
- printk("queued_rw: IDE_BUSY not set\n");
-
- if (ide_tcq_wait_dataphase(drive))
- return ide_stopped;
-
- if (ide_start_dma(hwif, drive, func))
- return 1;
-
- ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
- return hwif->dmaproc(ide_dma_begin, drive);
-
- /*
- * start a queued command from scratch
- */
- case ide_dma_queued_start:
- BUG_ON(drive->tcq->active_tag == IDE_INACTIVE_TAG);
- ar = IDE_CUR_TAG(drive);
-
- /*
- * set nIEN, tag start operation will enable again when
- * it is safe
- */
- drive_ctl_nien(drive, 1);
-
- OUT_BYTE(AR_TASK_CMD(ar), IDE_COMMAND_REG);
-
- if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
- ide_dump_status(drive, "queued start", stat);
- ide_tcq_invalidate_queue(drive);
- return ide_stopped;
- }
-
- drive_ctl_nien(drive, 0);
-
- if (stat & ERR_STAT) {
- ide_dump_status(drive, "tcq_start", stat);
- return ide_stopped;
- }
-
- /*
- * drive released the bus, clear active tag and
- * check for service
- */
- if ((feat = GET_FEAT()) & NSEC_REL) {
- IDE_SET_CUR_TAG(drive, IDE_INACTIVE_TAG);
- drive->tcq->immed_rel++;
-
- ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
-
- TCQ_PRINTK("REL in queued_start\n");
-
- if ((stat = GET_STAT()) & SERVICE_STAT)
- return ide_service(drive);
-
- return ide_released;
- }
-
- drive->tcq->immed_comp++;
-
- if (ide_tcq_wait_dataphase(drive))
- return ide_stopped;
-
- if (ide_start_dma(hwif, drive, func))
- return ide_stopped;
-
- TCQ_PRINTK("IMMED in queued_start\n");
-
- /*
- * need to arm handler before starting dma engine,
- * transfer could complete right away
- */
- ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
-
- if (hwif->dmaproc(ide_dma_begin, drive))
- return ide_stopped;
-
- /*
- * wait for SERVICE or completion interrupt
- */
- return ide_started;
-
- case ide_dma_queued_off:
- enable_tcq = 0;
- case ide_dma_queued_on:
- if (enable_tcq && !drive->using_dma)
- return 1;
- return ide_enable_queued(drive, enable_tcq);
- default:
- break;
- }
-
- return 1;
-}
-
-int ide_build_sglist (struct ata_channel *hwif, struct request *rq);
-ide_startstop_t ide_start_tag(ide_dma_action_t func, ide_drive_t *drive,
- struct ata_request *ar)
-{
- ide_startstop_t startstop;
-
- TCQ_PRINTK("%s: ide_start_tag: begin tag %p/%d, rq %p\n", drive->name,ar,ar->ar_tag, ar->ar_rq);
-
- /*
- * do this now, no need to run that with interrupts disabled
- */
- if (!ide_build_sglist(drive->channel, ar->ar_rq))
- return ide_stopped;
-
- IDE_SET_CUR_TAG(drive, ar->ar_tag);
- HWGROUP(drive)->rq = ar->ar_rq;
-
- startstop = ide_tcq_dmaproc(func, drive);
-
- if (unlikely(startstop == ide_stopped)) {
- IDE_SET_CUR_TAG(drive, IDE_INACTIVE_TAG);
- HWGROUP(drive)->rq = NULL;
- }
-
- return startstop;
-}
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 426d3d510d6d..b38232584a93 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -368,40 +368,6 @@ int drive_is_flashcard (ide_drive_t *drive)
return 0; /* no, it is not a flash memory card */
}
-void ide_end_queued_request(ide_drive_t *drive, int uptodate, struct request *rq)
-{
- unsigned long flags;
-
- BUG_ON(!(rq->flags & REQ_STARTED));
- BUG_ON(!rq->special);
-
- if (!end_that_request_first(rq, uptodate, rq->hard_nr_sectors)) {
- struct ata_request *ar = rq->special;
-
- add_blkdev_randomness(major(rq->rq_dev));
-
- spin_lock_irqsave(&ide_lock, flags);
-
- if ((jiffies - ar->ar_time > ATA_AR_MAX_TURNAROUND) && drive->queue_depth > 1) {
- printk(KERN_INFO "%s: exceeded max command turn-around time (%d seconds)\n", drive->name, ATA_AR_MAX_TURNAROUND / HZ);
- drive->queue_depth >>= 1;
- }
-
- if (jiffies - ar->ar_time > drive->tcq->oldest_command)
- drive->tcq->oldest_command = jiffies - ar->ar_time;
-
- ata_ar_put(drive, ar);
- end_that_request_last(rq);
- /*
- * IDE_SET_CUR_TAG(drive, IDE_INACTIVE_TAG) will do this
- * too, but it really belongs here. assumes that the
- * ended request is the active one.
- */
- HWGROUP(drive)->rq = NULL;
- spin_unlock_irqrestore(&ide_lock, flags);
- }
-}
-
int __ide_end_request(ide_drive_t *drive, int uptodate, int nr_secs)
{
struct request *rq;
@@ -430,17 +396,9 @@ int __ide_end_request(ide_drive_t *drive, int uptodate, int nr_secs)
}
if (!end_that_request_first(rq, uptodate, nr_secs)) {
- struct ata_request *ar = rq->special;
-
add_blkdev_randomness(major(rq->rq_dev));
- /*
- * request with ATA_AR_QUEUED set have already been
- * dequeued, but doing it twice is ok
- */
blkdev_dequeue_request(rq);
HWGROUP(drive)->rq = NULL;
- if (ar)
- ata_ar_put(drive, ar);
end_that_request_last(rq);
ret = 0;
}
@@ -776,11 +734,8 @@ void ide_end_drive_cmd(ide_drive_t *drive, byte stat, byte err)
args[6] = IN_BYTE(IDE_SELECT_REG);
}
} else if (rq->flags & REQ_DRIVE_TASKFILE) {
- struct ata_request *ar = rq->special;
- struct ata_taskfile *args = &ar->ar_task;
-
+ struct ata_taskfile *args = rq->special;
rq->errors = !OK_STAT(stat, READY_STAT, BAD_STAT);
-
if (args) {
args->taskfile.feature = err;
args->taskfile.sector_count = IN_BYTE(IDE_NSECTOR_REG);
@@ -803,7 +758,6 @@ void ide_end_drive_cmd(ide_drive_t *drive, byte stat, byte err)
args->hobfile.high_cylinder = IN_BYTE(IDE_HCYL_REG);
}
}
- ata_ar_put(drive, ar);
}
blkdev_dequeue_request(rq);
@@ -920,11 +874,6 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, byte stat)
struct request *rq;
byte err;
- /*
- * FIXME: remember to invalidate tcq queue when drive->using_tcq
- * and atomic_read(&drive->tcq->queued) /jens
- */
-
err = ide_dump_status(drive, msg, stat);
if (drive == NULL || (rq = HWGROUP(drive)->rq) == NULL)
return ide_stopped;
@@ -1127,14 +1076,11 @@ static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
*/
if (rq->flags & REQ_DRIVE_TASKFILE) {
- struct ata_request *ar = rq->special;
- struct ata_taskfile *args;
+ struct ata_taskfile *args = rq->special;
- if (!ar)
+ if (!(args))
goto args_error;
- args = &ar->ar_task;
-
ata_taskfile(drive, args, NULL);
if (((args->command_type == IDE_DRIVE_TASK_RAW_WRITE) ||
@@ -1258,170 +1204,25 @@ void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
/*
* Select the next drive which will be serviced.
*/
-static ide_drive_t *choose_drive(ide_hwgroup_t *hwgroup)
+static inline ide_drive_t *choose_drive(ide_hwgroup_t *hwgroup)
{
- ide_drive_t *tmp;
- ide_drive_t *drive = NULL;
- unsigned long sleep = 0;
+ ide_drive_t *drive, *best;
- tmp = hwgroup->drive;
+ best = NULL;
+ drive = hwgroup->drive;
do {
- if (!list_empty(&tmp->queue.queue_head)
- && (!tmp->PADAM_sleep || time_after_eq(tmp->PADAM_sleep, jiffies))) {
- if (!drive
- || (tmp->PADAM_sleep && (!drive->PADAM_sleep || time_after(drive->PADAM_sleep, tmp->PADAM_sleep)))
- || (!drive->PADAM_sleep && time_after(drive->PADAM_service_start + 2 * drive->PADAM_service_time, tmp->PADAM_service_start + 2 * tmp->PADAM_service_time)))
+ if (!list_empty(&drive->queue.queue_head)
+ && (!drive->PADAM_sleep || time_after_eq(drive->PADAM_sleep, jiffies))) {
+ if (!best
+ || (drive->PADAM_sleep && (!best->PADAM_sleep || time_after(best->PADAM_sleep, drive->PADAM_sleep)))
+ || (!best->PADAM_sleep && time_after(best->PADAM_service_start + 2 * best->PADAM_service_time, drive->PADAM_service_start + 2 * drive->PADAM_service_time)))
{
- if (!blk_queue_plugged(&tmp->queue))
- drive = tmp;
+ if (!blk_queue_plugged(&drive->queue))
+ best = drive;
}
}
- tmp = tmp->next;
- } while (tmp != hwgroup->drive);
-
- if (drive)
- return drive;
-
- hwgroup->rq = NULL;
- drive = hwgroup->drive;
- do {
- if (drive->PADAM_sleep && (!sleep || time_after(sleep, drive->PADAM_sleep)))
- sleep = drive->PADAM_sleep;
} while ((drive = drive->next) != hwgroup->drive);
-
- if (sleep) {
- /*
- * Take a short snooze, and then wake up this hwgroup
- * again. This gives other hwgroups on the same a
- * chance to play fairly with us, just in case there
- * are big differences in relative throughputs.. don't
- * want to hog the cpu too much.
- */
- if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
- sleep = jiffies + WAIT_MIN_SLEEP;
-
- if (timer_pending(&hwgroup->timer))
- printk("ide_set_handler: timer already active\n");
-
- set_bit(IDE_SLEEP, &hwgroup->flags);
- mod_timer(&hwgroup->timer, sleep);
- /* we purposely leave hwgroup busy while
- * sleeping */
- } else {
- /* Ugly, but how can we sleep for the lock
- * otherwise? perhaps from tq_disk? */
- ide_release_lock(&ide_intr_lock);/* for atari only */
- clear_bit(IDE_BUSY, &hwgroup->flags);
- }
-
- return NULL;
-}
-
-/*
- * feed commands to a drive until it barfs. used to be part of ide_do_request.
- * called with ide_lock/DRIVE_LOCK held and busy hwgroup
- */
-static void ide_queue_commands(ide_drive_t *drive, int masked_irq)
-{
- ide_hwgroup_t *hwgroup = HWGROUP(drive);
- ide_startstop_t startstop = -1;
- struct request *rq;
-
- do {
- rq = NULL;
-
- if (!test_bit(IDE_BUSY, &hwgroup->flags))
- printk("%s: hwgroup not busy while queueing\n", drive->name);
-
- /*
- * abort early if we can't queue another command. for non
- * tcq, ide_can_queue is always 1 since we never get here
- * unless the drive is idle.
- */
- if (!ide_can_queue(drive)) {
- if (!ide_pending_commands(drive))
- clear_bit(IDE_BUSY, &hwgroup->flags);
- break;
- }
-
- drive->PADAM_sleep = 0;
- drive->PADAM_service_start = jiffies;
-
- if (test_bit(IDE_DMA, &hwgroup->flags)) {
- printk("ide_do_request: DMA in progress...\n");
- break;
- }
-
- /*
- * there's a small window between where the queue could be
- * replugged while we are in here when using tcq (in which
- * case the queue is probably empty anyways...), so check
- * and leave if appropriate. When not using tcq, this is
- * still a severe BUG!
- */
- if (blk_queue_plugged(&drive->queue)) {
- BUG_ON(!drive->using_tcq);
- break;
- }
-
- if (!(rq = elv_next_request(&drive->queue))) {
- if (!ide_pending_commands(drive))
- clear_bit(IDE_BUSY, &hwgroup->flags);
- hwgroup->rq = NULL;
- break;
- }
-
- /*
- * if there are queued commands, we can't start a non-fs
- * request (really, a non-queuable command) until the
- * queue is empty
- */
- if (!(rq->flags & REQ_CMD) && ide_pending_commands(drive))
- break;
-
- hwgroup->rq = rq;
-
- /*
- * Some systems have trouble with IDE IRQs arriving while
- * the driver is still setting things up. So, here we disable
- * the IRQ used by this interface while the request is being
- * started. This may look bad at first, but pretty much the
- * same thing happens anyway when any interrupt comes in, IDE
- * or otherwise -- the kernel masks the IRQ while it is being
- * handled.
- */
- if (masked_irq && HWIF(drive)->irq != masked_irq)
- disable_irq_nosync(HWIF(drive)->irq);
-
- spin_unlock(&ide_lock);
- ide__sti(); /* allow other IRQs while we start this request */
- startstop = start_request(drive, rq);
-
- spin_lock_irq(&ide_lock);
- if (masked_irq && HWIF(drive)->irq != masked_irq)
- enable_irq(HWIF(drive)->irq);
-
- /*
- * command started, we are busy
- */
- if (startstop == ide_started)
- break;
-
- /*
- * start_request() can return either ide_stopped (no command
- * was started), ide_started (command started, don't queue
- * more), or ide_released (command started, try and queue
- * more).
- */
-#if 0
- if (startstop == ide_stopped)
- set_bit(IDE_BUSY, &hwgroup->flags);
-#endif
-
- } while (1);
-
- if (startstop == ide_started)
- return;
+ return best;
}
/*
@@ -1458,34 +1259,86 @@ static void ide_do_request(ide_hwgroup_t *hwgroup, int masked_irq)
{
ide_drive_t *drive;
struct ata_channel *hwif;
+ ide_startstop_t startstop;
+ struct request *rq;
ide_get_lock(&ide_intr_lock, ide_intr, hwgroup);/* for atari only: POSSIBLY BROKEN HERE(?) */
__cli(); /* necessary paranoia: ensure IRQs are masked on local CPU */
while (!test_and_set_bit(IDE_BUSY, &hwgroup->flags)) {
-
- /*
- * will clear IDE_BUSY, if appropriate
- */
- if ((drive = choose_drive(hwgroup)) == NULL)
- break;
-
+ drive = choose_drive(hwgroup);
+ if (drive == NULL) {
+ unsigned long sleep = 0;
+ hwgroup->rq = NULL;
+ drive = hwgroup->drive;
+ do {
+ if (drive->PADAM_sleep && (!sleep || time_after(sleep, drive->PADAM_sleep)))
+ sleep = drive->PADAM_sleep;
+ } while ((drive = drive->next) != hwgroup->drive);
+ if (sleep) {
+ /*
+ * Take a short snooze, and then wake up this hwgroup again.
+ * This gives other hwgroups on the same hwif a chance to
+ * play fairly with us, just in case there are big differences
+ * in relative throughputs.. don't want to hog the cpu too much.
+ */
+ if (0 < (signed long)(jiffies + WAIT_MIN_SLEEP - sleep))
+ sleep = jiffies + WAIT_MIN_SLEEP;
+#if 1
+ if (timer_pending(&hwgroup->timer))
+ printk("ide_set_handler: timer already active\n");
+#endif
+ set_bit(IDE_SLEEP, &hwgroup->flags);
+ mod_timer(&hwgroup->timer, sleep);
+ /* we purposely leave hwgroup busy while sleeping */
+ } else {
+ /* Ugly, but how can we sleep for the lock otherwise? perhaps from tq_disk? */
+ ide_release_lock(&ide_intr_lock);/* for atari only */
+ clear_bit(IDE_BUSY, &hwgroup->flags);
+ }
+ return; /* no more work for this hwgroup (for now) */
+ }
hwif = drive->channel;
- if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif && IDE_CONTROL_REG) {
+ if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif && hwif->io_ports[IDE_CONTROL_OFFSET]) {
/* set nIEN for previous hwif */
+
if (hwif->intrproc)
hwif->intrproc(drive);
else
- OUT_BYTE(drive->ctl|2, IDE_CONTROL_REG);
+ OUT_BYTE((drive)->ctl|2, hwif->io_ports[IDE_CONTROL_OFFSET]);
}
hwgroup->hwif = hwif;
hwgroup->drive = drive;
+ drive->PADAM_sleep = 0;
+ drive->PADAM_service_start = jiffies;
+
+ if (blk_queue_plugged(&drive->queue))
+ BUG();
+
+ /*
+ * just continuing an interrupted request maybe
+ */
+ rq = hwgroup->rq = elv_next_request(&drive->queue);
/*
- * main queueing loop
+ * Some systems have trouble with IDE IRQs arriving while
+ * the driver is still setting things up. So, here we disable
+ * the IRQ used by this interface while the request is being started.
+ * This may look bad at first, but pretty much the same thing
+ * happens anyway when any interrupt comes in, IDE or otherwise
+ * -- the kernel masks the IRQ while it is being handled.
*/
- ide_queue_commands(drive, masked_irq);
+ if (masked_irq && hwif->irq != masked_irq)
+ disable_irq_nosync(hwif->irq);
+ spin_unlock(&ide_lock);
+ ide__sti(); /* allow other IRQs while we start this request */
+ startstop = start_request(drive, rq);
+ spin_lock_irq(&ide_lock);
+ if (masked_irq && hwif->irq != masked_irq)
+ enable_irq(hwif->irq);
+ if (startstop == ide_stopped)
+ clear_bit(IDE_BUSY, &hwgroup->flags);
}
}
@@ -1512,39 +1365,21 @@ void do_ide_request(request_queue_t *q)
* un-busy the hwgroup etc, and clear any pending DMA status. we want to
* retry the current request in PIO mode instead of risking tossing it
* all away
- *
- * FIXME: needs a bit of tcq work
*/
void ide_dma_timeout_retry(ide_drive_t *drive)
{
struct ata_channel *hwif = drive->channel;
- struct request *rq = NULL;
- struct ata_request *ar = NULL;
-
- if (drive->using_tcq) {
- if (drive->tcq->active_tag != -1) {
- ar = IDE_CUR_AR(drive);
- rq = ar->ar_rq;
- }
- } else {
- rq = HWGROUP(drive)->rq;
- ar = rq->special;
- }
+ struct request *rq;
/*
* end current dma transaction
*/
- if (rq)
- hwif->dmaproc(ide_dma_end, drive);
+ hwif->dmaproc(ide_dma_end, drive);
/*
* complain a little, later we might remove some of this verbosity
*/
- printk("%s: timeout waiting for DMA", drive->name);
- if (drive->using_tcq)
- printk(" queued, active tag %d", drive->tcq->active_tag);
- printk("\n");
-
+ printk("%s: timeout waiting for DMA\n", drive->name);
hwif->dmaproc(ide_dma_timeout, drive);
/*
@@ -1560,25 +1395,15 @@ void ide_dma_timeout_retry(ide_drive_t *drive)
* un-busy drive etc (hwgroup->busy is cleared on return) and
* make sure request is sane
*/
+ rq = HWGROUP(drive)->rq;
HWGROUP(drive)->rq = NULL;
- if (!rq)
- return;
-
rq->errors = 0;
if (rq->bio) {
rq->sector = rq->bio->bi_sector;
rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
rq->buffer = NULL;
}
-
- /*
- * this request was not on the queue any more
- */
- if (ar->ar_flags & ATA_AR_QUEUED) {
- ata_ar_put(drive, ar);
- _elv_add_request(&drive->queue, rq, 0, 0);
- }
}
/*
@@ -1814,16 +1639,13 @@ void ide_intr(int irq, void *dev_id, struct pt_regs *regs)
set_recovery_timer(drive->channel);
drive->PADAM_service_time = jiffies - drive->PADAM_service_start;
if (startstop == ide_stopped) {
- if (hwgroup->handler == NULL) { /* paranoia */
+ if (hwgroup->handler == NULL) { /* paranoia */
clear_bit(IDE_BUSY, &hwgroup->flags);
- if (test_bit(IDE_DMA, &hwgroup->flags))
- printk("ide_intr: illegal clear\n");
ide_do_request(hwgroup, hwif->irq);
} else {
printk("%s: ide_intr: huh? expected NULL handler on exit\n", drive->name);
}
- } else if (startstop == ide_released)
- ide_queue_commands(drive, hwif->irq);
+ }
out_lock:
spin_unlock_irqrestore(&ide_lock, flags);
@@ -1898,7 +1720,6 @@ int ide_do_drive_cmd(ide_drive_t *drive, struct request *rq, ide_action_t action
if (drive->channel->chipset == ide_pdc4030 && rq->buffer != NULL)
return -ENOSYS; /* special drive cmds not supported */
#endif
- rq->flags |= REQ_STARTED;
rq->errors = 0;
rq->rq_status = RQ_ACTIVE;
rq->rq_dev = mk_kdev(major,(drive->select.b.unit)<<PARTN_BITS);
@@ -2222,7 +2043,6 @@ void ide_unregister(struct ata_channel *channel)
}
drive->present = 0;
blk_cleanup_queue(&drive->queue);
- ide_teardown_commandlist(drive);
}
if (d->present)
hwgroup->drive = d;
@@ -2775,86 +2595,6 @@ static int ide_ioctl (struct inode *inode, struct file *file,
}
}
-int ide_build_commandlist(ide_drive_t *drive)
-{
-#ifdef CONFIG_BLK_DEV_IDEPCI
- struct pci_dev *pdev = drive->channel->pci_dev;
-#else
- struct pci_dev *pdev = NULL;
-#endif
- struct list_head *p;
- unsigned long flags;
- struct ata_request *ar;
- int i, cur;
-
- spin_lock_irqsave(&ide_lock, flags);
-
- cur = 0;
- list_for_each(p, &drive->free_req)
- cur++;
-
- /*
- * for now, just don't shrink it...
- */
- if (drive->queue_depth <= cur) {
- spin_unlock_irqrestore(&ide_lock, flags);
- return 0;
- }
-
- for (i = cur; i < drive->queue_depth; i++) {
- ar = kmalloc(sizeof(*ar), GFP_ATOMIC);
- if (!ar)
- break;
-
- memset(ar, 0, sizeof(*ar));
- INIT_LIST_HEAD(&ar->ar_queue);
-
- ar->ar_sg_table = kmalloc(PRD_SEGMENTS * sizeof(struct scatterlist), GFP_ATOMIC);
- if (!ar->ar_sg_table) {
- kfree(ar);
- break;
- }
-
- ar->ar_dmatable_cpu = pci_alloc_consistent(pdev, PRD_SEGMENTS * PRD_BYTES, &ar->ar_dmatable);
- if (!ar->ar_dmatable_cpu) {
- kfree(ar->ar_sg_table);
- kfree(ar);
- break;
- }
-
- /*
- * pheew, all done, add to list
- */
- list_add_tail(&ar->ar_queue, &drive->free_req);
- ++cur;
- }
- drive->queue_depth = cur;
- spin_unlock_irqrestore(&ide_lock, flags);
- return 0;
-}
-
-int ide_init_commandlist(ide_drive_t *drive)
-{
- INIT_LIST_HEAD(&drive->free_req);
-
- return ide_build_commandlist(drive);
-}
-
-void ide_teardown_commandlist(ide_drive_t *drive)
-{
- struct pci_dev *pdev= drive->channel->pci_dev;
- struct list_head *entry;
-
- list_for_each(entry, &drive->free_req) {
- struct ata_request *ar = list_ata_entry(entry);
-
- list_del(&ar->ar_queue);
- kfree(ar->ar_sg_table);
- pci_free_consistent(pdev, PRD_SEGMENTS * PRD_BYTES, ar->ar_dmatable_cpu, ar->ar_dmatable);
- kfree(ar);
- }
-}
-
static int ide_check_media_change (kdev_t i_rdev)
{
ide_drive_t *drive;
@@ -3426,9 +3166,6 @@ int ide_register_subdriver(ide_drive_t *drive, struct ata_operations *driver)
drive->channel->dmaproc(ide_dma_off_quietly, drive);
drive->channel->dmaproc(ide_dma_check, drive);
-#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
- drive->channel->dmaproc(ide_dma_queued_on, drive);
-#endif /* CONFIG_BLK_DEV_IDE_TCQ_DEFAULT */
}
/* Only CD-ROMs and tape drives support DSC overlap. */
drive->dsc_overlap = (drive->next != drive
diff --git a/drivers/ide/pdc202xx.c b/drivers/ide/pdc202xx.c
index 0f4f8525abed..1ffedc641081 100644
--- a/drivers/ide/pdc202xx.c
+++ b/drivers/ide/pdc202xx.c
@@ -1057,12 +1057,6 @@ somebody_else:
case ide_dma_timeout:
if (drive->channel->resetproc != NULL)
drive->channel->resetproc(drive);
- /*
- * we cannot support queued operations on promise, so fail to
- * to enable it...
- */
- case ide_dma_queued_on:
- return 1;
default:
break;
}