author		James Bottomley <James.Bottomley@HansenPartnership.com>	2009-06-12 10:02:03 -0500
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2009-06-12 10:02:03 -0500
commit		82681a318f9f028ea64e61f24bbd9ac535531921 (patch)
tree		529b6a5b4fd040fb54b7672b1a224ebd47445876 /drivers/message
parent		3860c97bd60a4525bb62eb90e3e7d2f02662ac59 (diff)
parent		8ebf975608aaebd7feb33d77f07ba21a6380e086 (diff)
[SCSI] Merge branch 'linus'
Conflicts:

	drivers/message/fusion/mptsas.c

fixed up conflict between req->data_len accessors and mptsas driver updates.

Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/message')
-rw-r--r--	drivers/message/fusion/mptsas.c	22
-rw-r--r--	drivers/message/i2o/i2o_block.c	43
2 files changed, 31 insertions, 34 deletions
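
For context, the conflict fix-up replaces direct reads of the removed req->data_len field with the blk_rq_bytes() accessor from the reworked block-layer request API. Below is a minimal, hypothetical sketch of the new accessor usage; example_smp_payload_len() is illustrative only and not part of this commit.

#include <linux/blkdev.h>

/*
 * Illustrative helper: the payload size of a request is now read through
 * blk_rq_bytes() instead of the removed req->data_len field.  The -4 mirrors
 * the adjustment made in the mptsas hunks below.
 */
static unsigned int example_smp_payload_len(struct request *req)
{
	return blk_rq_bytes(req) - 4;
}
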
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 14c490a767a..20e0b447e8e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2135,8 +2135,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
- ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
- rsp->bio->bi_vcnt, rsp->data_len);
+ ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@@ -2153,7 +2153,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
- smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
+ smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
@@ -2179,10 +2179,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION)
<< MPI_SGE_FLAGS_SHIFT;
- flagsLength |= (req->data_len - 4);
+ flagsLength |= (blk_rq_bytes(req) - 4);
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
- req->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out)
goto put_mf;
ioc->add_sge(psge, flagsLength, dma_addr_out);
@@ -2195,9 +2195,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI_SGE_FLAGS_END_OF_BUFFER;
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
- flagsLength |= rsp->data_len + 4;
+ flagsLength |= blk_rq_bytes(rsp) + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
- rsp->data_len, PCI_DMA_BIDIRECTIONAL);
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in)
goto unmap;
ioc->add_sge(psge, flagsLength, dma_addr_in);
@@ -2221,8 +2221,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
- req->data_len = 0;
- rsp->data_len -= smprep->ResponseDataLength;
+ req->resid_len = 0;
+ rsp->resid_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT
"%s: smp passthru reply failed to be returned\n",
@@ -2231,10 +2231,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
unmap:
if (dma_addr_out)
- pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
- pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
+ pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
put_mf:
if (mf)
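
The completion path above also stops rewriting data_len and instead reports leftover bytes through resid_len. A hedged sketch of that bookkeeping, assuming a bidirectional request pair like the SMP passthrough above; example_report_residual() is hypothetical and not from this commit.

#include <linux/blkdev.h>

/*
 * Hypothetical sketch of the resid_len bookkeeping: drivers record how many
 * bytes were left untransferred rather than shrinking data_len in place.
 */
static void example_report_residual(struct request *req, struct request *rsp,
				    unsigned int rsp_bytes_returned)
{
	req->resid_len = 0;				/* request fully consumed */
	rsp->resid_len = blk_rq_bytes(rsp) - rsp_bytes_returned;
}
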
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index a443e136dc4..335d4c78a77 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
struct request_queue *q = req->q;
unsigned long flags;
- if (blk_end_request(req, error, nr_bytes)) {
- int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
-
- if (blk_pc_request(req))
- leftover = req->data_len;
-
+ if (blk_end_request(req, error, nr_bytes))
if (error)
- blk_end_request(req, -EIO, leftover);
- }
+ blk_end_request_all(req, -EIO);
spin_lock_irqsave(q->queue_lock, flags);
@@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
break;
case CACHE_SMARTFETCH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x201F0008;
else
ctl_flags = 0x001F0000;
@@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTBACK:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTTHROUGH:
- if (req->nr_sectors > 16)
+ if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
@@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
if (c->adaptec) {
u8 cmd[10];
u32 scsi_flags;
- u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
+ u16 hwsec;
+ hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
memset(cmd, 0, 10);
sgl_offset = SGL_OFFSET_12;
@@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
*mptr++ = cpu_to_le32(scsi_flags);
- *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
- *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+ *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+ *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
memcpy(mptr, cmd, 10);
mptr += 4;
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
} else
#endif
{
msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
*mptr++ = cpu_to_le32(ctl_flags);
- *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
*mptr++ =
- cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+ cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
*mptr++ =
- cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+ cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
struct request *req;
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req)
break;
@@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
if (!i2o_block_transfer(req)) {
- blkdev_dequeue_request(req);
+ blk_start_request(req);
continue;
} else
osm_info("transfer error\n");
@@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
blk_stop_queue(q);
break;
}
- } else
- end_request(req, 0);
+ } else {
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
+ }
}
};
@@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
*/
if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
- blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
+ blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);