From 74aaa42e1f25309a163acd00083ecbbc186fbb47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
 <marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 06:07:14 +0100
Subject: [PATCH 13/13] xen-blkfront: prepare request locally, only then put it
 on the shared ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

Do not reuse data which theoretically might be already modified by the
backend. This is mostly about the private copy of the request
(info->shadow[id].req) - make sure the request saved there is really the
one just filled.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
 drivers/block/xen-blkfront.c | 56 ++++++++++++++++++++++++--------------------
 1 file changed, 30 insertions(+), 26 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5d7eb04..514cf18 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -389,7 +389,7 @@ static int blkif_ioctl(struct block_devi
 static int blkif_queue_request(struct request *req)
 {
 	struct blkfront_info *info = req->rq_disk->private_data;
-	struct blkif_request *ring_req;
+	struct blkif_request ring_req;
 	unsigned long id;
 	unsigned int fsect, lsect;
 	int i, ref, n;
@@ -435,42 +435,41 @@ static int blkif_queue_request(struct re
 		new_persistent_gnts = 0;
 
 	/* Fill out a communications ring structure. */
-	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
 	id = get_id_from_freelist(info);
 	info->shadow[id].request = req;
 
 	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
-		ring_req->operation = BLKIF_OP_DISCARD;
-		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
-		ring_req->u.discard.id = id;
-		ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
+		ring_req.operation = BLKIF_OP_DISCARD;
+		ring_req.u.discard.nr_sectors = blk_rq_sectors(req);
+		ring_req.u.discard.id = id;
+		ring_req.u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
 		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
-			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+			ring_req.u.discard.flag = BLKIF_DISCARD_SECURE;
 		else
-			ring_req->u.discard.flag = 0;
+			ring_req.u.discard.flag = 0;
 	} else {
 		BUG_ON(info->max_indirect_segments == 0 &&
 		       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 		BUG_ON(info->max_indirect_segments &&
 		       req->nr_phys_segments > info->max_indirect_segments);
 		nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
-		ring_req->u.rw.id = id;
+		ring_req.u.rw.id = id;
 		if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 			/*
 			 * The indirect operation can only be a BLKIF_OP_READ or
 			 * BLKIF_OP_WRITE
 			 */
 			BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
-			ring_req->operation = BLKIF_OP_INDIRECT;
-			ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+			ring_req.operation = BLKIF_OP_INDIRECT;
+			ring_req.u.indirect.indirect_op = rq_data_dir(req) ?
 				BLKIF_OP_WRITE : BLKIF_OP_READ;
-			ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
-			ring_req->u.indirect.handle = info->handle;
-			ring_req->u.indirect.nr_segments = nseg;
+			ring_req.u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+			ring_req.u.indirect.handle = info->handle;
+			ring_req.u.indirect.nr_segments = nseg;
 		} else {
-			ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
-			ring_req->u.rw.handle = info->handle;
-			ring_req->operation = rq_data_dir(req) ?
+			ring_req.u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+			ring_req.u.rw.handle = info->handle;
+			ring_req.operation = rq_data_dir(req) ?
 				BLKIF_OP_WRITE : BLKIF_OP_READ;
 			if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
 				/*
@@ -481,24 +480,24 @@ static int blkif_queue_request(struct re
 				switch (info->feature_flush &
 					((REQ_FLUSH|REQ_FUA))) {
 				case REQ_FLUSH|REQ_FUA:
-					ring_req->operation =
+					ring_req.operation =
 						BLKIF_OP_WRITE_BARRIER;
 					break;
 				case REQ_FLUSH:
-					ring_req->operation =
+					ring_req.operation =
 						BLKIF_OP_FLUSH_DISKCACHE;
 					break;
 				default:
-					ring_req->operation = 0;
+					ring_req.operation = 0;
 				}
 			}
-			ring_req->u.rw.nr_segments = nseg;
+			ring_req.u.rw.nr_segments = nseg;
 		}
 		for_each_sg(info->shadow[id].sg, sg, nseg, i) {
 			fsect = sg->offset >> 9;
 			lsect = fsect + (sg->length >> 9) - 1;
 
-			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
+			if ((ring_req.operation == BLKIF_OP_INDIRECT) &&
 			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
 				unsigned long uninitialized_var(pfn);
 
@@ -505,7 +504,7 @@ static int blkif_queue_request(struct re
 			gnt_list_entry = get_grant(&gref_head, pfn, info);
 			info->shadow[id].indirect_grants[n] = gnt_list_entry;
 			segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
-			ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+			ring_req.u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
 		}
 
 		gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
|
@@ -537,8 +537,8 @@ static int blkif_queue_request(struct re
|
|
kunmap_atomic(bvec_data);
|
|
kunmap_atomic(shared_data);
|
|
}
|
|
- if (ring_req->operation != BLKIF_OP_INDIRECT) {
|
|
- ring_req->u.rw.seg[i] =
|
|
+ if (ring_req.operation != BLKIF_OP_INDIRECT) {
|
|
+ ring_req.u.rw.seg[i] =
|
|
(struct blkif_request_segment) {
|
|
.gref = ref,
|
|
.first_sect = fsect,
|
|
@@ -556,10 +555,13 @@ static int blkif_queue_request(struct re
 			kunmap_atomic(segments);
 	}
 
+	/* make the request available to the backend */
+	*RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt) = ring_req;
+	wmb();
 	info->ring.req_prod_pvt++;
 
 	/* Keep a private copy so we can reissue requests when recovering. */
-	info->shadow[id].req = *ring_req;
+	info->shadow[id].req = ring_req;
 
 	if (new_persistent_gnts)
 		gnttab_free_grant_references(gref_head);
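
The ordering this patch enforces generalizes to any producer on a shared ring: build the request in private memory, copy the finished struct into the shared slot, issue a write barrier, and only then advance the producer index, keeping every later read (such as the shadow copy saved for recovery) on the private version rather than the backend-writable slot. Below is a minimal user-space sketch of that ordering; the names (demo_req, demo_ring, demo_queue, shadow) are hypothetical stand-ins for blkif_request and the blkfront ring, not kernel API.

	/* Illustrative only: hypothetical types mimicking the blkfront ring. */
	#define RING_SIZE 32

	struct demo_req {
		unsigned int  operation;
		unsigned long sector_number;
		unsigned long id;
	};

	struct demo_ring {
		struct demo_req ring[RING_SIZE]; /* shared with an untrusted backend */
		unsigned int    req_prod_pvt;    /* private producer index */
	};

	static struct demo_req shadow[RING_SIZE]; /* private copies for reissue */

	static void demo_queue(struct demo_ring *r, unsigned int op,
			       unsigned long sector, unsigned long id)
	{
		struct demo_req req;             /* prepared locally, as in the patch */

		req.operation     = op;
		req.sector_number = sector;
		req.id            = id;

		/* Publish the finished request; never read the shared slot back. */
		r->ring[r->req_prod_pvt % RING_SIZE] = req;
		__sync_synchronize();            /* stands in for wmb() */
		r->req_prod_pvt++;

		/* Save the private copy, not the now backend-writable shared one. */
		shadow[id % RING_SIZE] = req;
	}

With this structure the backend can scribble over the shared slot at any time after the barrier without affecting either the request being built or the shadow copy the frontend later trusts.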