qubes-linux-kernel/patches.xen/xsa155-linux44-0013-xen-blkfront-prepare-request-locally-only-then-put-i.patch
Marek Marczykowski-Górecki d78c498a97
version 4.4.8-9
As usual - dropped patches already applied there, updated the others.
2016-05-08 20:45:02 +02:00

From 74aaa42e1f25309a163acd00083ecbbc186fbb47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 06:07:14 +0100
Subject: [PATCH 13/13] xen-blkfront: prepare request locally, only then put it
on the shared ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

Do not reuse data which theoretically might already have been modified by
the backend. This is mostly about the private copy of the request
(info->shadow[id].req) - make sure the request saved there is really the
one just filled.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
drivers/block/xen-blkfront.c | 56 ++++++++++++++++++++++++--------------------
1 file changed, 30 insertions(+), 26 deletions(-)
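
For context, the change can be sketched as the difference between two
request-submission patterns. The fragment below is illustrative only - it
reuses the driver's identifiers but simplifies the surrounding code, and is
not part of the patch itself.

    /* Old pattern: the request is assembled field by field directly in the
     * shared ring page, where the backend can already observe it, and the
     * shadow copy is later read back from that same shared memory. */
    ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
    ring_req->operation = BLKIF_OP_DISCARD;
    /* ... remaining fields written one by one into shared memory ... */
    info->shadow[id].req = *ring_req;    /* re-reads shared memory */

    /* New pattern: the request is built in frontend-private memory, published
     * with a single structure copy, and the shadow copy is taken from the
     * private original, so the backend cannot influence what gets saved. */
    struct blkif_request ring_req = { };
    ring_req.operation = BLKIF_OP_DISCARD;
    /* ... remaining fields filled in locally ... */
    *RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt) = ring_req;
    wmb();    /* copy completes before the producer index is advanced */
    info->ring.req_prod_pvt++;
    info->shadow[id].req = ring_req;     /* copy of the private original */
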
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5d7eb04..514cf18 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -463,27 +463,29 @@ static int blkif_ioctl(struct block_devi
static int blkif_queue_discard_req(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
- struct blkif_request *ring_req;
+ struct blkif_request ring_req = { };
unsigned long id;
/* Fill out a communications ring structure. */
- ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
id = get_id_from_freelist(info);
info->shadow[id].request = req;
- ring_req->operation = BLKIF_OP_DISCARD;
- ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
- ring_req->u.discard.id = id;
- ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.operation = BLKIF_OP_DISCARD;
+ ring_req.u.discard.nr_sectors = blk_rq_sectors(req);
+ ring_req.u.discard.id = id;
+ ring_req.u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
- ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+ ring_req.u.discard.flag = BLKIF_DISCARD_SECURE;
else
- ring_req->u.discard.flag = 0;
+ ring_req.u.discard.flag = 0;
+ /* make the request available to the backend */
+ *RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt) = ring_req;
+ wmb();
info->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
- info->shadow[id].req = *ring_req;
+ info->shadow[id].req = ring_req;
return 0;
}
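
The discard path above now follows one fixed sequence: build the request on
the stack, copy it into the ring slot in a single assignment, issue a write
barrier, advance the private producer index, and record the shadow entry from
the local original. Written as a hypothetical helper (the driver open-codes
this sequence in both queueing functions; the helper below is only a sketch
of that ordering, not an actual driver function):

    static void blkif_publish_request(struct blkfront_info *info,
                                      const struct blkif_request *req,
                                      unsigned long id)
    {
            /* Single copy of the fully built request into shared memory. */
            *RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt) = *req;
            /* Ensure the copy is visible before the producer index moves. */
            wmb();
            info->ring.req_prod_pvt++;
            /* The shadow copy comes from the private original, never from
             * the shared ring. */
            info->shadow[id].req = *req;
    }
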
@@ -573,7 +575,7 @@ static void blkif_setup_rw_req_grant(uns
static int blkif_queue_rw_req(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
- struct blkif_request *ring_req;
+ struct blkif_request ring_req = { };
unsigned long id;
int i;
struct setup_rw_req setup = {
@@ -435,7 +435,6 @@
new_persistent_gnts = 0;
/* Fill out a communications ring structure. */
- ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
id = get_id_from_freelist(info);
info->shadow[id].request = req;
@@ -632,7 +633,7 @@ static int blkif_queue_rw_req(struct req
for_each_sg(info->shadow[id].sg, sg, num_sg, i)
num_grant += gnttab_count_grant(sg->offset, sg->length);
- ring_req->u.rw.id = id;
+ ring_req.u.rw.id = id;
info->shadow[id].num_sg = num_sg;
if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
/*
@@ -640,16 +641,16 @@ static int blkif_queue_rw_req(struct req
* BLKIF_OP_WRITE
*/
BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
- ring_req->operation = BLKIF_OP_INDIRECT;
- ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+ ring_req.operation = BLKIF_OP_INDIRECT;
+ ring_req.u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->u.indirect.handle = info->handle;
- ring_req->u.indirect.nr_segments = num_grant;
+ ring_req.u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.u.indirect.handle = info->handle;
+ ring_req.u.indirect.nr_segments = num_grant;
} else {
- ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->u.rw.handle = info->handle;
- ring_req->operation = rq_data_dir(req) ?
+ ring_req.u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.u.rw.handle = info->handle;
+ ring_req.operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
/*
@@ -662,21 +663,21 @@ static int blkif_queue_rw_req(struct req
switch (info->feature_flush &
((REQ_FLUSH|REQ_FUA))) {
case REQ_FLUSH|REQ_FUA:
- ring_req->operation =
+ ring_req.operation =
BLKIF_OP_WRITE_BARRIER;
break;
case REQ_FLUSH:
- ring_req->operation =
+ ring_req.operation =
BLKIF_OP_FLUSH_DISKCACHE;
break;
default:
- ring_req->operation = 0;
+ ring_req.operation = 0;
}
}
- ring_req->u.rw.nr_segments = num_grant;
+ ring_req.u.rw.nr_segments = num_grant;
}
- setup.ring_req = ring_req;
+ setup.ring_req = &ring_req;
setup.id = id;
for_each_sg(info->shadow[id].sg, sg, num_sg, i) {
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
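
Because ring_req is now an on-stack structure rather than a pointer into the
shared page, setup.ring_req has to carry its address: the segment descriptors
that blkif_setup_rw_req_grant() fills in (its body is outside this patch)
land in the private copy and only reach the backend through the single copy
at the end of the function. Assuming setup_rw_req keeps ring_req as a plain
pointer member, the hand-off amounts to:

    struct blkif_request ring_req = { };   /* private, on the frontend stack */
    struct setup_rw_req setup = { /* ... */ };

    setup.ring_req = &ring_req;            /* grant setup writes segments here */
    setup.id = id;
    /* ... grant mapping fills ring_req.u.rw.seg[] or the indirect layout ... */
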
@@ -698,10 +699,13 @@ static int blkif_queue_rw_req(struct req
if (setup.segments)
kunmap_atomic(setup.segments);
+ /* make the request available to the backend */
+ *RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt) = ring_req;
+ wmb();
info->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
- info->shadow[id].req = *ring_req;
+ info->shadow[id].req = ring_req;
if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);