qubes-linux-kernel/0012-xen-blkfront-prepare-request-locally-only-then-put-i.patch

From 115094605c08f2e2790f6110f7fdc002122e0788 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 06:07:14 +0100
Subject: [PATCH] xen-blkfront: prepare request locally, only then put it on
the shared ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Do not reuse data which theoretically might have already been modified
by the backend. This is mostly about the private copy of the request
(info->shadow[id].req): make sure the request saved there is really the
one just filled.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
drivers/block/xen-blkfront.c | 76 +++++++++++++++++++++---------------
1 file changed, 44 insertions(+), 32 deletions(-)

diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3f6dbaf0265b..37235ab63ca9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -527,19 +527,16 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
struct request *req,
- struct blkif_request **ring_req)
+ struct blkif_request *ring_req)
{
unsigned long id;
- *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
- rinfo->ring.req_prod_pvt++;
-
id = get_id_from_freelist(rinfo);
rinfo->shadow[id].request = req;
rinfo->shadow[id].status = REQ_WAITING;
rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
- (*ring_req)->u.rw.id = id;
+ ring_req->u.rw.id = id;
return id;
}
@@ -547,23 +544,28 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
struct blkfront_info *info = rinfo->dev_info;
- struct blkif_request *ring_req;
+ struct blkif_request ring_req = { 0 };
unsigned long id;
/* Fill out a communications ring structure. */
id = blkif_ring_get_request(rinfo, req, &ring_req);
- ring_req->operation = BLKIF_OP_DISCARD;
- ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
- ring_req->u.discard.id = id;
- ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.operation = BLKIF_OP_DISCARD;
+ ring_req.u.discard.nr_sectors = blk_rq_sectors(req);
+ ring_req.u.discard.id = id;
+ ring_req.u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
- ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+ ring_req.u.discard.flag = BLKIF_DISCARD_SECURE;
else
- ring_req->u.discard.flag = 0;
+ ring_req.u.discard.flag = 0;
+
+ /* make the request available to the backend */
+ *RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = ring_req;
+ wmb();
+ rinfo->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
- rinfo->shadow[id].req = *ring_req;
+ rinfo->shadow[id].req = ring_req;
return 0;
}
@@ -695,7 +697,7 @@ static void blkif_setup_extra_req(struct blkif_request *first,
static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
struct blkfront_info *info = rinfo->dev_info;
- struct blkif_request *ring_req, *extra_ring_req = NULL;
+ struct blkif_request ring_req = { 0 }, extra_ring_req = { 0 };
unsigned long id, extra_id = NO_ASSOCIATED_ID;
bool require_extra_req = false;
int i;
@@ -760,16 +762,16 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* BLKIF_OP_WRITE
*/
BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
- ring_req->operation = BLKIF_OP_INDIRECT;
- ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+ ring_req.operation = BLKIF_OP_INDIRECT;
+ ring_req.u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->u.indirect.handle = info->handle;
- ring_req->u.indirect.nr_segments = num_grant;
+ ring_req.u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.u.indirect.handle = info->handle;
+ ring_req.u.indirect.nr_segments = num_grant;
} else {
- ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->u.rw.handle = info->handle;
- ring_req->operation = rq_data_dir(req) ?
+ ring_req.u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.u.rw.handle = info->handle;
+ ring_req.operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
/*
@@ -780,15 +782,15 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
* since it is guaranteed ordered WRT previous writes.)
*/
if (info->feature_flush && info->feature_fua)
- ring_req->operation =
+ ring_req.operation =
BLKIF_OP_WRITE_BARRIER;
else if (info->feature_flush)
- ring_req->operation =
+ ring_req.operation =
BLKIF_OP_FLUSH_DISKCACHE;
else
- ring_req->operation = 0;
+ ring_req.operation = 0;
}
- ring_req->u.rw.nr_segments = num_grant;
+ ring_req.u.rw.nr_segments = num_grant;
if (unlikely(require_extra_req)) {
extra_id = blkif_ring_get_request(rinfo, req,
&extra_ring_req);
@@ -798,7 +800,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
*/
rinfo->shadow[extra_id].num_sg = 0;
- blkif_setup_extra_req(ring_req, extra_ring_req);
+ blkif_setup_extra_req(&ring_req, &extra_ring_req);
/* Link the 2 requests together */
rinfo->shadow[extra_id].associated_id = id;
@@ -806,12 +808,12 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
}
}
- setup.ring_req = ring_req;
+ setup.ring_req = &ring_req;
setup.id = id;
setup.require_extra_req = require_extra_req;
if (unlikely(require_extra_req))
- setup.extra_ring_req = extra_ring_req;
+ setup.extra_ring_req = &extra_ring_req;
for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
BUG_ON(sg->offset + sg->length > PAGE_SIZE);
@@ -833,10 +835,20 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
if (setup.segments)
kunmap_atomic(setup.segments);
+ /* make the request available to the backend */
+ *RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = ring_req;
+ wmb();
+ rinfo->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
- rinfo->shadow[id].req = *ring_req;
- if (unlikely(require_extra_req))
- rinfo->shadow[extra_id].req = *extra_ring_req;
+ rinfo->shadow[id].req = ring_req;
+
+ if (unlikely(require_extra_req)) {
+ *RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = extra_ring_req;
+ wmb();
+ rinfo->ring.req_prod_pvt++;
+ /* Keep a private copy so we can reissue requests when recovering. */
+ rinfo->shadow[extra_id].req = extra_ring_req;
+ }
if (new_persistent_gnts)
gnttab_free_grant_references(setup.gref_head);
--
2.21.0
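
For reference, the pattern the patch enforces, condensed into a minimal C
sketch. It assumes the xen-blkfront definitions visible in the diff above
(struct blkfront_ring_info, blkif_ring_get_request() with its new
fill-a-local-copy signature, RING_GET_REQUEST(), wmb(), the shadow array);
segment setup and error handling are elided, so treat it as an illustration
of the technique rather than the driver's actual queueing code:

	/*
	 * XSA155 hardening: build the request entirely in frontend-private
	 * memory, publish it to the shared ring in a single structure copy,
	 * and take the recovery copy from the private version instead of
	 * re-reading the (backend-writable) ring.
	 */
	static int queue_one_request(struct request *req,
				     struct blkfront_ring_info *rinfo)
	{
		struct blkif_request ring_req = { 0 };	/* private, on-stack */
		unsigned long id;

		/* 1. Fill every field locally; the backend cannot see this. */
		id = blkif_ring_get_request(rinfo, req, &ring_req);
		ring_req.operation = BLKIF_OP_READ;
		ring_req.u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
		/* ... remaining fields as in blkif_queue_rw_req() above ... */

		/* 2. Publish: one structure copy, then a write barrier before
		 *    the producer index moves past the new entry. */
		*RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt) = ring_req;
		wmb();
		rinfo->ring.req_prod_pvt++;

		/* 3. Keep the reissue-on-recovery copy from the private
		 *    request; never read it back from the shared ring. */
		rinfo->shadow[id].req = ring_req;
		return 0;
	}

The later RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() in the driver's
flush_requests() is still what makes req_prod visible to the backend; the
point of the patch is simply that nothing written to the shared ring is
ever read back afterwards.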