Apply XSA-155 patches (frontends part)

Marek Marczykowski-Górecki
parent 7f4bcb4fdc
commit f43d14ef34

patches.xen/xsa155-linux-0008-xen-Add-RING_COPY_RESPONSE.patch
@@ -0,0 +1,60 @@
From 8322f4eddaf1fe5a9bdf5252c8140daa8bad60fd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Tue, 15 Dec 2015 21:35:14 +0100
Subject: [PATCH 08/13] xen: Add RING_COPY_RESPONSE()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

RING_GET_RESPONSE() on a shared ring is easy to use incorrectly (i.e.,
by not considering that the other end may alter the data in the shared
ring while it is being inspected). Safe usage of a response generally
requires taking a local copy.

Provide a RING_COPY_RESPONSE() macro to use instead of
RING_GET_RESPONSE() and an open-coded memcpy(). This takes care of
ensuring that the copy is done correctly regardless of any possible
compiler optimizations.

Use a volatile source to prevent the compiler from reordering or
omitting the copy.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
include/xen/interface/io/ring.h | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 7dc685b..312415c 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -198,6 +198,20 @@ struct __name##_back_ring { \
#define RING_GET_RESPONSE(_r, _idx) \
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
+/*
+ * Get a local copy of a response.
+ *
+ * Use this in preference to RING_GET_RESPONSE() so all processing is
+ * done on a local copy that cannot be modified by the other end.
+ *
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
+ * to be ineffective where _rsp is a struct which consists of only bitfields.
+ */
+#define RING_COPY_RESPONSE(_r, _idx, _rsp) do { \
+ /* Use volatile to force the copy into _rsp. */ \
+ *(_rsp) = *(volatile typeof(_rsp))RING_GET_RESPONSE(_r, _idx); \
+} while (0)
+
/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
--
2.1.0

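A minimal user-space sketch of the volatile-copy idiom the macro above relies on; the struct, ring array, and helper names here are invented stand-ins, not the kernel's types:

    #include <stdio.h>

    struct rsp { unsigned int id; int status; };

    /* Stand-in for a ring page that the backend can rewrite at any time. */
    static struct rsp shared_ring[8];

    static void copy_response(unsigned int idx, struct rsp *out)
    {
        /* A plain struct assignment may be compiled into several separate
         * loads from the shared slot; reading through a pointer to
         * volatile forces one up-front copy into the local buffer. */
        *out = *(volatile struct rsp *)&shared_ring[idx & 7];
    }

    int main(void)
    {
        struct rsp local;

        copy_response(0, &local);
        /* Every later check and use sees this one snapshot. */
        printf("id=%u status=%d\n", local.id, local.status);
        return 0;
    }

As the macro's comment warns, GCC bug 58145 means the volatile access may still be mis-optimized when the response type consists only of bitfields.
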
patches.xen/xsa155-linux-0011-xen-netfront-add-range-check-for-Tx-response-id.patch
@@ -0,0 +1,37 @@
From 76a020d3b2023ca02961eab38318ef2d6f1338d9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 05:22:24 +0100
Subject: [PATCH 11/13] xen-netfront: add range check for Tx response id
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

The Tx response ID is fetched from the shared page, so make sure it is
sane before using it as an array index.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
drivers/net/xen-netfront.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 959e479..94309e6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -379,6 +379,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
continue;
id = txrsp.id;
+ BUG_ON(id >= NET_TX_RING_SIZE);
skb = queue->tx_skbs[id].skb;
if (unlikely(gnttab_query_foreign_access(
queue->grant_tx_ref[id]) != 0)) {
--
2.1.0

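The BUG_ON() added above is the usual pattern for any index read from memory the peer controls; a compilable sketch with made-up sizes and types (tx_slot and RING_SIZE_DEMO are illustrative):

    #include <stddef.h>

    #define RING_SIZE_DEMO 256u

    struct tx_slot { void *skb; };
    static struct tx_slot tx_slots[RING_SIZE_DEMO];

    /* Validate an id taken from shared memory before it indexes anything.
     * The patch uses BUG_ON(); returning NULL just keeps the sketch tame. */
    static struct tx_slot *tx_slot_for_id(unsigned int id)
    {
        if (id >= RING_SIZE_DEMO)
            return NULL;
        return &tx_slots[id];
    }
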
patches.xen/xsa155-linux312-0012-xen-blkfront-make-local-copy-of-response-before-usin.patch
@@ -0,0 +1,121 @@
From ef0d243bfeaf1da8854c26f89536dc1b69c56602 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 05:51:10 +0100
Subject: [PATCH 12/13] xen-blkfront: make local copy of response before using
it
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

Data on the shared page can be changed at any time by the backend. Make
a local copy, which is no longer controlled by the backend, and only
then access it.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
drivers/block/xen-blkfront.c | 34 +++++++++++++++++-----------------
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2fee2ee..5d7eb04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1296,7 +1296,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
struct request *req;
- struct blkif_response *bret;
+ struct blkif_response bret;
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
@@ -1316,8 +1316,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
- bret = RING_GET_RESPONSE(&info->ring, i);
- id = bret->id;
+ RING_COPY_RESPONSE(&info->ring, i, &bret);
+ id = bret.id;
/*
* The backend has messed up and given us an id that we would
* never have given to it (we stamp it up to BLK_RING_SIZE -
@@ -1325,29 +1325,29 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
*/
if (id >= BLK_RING_SIZE) {
WARN(1, "%s: response to %s has incorrect id (%ld)\n",
- info->gd->disk_name, op_name(bret->operation), id);
+ info->gd->disk_name, op_name(bret.operation), id);
/* We can't safely get the 'struct request' as
* the id is busted. */
continue;
}
req = info->shadow[id].request;
- if (bret->operation != BLKIF_OP_DISCARD)
- blkif_completion(&info->shadow[id], info, bret);
+ if (bret.operation != BLKIF_OP_DISCARD)
+ blkif_completion(&info->shadow[id], info, &bret);
if (add_id_to_freelist(info, id)) {
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
- info->gd->disk_name, op_name(bret->operation), id);
+ info->gd->disk_name, op_name(bret.operation), id);
continue;
}
- error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
- switch (bret->operation) {
+ error = (bret.status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ switch (bret.operation) {
case BLKIF_OP_DISCARD:
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+ info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -1358,15 +1358,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+ info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
}
- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
- info->gd->disk_name, op_name(bret->operation));
+ info->gd->disk_name, op_name(bret.operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {
@@ -1378,9 +1378,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
/* fall through */
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
- if (unlikely(bret->status != BLKIF_RSP_OKAY))
+ if (unlikely(bret.status != BLKIF_RSP_OKAY))
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
- "request: %x\n", bret->status);
+ "request: %x\n", bret.status);
__blk_end_request_all(req, error);
break;
--
2.1.0

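The loop shape that results is worth keeping in mind; a sketch of the copy-validate-branch pattern, with an invented response type and shadow size standing in for blkfront's:

    struct demo_rsp { unsigned long id; int operation; int status; };

    #define SHADOW_SIZE_DEMO 64u

    /* Snapshot first, then make every decision from the snapshot, so the
     * id check and each switch arm all see the same values. */
    static int consume_one(volatile struct demo_rsp *slot)
    {
        struct demo_rsp bret = *slot;   /* one copy out of shared memory */

        if (bret.id >= SHADOW_SIZE_DEMO)
            return -1;                  /* validate the copy, not the slot */

        switch (bret.operation) {
        default:
            return bret.status;         /* later reads cannot diverge */
        }
    }
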
patches.xen/xsa155-linux318-0009-xen-netfront-copy-response-out-of-shared-buffer-befo.patch
@@ -0,0 +1,179 @@
From 3a1006355114da4b8fc4b935a64928b7f6ae374f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 05:09:55 +0100
Subject: [PATCH 09/13] xen-netfront: copy response out of shared buffer before
accessing it
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

Make a local copy of the response; otherwise the backend might modify
it while the frontend is already processing it, leading to a
time-of-check / time-of-use issue.

Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
drivers/net/xen-netfront.c | 51 +++++++++++++++++++++++-----------------------
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d6abf19..2af5100 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -372,13 +372,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
rmb(); /* Ensure we see responses up to 'rp'. */
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
- struct xen_netif_tx_response *txrsp;
+ struct xen_netif_tx_response txrsp;
- txrsp = RING_GET_RESPONSE(&queue->tx, cons);
- if (txrsp->status == XEN_NETIF_RSP_NULL)
+ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+ if (txrsp.status == XEN_NETIF_RSP_NULL)
continue;
- id = txrsp->id;
+ id = txrsp.id;
skb = queue->tx_skbs[id].skb;
if (unlikely(gnttab_query_foreign_access(
queue->grant_tx_ref[id]) != 0)) {
@@ -721,7 +721,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
RING_IDX rp)
{
- struct xen_netif_extra_info *extra;
+ struct xen_netif_extra_info extra;
struct device *dev = &queue->info->netdev->dev;
RING_IDX cons = queue->rx.rsp_cons;
int err = 0;
@@ -737,24 +737,23 @@ static int xennet_get_extras(struct netfront_queue *queue,
break;
}
- extra = (struct xen_netif_extra_info *)
- RING_GET_RESPONSE(&queue->rx, ++cons);
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
- if (unlikely(!extra->type ||
- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+ if (unlikely(!extra.type ||
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
if (net_ratelimit())
dev_warn(dev, "Invalid extra type: %d\n",
- extra->type);
+ extra.type);
err = -EINVAL;
} else {
- memcpy(&extras[extra->type - 1], extra,
- sizeof(*extra));
+ memcpy(&extras[extra.type - 1], &extra,
+ sizeof(extra));
}
skb = xennet_get_rx_skb(queue, cons);
ref = xennet_get_rx_ref(queue, cons);
xennet_move_rx_slot(queue, skb, ref);
- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
queue->rx.rsp_cons = cons;
return err;
@@ -764,28 +763,28 @@ static int xennet_get_responses(struct netfront_queue *queue,
struct netfront_rx_info *rinfo, RING_IDX rp,
struct sk_buff_head *list)
{
- struct xen_netif_rx_response *rx = &rinfo->rx;
+ struct xen_netif_rx_response rx = rinfo->rx;
struct xen_netif_extra_info *extras = rinfo->extras;
struct device *dev = &queue->info->netdev->dev;
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
grant_ref_t ref = xennet_get_rx_ref(queue, cons);
- int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+ int max = MAX_SKB_FRAGS + (rx.status <= RX_COPY_THRESHOLD);
int slots = 1;
int err = 0;
unsigned long ret;
- if (rx->flags & XEN_NETRXF_extra_info) {
+ if (rx.flags & XEN_NETRXF_extra_info) {
err = xennet_get_extras(queue, extras, rp);
cons = queue->rx.rsp_cons;
}
for (;;) {
- if (unlikely(rx->status < 0 ||
- rx->offset + rx->status > PAGE_SIZE)) {
+ if (unlikely(rx.status < 0 ||
+ rx.offset + rx.status > PAGE_SIZE)) {
if (net_ratelimit())
dev_warn(dev, "rx->offset: %x, size: %u\n",
- rx->offset, rx->status);
+ rx.offset, rx.status);
xennet_move_rx_slot(queue, skb, ref);
err = -EINVAL;
goto next;
@@ -799,7 +798,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
if (ref == GRANT_INVALID_REF) {
if (net_ratelimit())
dev_warn(dev, "Bad rx response id %d.\n",
- rx->id);
+ rx.id);
err = -EINVAL;
goto next;
}
@@ -812,7 +811,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
__skb_queue_tail(list, skb);
next:
- if (!(rx->flags & XEN_NETRXF_more_data))
+ if (!(rx.flags & XEN_NETRXF_more_data))
break;
if (cons + slots == rp) {
@@ -822,7 +821,7 @@ next:
break;
}
- rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+ RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx);
skb = xennet_get_rx_skb(queue, cons + slots);
ref = xennet_get_rx_ref(queue, cons + slots);
slots++;
@@ -878,9 +877,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *nskb;
while ((nskb = __skb_dequeue(list))) {
- struct xen_netif_rx_response *rx =
- RING_GET_RESPONSE(&queue->rx, ++cons);
+ struct xen_netif_rx_response rx;
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
if (shinfo->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
@@ -891,7 +890,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
- rx->offset, rx->status, PAGE_SIZE);
+ rx.offset, rx.status, PAGE_SIZE);
skb_shinfo(nskb)->nr_frags = 0;
kfree_skb(nskb);
@@ -987,7 +986,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
i = queue->rx.rsp_cons;
work_done = 0;
while ((i != rp) && (work_done < budget)) {
- memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+ RING_COPY_RESPONSE(&queue->rx, i, rx);
memset(extras, 0, sizeof(rinfo.extras));
err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
--
2.1.0

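The offset/status validation above is exactly what a time-of-check / time-of-use race defeats; a compilable illustration of check-after-copy, with stand-in types (demo_rx and page_size are invented):

    struct demo_rx { int offset; int status; };

    /* Pre-patch, both the bounds check and the later use re-read the
     * shared slot, so the backend could change offset/status in between.
     * Copying first makes the check binding. */
    static int read_first_payload_byte(volatile struct demo_rx *shared,
                                       const char *page, int page_size)
    {
        struct demo_rx rx = *shared;                /* copy ... */

        if (rx.offset < 0 || rx.status <= 0 ||
            rx.offset + rx.status > page_size)
            return -1;                              /* ... check ... */
        return page[rx.offset];                     /* ... use the copy */
    }
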
patches.xen/xsa155-linux319-0013-xen-blkfront-prepare-request-locally-only-then-put-i.patch
@@ -0,0 +1,165 @@
From 74aaa42e1f25309a163acd00083ecbbc186fbb47 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 06:07:14 +0100
Subject: [PATCH 13/13] xen-blkfront: prepare request locally, only then put it
on the shared ring
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

Do not reuse data which might already have been modified by the
backend. This mostly concerns the private copy of the request
(info->shadow[id].req): make sure the request saved there is really the
one just filled.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
drivers/block/xen-blkfront.c | 56 ++++++++++++++++++++++++--------------------
1 file changed, 30 insertions(+), 26 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5d7eb04..514cf18 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -389,7 +389,7 @@ static int blkif_ioctl(struct block_devi
static int blkif_queue_request(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
- struct blkif_request *ring_req;
+ struct blkif_request ring_req;
unsigned long id;
unsigned int fsect, lsect;
int i, ref, n;
@@ -435,7 +435,7 @@
new_persistent_gnts = 0;
/* Fill out a communications ring structure. */
- ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
+ RING_COPY_REQUEST(&info->ring, info->ring.req_prod_pvt, &ring_req);
id = get_id_from_freelist(info);
info->shadow[id].request = req;
@@ -435,37 +435,37 @@ static int blkif_queue_request(struct re
info->shadow[id].request = req;
if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
- ring_req->operation = BLKIF_OP_DISCARD;
- ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
- ring_req->u.discard.id = id;
- ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.operation = BLKIF_OP_DISCARD;
+ ring_req.u.discard.nr_sectors = blk_rq_sectors(req);
+ ring_req.u.discard.id = id;
+ ring_req.u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
- ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+ ring_req.u.discard.flag = BLKIF_DISCARD_SECURE;
else
- ring_req->u.discard.flag = 0;
+ ring_req.u.discard.flag = 0;
} else {
BUG_ON(info->max_indirect_segments == 0 &&
req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
BUG_ON(info->max_indirect_segments &&
req->nr_phys_segments > info->max_indirect_segments);
nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
- ring_req->u.rw.id = id;
+ ring_req.u.rw.id = id;
if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
/*
* The indirect operation can only be a BLKIF_OP_READ or
* BLKIF_OP_WRITE
*/
BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
- ring_req->operation = BLKIF_OP_INDIRECT;
- ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
+ ring_req.operation = BLKIF_OP_INDIRECT;
+ ring_req.u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->u.indirect.handle = info->handle;
- ring_req->u.indirect.nr_segments = nseg;
+ ring_req.u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.u.indirect.handle = info->handle;
+ ring_req.u.indirect.nr_segments = nseg;
} else {
- ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->u.rw.handle = info->handle;
- ring_req->operation = rq_data_dir(req) ?
+ ring_req.u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
+ ring_req.u.rw.handle = info->handle;
+ ring_req.operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
/*
@@ -481,24 +481,24 @@ static int blkif_queue_request(struct re
switch (info->feature_flush &
((REQ_FLUSH|REQ_FUA))) {
case REQ_FLUSH|REQ_FUA:
- ring_req->operation =
+ ring_req.operation =
BLKIF_OP_WRITE_BARRIER;
break;
case REQ_FLUSH:
- ring_req->operation =
+ ring_req.operation =
BLKIF_OP_FLUSH_DISKCACHE;
break;
default:
- ring_req->operation = 0;
+ ring_req.operation = 0;
}
}
- ring_req->u.rw.nr_segments = nseg;
+ ring_req.u.rw.nr_segments = nseg;
}
for_each_sg(info->shadow[id].sg, sg, nseg, i) {
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
- if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
+ if ((ring_req.operation == BLKIF_OP_INDIRECT) &&
(i % SEGS_PER_INDIRECT_FRAME == 0)) {
unsigned long uninitialized_var(pfn);
@@ -504,7 +504,7 @@ static int blkif_queue_request(struct re
gnt_list_entry = get_grant(&gref_head, pfn, info);
info->shadow[id].indirect_grants[n] = gnt_list_entry;
segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
- ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+ ring_req.u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
}
gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
@@ -537,8 +537,8 @@ static int blkif_queue_request(struct re
kunmap_atomic(bvec_data);
kunmap_atomic(shared_data);
}
- if (ring_req->operation != BLKIF_OP_INDIRECT) {
- ring_req->u.rw.seg[i] =
+ if (ring_req.operation != BLKIF_OP_INDIRECT) {
+ ring_req.u.rw.seg[i] =
(struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
@@ -556,10 +556,13 @@ static int blkif_queue_request(struct re
kunmap_atomic(segments);
}
+ /* make the request available to the backend */
+ *RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt) = ring_req;
+ wmb();
info->ring.req_prod_pvt++;
/* Keep a private copy so we can reissue requests when recovering. */
- info->shadow[id].req = *ring_req;
+ info->shadow[id].req = ring_req;
if (new_persistent_gnts)
gnttab_free_grant_references(gref_head);

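The ordering the patch establishes (fill a local struct, copy it into the ring, write barrier, then bump the producer index) looks like this in a user-space sketch; the types, ring size, and the fence standing in for wmb() are all illustrative:

    struct demo_req { unsigned long id; int operation; };

    #define RING_SLOTS_DEMO 64u

    struct demo_ring {
        struct demo_req slots[RING_SLOTS_DEMO]; /* shared with the backend */
        unsigned int req_prod;                  /* producer index */
    };

    static struct demo_req shadow[RING_SLOTS_DEMO]; /* private copies */

    static void queue_request(struct demo_ring *ring, unsigned long id, int op)
    {
        struct demo_req req = { .id = id, .operation = op }; /* built privately */

        ring->slots[ring->req_prod & (RING_SLOTS_DEMO - 1)] = req;
        __atomic_thread_fence(__ATOMIC_RELEASE); /* plays the role of wmb() */
        ring->req_prod++;

        /* The recovery copy comes from the local struct, never from the
         * ring, so the backend cannot feed back a modified request. */
        shadow[id & (RING_SLOTS_DEMO - 1)] = req;
    }
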
patches.xen/xsa155-linux41-0010-xen-netfront-do-not-use-data-already-exposed-to-back.patch
@@ -0,0 +1,73 @@
From 2adc557330dde5b474d885518d2663180d3c8f45 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
<marmarek@invisiblethingslab.com>
Date: Wed, 16 Dec 2015 05:19:37 +0100
Subject: [PATCH 10/13] xen-netfront: do not use data already exposed to
backend
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Organization: Invisible Things Lab
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>

The backend may freely modify anything on the shared page, so use the
data that was supposed to be written there instead of reading it back
from the shared page.

This is part of XSA155.

CC: stable@vger.kernel.org
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
---
drivers/net/xen-netfront.c | 32 +++++++++++---------------------
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2af5100..959e479 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -452,17 +452,19 @@ static struct xen_netif_tx_request *xenn
struct sk_buff *skb, struct page *page,
unsigned int offset, unsigned int len)
{
+ unsigned int this_len;
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
while (len) {
tx->flags |= XEN_NETTXF_more_data;
+ this_len = min_t(unsigned int, PAGE_SIZE - offset, len);
tx = xennet_make_one_txreq(queue, skb_get(skb),
- page, offset, len);
+ page, offset, this_len);
page++;
offset = 0;
- len -= tx->size;
+ len -= this_len;
}
return tx;
@@ -522,7 +524,7 @@ static int xennet_start_xmit(struct sk_b
int slots;
struct page *page;
unsigned int offset;
- unsigned int len;
+ unsigned int len, this_len;
unsigned long flags;
struct netfront_queue *queue = NULL;
unsigned int num_queues = dev->real_num_tx_queues;
@@ -567,11 +569,12 @@ static int xennet_start_xmit(struct sk_b
}
/* First request for the linear area. */
+ this_len = min_t(unsigned int, PAGE_SIZE - offset, len);
first_tx = tx = xennet_make_one_txreq(queue, skb,
page, offset, len);
page++;
offset = 0;
- len -= tx->size;
+ len -= this_len;
if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */

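The this_len bookkeeping above is the whole fix: account from the length computed locally rather than reading tx->size back from the shared ring. A sketch with invented types (demo_txreq, PAGE_BYTES), assuming the initial offset is below PAGE_BYTES:

    struct demo_txreq { unsigned int size; };

    #define PAGE_BYTES 4096u

    /* Emit one request per page-sized chunk. len is decremented by the
     * locally computed this_len; re-reading tx->size would trust memory
     * the backend controls. */
    static void make_tx_requests(volatile struct demo_txreq *tx,
                                 unsigned int offset, unsigned int len)
    {
        while (len) {
            unsigned int space = PAGE_BYTES - offset;
            unsigned int this_len = len < space ? len : space;

            tx->size = this_len;  /* written to the shared ring ... */
            len -= this_len;      /* ... but never read back from it */
            offset = 0;
            tx++;                 /* next slot; real code re-derives it */
        }
    }
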
@@ -17,3 +17,9 @@ patches.xen/xsa155-linux-xsa155-0004-xen-blkback-only-read-request-operation-fro
patches.xen/xsa155-linux43-0005-xen-blkback-read-from-indirect-descriptors-only-once.patch
patches.xen/xsa155-linux-xsa155-0006-xen-scsiback-safely-copy-requests.patch
patches.xen/xsa155-linux-xsa155-0007-xen-pciback-Save-xen_pci_op-commands-before-processi.patch
+patches.xen/xsa155-linux-0008-xen-Add-RING_COPY_RESPONSE.patch
+patches.xen/xsa155-linux318-0009-xen-netfront-copy-response-out-of-shared-buffer-befo.patch
+patches.xen/xsa155-linux41-0010-xen-netfront-do-not-use-data-already-exposed-to-back.patch
+patches.xen/xsa155-linux-0011-xen-netfront-add-range-check-for-Tx-response-id.patch
+patches.xen/xsa155-linux312-0012-xen-blkfront-make-local-copy-of-response-before-usin.patch
+patches.xen/xsa155-linux319-0013-xen-blkfront-prepare-request-locally-only-then-put-i.patch
