Compare commits
6 Commits
master
...
stable-3.1
Author | SHA1 | Date |
---|---|---|
Marek Marczykowski-Górecki | 5cc9414f66 | 9 years ago |
Marek Marczykowski-Górecki | 433398cd67 | 9 years ago |
Marek Marczykowski-Górecki | 06ce6989b0 | 9 years ago |
Marek Marczykowski-Górecki | 8a88fa6b21 | 9 years ago |
Marek Marczykowski-Górecki | 30606502a8 | 9 years ago |
Marek Marczykowski-Górecki | 3387f469d9 | 9 years ago |
@@ -0,0 +1,26 @@
|
|||||||
|
When it gets to free_page(queue->grant_tx_page[i]), the use counter on this page
|
||||||
|
is already 0, which causes a crash. Not sure if this is the proper fix
|
||||||
|
(according to git log this may introduce a memory leak), but at least it
|
||||||
|
prevents the crash.
|
||||||
|
|
||||||
|
Details in this thread:
|
||||||
|
http://xen.markmail.org/thread/pw5edbtqienjx4q5
|
||||||
|
|
||||||
|
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
|
||||||
|
index f821a97..a5efbb0 100644
|
||||||
|
--- a/drivers/net/xen-netfront.c
|
||||||
|
+++ b/drivers/net/xen-netfront.c
|
||||||
|
@@ -1065,9 +1069,10 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
|
||||||
|
|
||||||
|
skb = queue->tx_skbs[i].skb;
|
||||||
|
get_page(queue->grant_tx_page[i]);
|
||||||
|
- gnttab_end_foreign_access(queue->grant_tx_ref[i],
|
||||||
|
- GNTMAP_readonly,
|
||||||
|
- (unsigned long)page_address(queue->grant_tx_page[i]));
|
||||||
|
+ gnttab_end_foreign_access_ref(
|
||||||
|
+ queue->grant_tx_ref[i], GNTMAP_readonly);
|
||||||
|
+ gnttab_release_grant_reference(
|
||||||
|
+ &queue->gref_tx_head, queue->grant_tx_ref[i]);
|
||||||
|
queue->grant_tx_page[i] = NULL;
|
||||||
|
queue->grant_tx_ref[i] = GRANT_INVALID_REF;
|
||||||
|
add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
|
@@ -0,0 +1,60 @@
|
|||||||
|
From 8322f4eddaf1fe5a9bdf5252c8140daa8bad60fd Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
|
||||||
|
<marmarek@invisiblethingslab.com>
|
||||||
|
Date: Tue, 15 Dec 2015 21:35:14 +0100
|
||||||
|
Subject: [PATCH 08/13] xen: Add RING_COPY_RESPONSE()
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
Organization: Invisible Things Lab
|
||||||
|
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
|
||||||
|
Using RING_GET_RESPONSE() on a shared ring is easy to use incorrectly
|
||||||
|
(i.e., by not considering that the other end may alter the data in the
|
||||||
|
shared ring while it is being inspected). Safe usage of a response
|
||||||
|
generally requires taking a local copy.
|
||||||
|
|
||||||
|
Provide a RING_COPY_RESPONSE() macro to use instead of
|
||||||
|
RING_GET_RESPONSE() and an open-coded memcpy(). This takes care of
|
||||||
|
ensuring that the copy is done correctly regardless of any possible
|
||||||
|
compiler optimizations.
|
||||||
|
|
||||||
|
Use a volatile source to prevent the compiler from reordering or
|
||||||
|
omitting the copy.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
---
|
||||||
|
include/xen/interface/io/ring.h | 14 ++++++++++++++
|
||||||
|
1 file changed, 14 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
|
||||||
|
index 7dc685b..312415c 100644
|
||||||
|
--- a/include/xen/interface/io/ring.h
|
||||||
|
+++ b/include/xen/interface/io/ring.h
|
||||||
|
@@ -198,6 +198,20 @@ struct __name##_back_ring { \
|
||||||
|
#define RING_GET_RESPONSE(_r, _idx) \
|
||||||
|
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
|
||||||
|
|
||||||
|
+/*
|
||||||
|
+ * Get a local copy of a response.
|
||||||
|
+ *
|
||||||
|
+ * Use this in preference to RING_GET_RESPONSE() so all processing is
|
||||||
|
+ * done on a local copy that cannot be modified by the other end.
|
||||||
|
+ *
|
||||||
|
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
|
||||||
|
+ * to be ineffective where _rsp is a struct which consists of only bitfields.
|
||||||
|
+ */
|
||||||
|
+#define RING_COPY_RESPONSE(_r, _idx, _rsp) do { \
|
||||||
|
+ /* Use volatile to force the copy into _rsp. */ \
|
||||||
|
+ *(_rsp) = *(volatile typeof(_rsp))RING_GET_RESPONSE(_r, _idx); \
|
||||||
|
+} while (0)
|
||||||
|
+
|
||||||
|
/* Loop termination condition: Would the specified index overflow the ring? */
|
||||||
|
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
|
||||||
|
(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,37 @@
|
|||||||
|
From 76a020d3b2023ca02961eab38318ef2d6f1338d9 Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
|
||||||
|
<marmarek@invisiblethingslab.com>
|
||||||
|
Date: Wed, 16 Dec 2015 05:22:24 +0100
|
||||||
|
Subject: [PATCH 11/13] xen-netfront: add range check for Tx response id
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
Organization: Invisible Things Lab
|
||||||
|
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
|
||||||
|
Tx response ID is fetched from shared page, so make sure it is sane
|
||||||
|
before using it as an array index.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
---
|
||||||
|
drivers/net/xen-netfront.c | 1 +
|
||||||
|
1 file changed, 1 insertion(+)
|
||||||
|
|
||||||
|
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
|
||||||
|
index 959e479..94309e6 100644
|
||||||
|
--- a/drivers/net/xen-netfront.c
|
||||||
|
+++ b/drivers/net/xen-netfront.c
|
||||||
|
@@ -379,6 +379,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
id = txrsp.id;
|
||||||
|
+ BUG_ON(id >= NET_TX_RING_SIZE);
|
||||||
|
skb = queue->tx_skbs[id].skb;
|
||||||
|
if (unlikely(gnttab_query_foreign_access(
|
||||||
|
queue->grant_tx_ref[id]) != 0)) {
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,57 @@
|
|||||||
|
From 4e2bc423e0cef0a42f93d989c0980301df1bd462 Mon Sep 17 00:00:00 2001
|
||||||
|
From: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Date: Fri, 30 Oct 2015 14:58:08 +0000
|
||||||
|
Subject: [PATCH 1/7] xen: Add RING_COPY_REQUEST()
|
||||||
|
|
||||||
|
Using RING_GET_REQUEST() on a shared ring is easy to use incorrectly
|
||||||
|
(i.e., by not considering that the other end may alter the data in the
|
||||||
|
shared ring while it is being inspected). Safe usage of a request
|
||||||
|
generally requires taking a local copy.
|
||||||
|
|
||||||
|
Provide a RING_COPY_REQUEST() macro to use instead of
|
||||||
|
RING_GET_REQUEST() and an open-coded memcpy(). This takes care of
|
||||||
|
ensuring that the copy is done correctly regardless of any possible
|
||||||
|
compiler optimizations.
|
||||||
|
|
||||||
|
Use a volatile source to prevent the compiler from reordering or
|
||||||
|
omitting the copy.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
---
|
||||||
|
v2: Update about GCC and bitfields.
|
||||||
|
---
|
||||||
|
include/xen/interface/io/ring.h | 14 ++++++++++++++
|
||||||
|
1 file changed, 14 insertions(+)
|
||||||
|
|
||||||
|
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
|
||||||
|
index 7d28aff..7dc685b 100644
|
||||||
|
--- a/include/xen/interface/io/ring.h
|
||||||
|
+++ b/include/xen/interface/io/ring.h
|
||||||
|
@@ -181,6 +181,20 @@ struct __name##_back_ring { \
|
||||||
|
#define RING_GET_REQUEST(_r, _idx) \
|
||||||
|
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
|
||||||
|
|
||||||
|
+/*
|
||||||
|
+ * Get a local copy of a request.
|
||||||
|
+ *
|
||||||
|
+ * Use this in preference to RING_GET_REQUEST() so all processing is
|
||||||
|
+ * done on a local copy that cannot be modified by the other end.
|
||||||
|
+ *
|
||||||
|
+ * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
|
||||||
|
+ * to be ineffective where _req is a struct which consists of only bitfields.
|
||||||
|
+ */
|
||||||
|
+#define RING_COPY_REQUEST(_r, _idx, _req) do { \
|
||||||
|
+ /* Use volatile to force the copy into _req. */ \
|
||||||
|
+ *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
|
||||||
|
+} while (0)
|
||||||
|
+
|
||||||
|
#define RING_GET_RESPONSE(_r, _idx) \
|
||||||
|
(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
|
||||||
|
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,40 @@
|
|||||||
|
From 100ac372a0e07ccc8c508c3884fa9020cfe08094 Mon Sep 17 00:00:00 2001
|
||||||
|
From: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Date: Fri, 30 Oct 2015 15:16:01 +0000
|
||||||
|
Subject: [PATCH 2/7] xen-netback: don't use last request to determine minimum
|
||||||
|
Tx credit
|
||||||
|
|
||||||
|
The last from guest transmitted request gives no indication about the
|
||||||
|
minimum amount of credit that the guest might need to send a packet
|
||||||
|
since the last packet might have been a small one.
|
||||||
|
|
||||||
|
Instead allow for the worst case 128 KiB packet.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
|
||||||
|
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
---
|
||||||
|
drivers/net/xen-netback/netback.c | 4 +---
|
||||||
|
1 file changed, 1 insertion(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
|
||||||
|
index e481f37..b683581 100644
|
||||||
|
--- a/drivers/net/xen-netback/netback.c
|
||||||
|
+++ b/drivers/net/xen-netback/netback.c
|
||||||
|
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
|
||||||
|
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
|
||||||
|
* Otherwise the interface can seize up due to insufficient credit.
|
||||||
|
*/
|
||||||
|
- max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
|
||||||
|
- max_burst = min(max_burst, 131072UL);
|
||||||
|
- max_burst = max(max_burst, queue->credit_bytes);
|
||||||
|
+ max_burst = max(131072UL, queue->credit_bytes);
|
||||||
|
|
||||||
|
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
|
||||||
|
max_credit = queue->remaining_credit + queue->credit_bytes;
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,131 @@
|
|||||||
|
From 4127e9ccae0eda622421d21132846abdf74f66ed Mon Sep 17 00:00:00 2001
|
||||||
|
From: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Date: Fri, 30 Oct 2015 15:17:06 +0000
|
||||||
|
Subject: [PATCH 3/7] xen-netback: use RING_COPY_REQUEST() throughout
|
||||||
|
|
||||||
|
Instead of open-coding memcpy()s and directly accessing Tx and Rx
|
||||||
|
requests, use the new RING_COPY_REQUEST() that ensures the local copy
|
||||||
|
is correct.
|
||||||
|
|
||||||
|
This is more than is strictly necessary for guest Rx requests since
|
||||||
|
only the id and gref fields are used and it is harmless if the
|
||||||
|
frontend modifies these.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
|
||||||
|
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
---
|
||||||
|
drivers/net/xen-netback/netback.c | 30 ++++++++++++++----------------
|
||||||
|
1 file changed, 14 insertions(+), 16 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
|
||||||
|
index b683581..1049c34 100644
|
||||||
|
--- a/drivers/net/xen-netback/netback.c
|
||||||
|
+++ b/drivers/net/xen-netback/netback.c
|
||||||
|
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
|
||||||
|
struct netrx_pending_operations *npo)
|
||||||
|
{
|
||||||
|
struct xenvif_rx_meta *meta;
|
||||||
|
- struct xen_netif_rx_request *req;
|
||||||
|
+ struct xen_netif_rx_request req;
|
||||||
|
|
||||||
|
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
|
||||||
|
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
|
||||||
|
|
||||||
|
meta = npo->meta + npo->meta_prod++;
|
||||||
|
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
|
||||||
|
meta->gso_size = 0;
|
||||||
|
meta->size = 0;
|
||||||
|
- meta->id = req->id;
|
||||||
|
+ meta->id = req.id;
|
||||||
|
|
||||||
|
npo->copy_off = 0;
|
||||||
|
- npo->copy_gref = req->gref;
|
||||||
|
+ npo->copy_gref = req.gref;
|
||||||
|
|
||||||
|
return meta;
|
||||||
|
}
|
||||||
|
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
|
||||||
|
struct xenvif *vif = netdev_priv(skb->dev);
|
||||||
|
int nr_frags = skb_shinfo(skb)->nr_frags;
|
||||||
|
int i;
|
||||||
|
- struct xen_netif_rx_request *req;
|
||||||
|
+ struct xen_netif_rx_request req;
|
||||||
|
struct xenvif_rx_meta *meta;
|
||||||
|
unsigned char *data;
|
||||||
|
int head = 1;
|
||||||
|
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
|
||||||
|
|
||||||
|
/* Set up a GSO prefix descriptor, if necessary */
|
||||||
|
if ((1 << gso_type) & vif->gso_prefix_mask) {
|
||||||
|
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
|
||||||
|
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
|
||||||
|
meta = npo->meta + npo->meta_prod++;
|
||||||
|
meta->gso_type = gso_type;
|
||||||
|
meta->gso_size = skb_shinfo(skb)->gso_size;
|
||||||
|
meta->size = 0;
|
||||||
|
- meta->id = req->id;
|
||||||
|
+ meta->id = req.id;
|
||||||
|
}
|
||||||
|
|
||||||
|
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
|
||||||
|
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
|
||||||
|
meta = npo->meta + npo->meta_prod++;
|
||||||
|
|
||||||
|
if ((1 << gso_type) & vif->gso_mask) {
|
||||||
|
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
|
||||||
|
}
|
||||||
|
|
||||||
|
meta->size = 0;
|
||||||
|
- meta->id = req->id;
|
||||||
|
+ meta->id = req.id;
|
||||||
|
npo->copy_off = 0;
|
||||||
|
- npo->copy_gref = req->gref;
|
||||||
|
+ npo->copy_gref = req.gref;
|
||||||
|
|
||||||
|
data = skb->data;
|
||||||
|
while (data < skb_tail_pointer(skb)) {
|
||||||
|
@@ -709,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
|
||||||
|
spin_unlock_irqrestore(&queue->response_lock, flags);
|
||||||
|
if (cons == end)
|
||||||
|
break;
|
||||||
|
- txp = RING_GET_REQUEST(&queue->tx, cons++);
|
||||||
|
+ RING_COPY_REQUEST(&queue->tx, cons++, txp);
|
||||||
|
} while (1);
|
||||||
|
queue->tx.req_cons = cons;
|
||||||
|
}
|
||||||
|
@@ -776,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
|
||||||
|
if (drop_err)
|
||||||
|
txp = &dropped_tx;
|
||||||
|
|
||||||
|
- memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
|
||||||
|
- sizeof(*txp));
|
||||||
|
+ RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
|
||||||
|
|
||||||
|
/* If the guest submitted a frame >= 64 KiB then
|
||||||
|
* first->size overflowed and following slots will
|
||||||
|
@@ -1110,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
|
||||||
|
return -EBADR;
|
||||||
|
}
|
||||||
|
|
||||||
|
- memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
|
||||||
|
- sizeof(extra));
|
||||||
|
+ RING_COPY_REQUEST(&queue->tx, cons, &extra);
|
||||||
|
if (unlikely(!extra.type ||
|
||||||
|
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
|
||||||
|
queue->tx.req_cons = ++cons;
|
||||||
|
@@ -1320,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
|
||||||
|
|
||||||
|
idx = queue->tx.req_cons;
|
||||||
|
rmb(); /* Ensure that we see the request before we copy it. */
|
||||||
|
- memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
|
||||||
|
+ RING_COPY_REQUEST(&queue->tx, idx, &txreq);
|
||||||
|
|
||||||
|
/* Credit-based scheduling. */
|
||||||
|
if (txreq.size > queue->remaining_credit &&
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,54 @@
|
|||||||
|
From 084b8c2e77f1ac07e4a3a121ff957c49a9379385 Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
|
||||||
|
Date: Tue, 3 Nov 2015 16:34:09 +0000
|
||||||
|
Subject: [PATCH 4/7] xen-blkback: only read request operation from shared ring
|
||||||
|
once
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
|
||||||
|
A compiler may load a switch statement value multiple times, which could
|
||||||
|
be bad when the value is in memory shared with the frontend.
|
||||||
|
|
||||||
|
When converting a non-native request to a native one, ensure that
|
||||||
|
src->operation is only loaded once by using READ_ONCE().
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
|
||||||
|
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
---
|
||||||
|
drivers/block/xen-blkback/common.h | 8 ++++----
|
||||||
|
1 file changed, 4 insertions(+), 4 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
|
||||||
|
index 68e87a0..c929ae2 100644
|
||||||
|
--- a/drivers/block/xen-blkback/common.h
|
||||||
|
+++ b/drivers/block/xen-blkback/common.h
|
||||||
|
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
|
||||||
|
struct blkif_x86_32_request *src)
|
||||||
|
{
|
||||||
|
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
|
||||||
|
- dst->operation = src->operation;
|
||||||
|
- switch (src->operation) {
|
||||||
|
+ dst->operation = READ_ONCE(src->operation);
|
||||||
|
+ switch (dst->operation) {
|
||||||
|
case BLKIF_OP_READ:
|
||||||
|
case BLKIF_OP_WRITE:
|
||||||
|
case BLKIF_OP_WRITE_BARRIER:
|
||||||
|
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
|
||||||
|
struct blkif_x86_64_request *src)
|
||||||
|
{
|
||||||
|
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
|
||||||
|
- dst->operation = src->operation;
|
||||||
|
- switch (src->operation) {
|
||||||
|
+ dst->operation = READ_ONCE(src->operation);
|
||||||
|
+ switch (dst->operation) {
|
||||||
|
case BLKIF_OP_READ:
|
||||||
|
case BLKIF_OP_WRITE:
|
||||||
|
case BLKIF_OP_WRITE_BARRIER:
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,37 @@
|
|||||||
|
From 89739c14c72e5c1626a5cd5e09cbb2efeaadb6d8 Mon Sep 17 00:00:00 2001
|
||||||
|
From: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Date: Mon, 16 Nov 2015 18:02:32 +0000
|
||||||
|
Subject: [PATCH 6/7] xen-scsiback: safely copy requests
|
||||||
|
|
||||||
|
The copy of the ring request was lacking a following barrier(),
|
||||||
|
potentially allowing the compiler to optimize the copy away.
|
||||||
|
|
||||||
|
Use RING_COPY_REQUEST() to ensure the request is copied to local
|
||||||
|
memory.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Reviewed-by: Juergen Gross <jgross@suse.com>
|
||||||
|
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
---
|
||||||
|
drivers/xen/xen-scsiback.c | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
|
||||||
|
index 43bcae8..ad4eb10 100644
|
||||||
|
--- a/drivers/xen/xen-scsiback.c
|
||||||
|
+++ b/drivers/xen/xen-scsiback.c
|
||||||
|
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
|
||||||
|
if (!pending_req)
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
- ring_req = *RING_GET_REQUEST(ring, rc);
|
||||||
|
+ RING_COPY_REQUEST(ring, rc, &ring_req);
|
||||||
|
ring->req_cons = ++rc;
|
||||||
|
|
||||||
|
err = prepare_pending_reqs(info, &ring_req, pending_req);
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,81 @@
|
|||||||
|
From f6f4388c917ce96b075a239a4535b8efc6064d14 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
Date: Mon, 16 Nov 2015 12:40:48 -0500
|
||||||
|
Subject: [PATCH 7/7] xen/pciback: Save xen_pci_op commands before processing
|
||||||
|
it
|
||||||
|
|
||||||
|
Double fetch vulnerabilities that happen when a variable is
|
||||||
|
fetched twice from shared memory but a security check is only
|
||||||
|
performed the first time.
|
||||||
|
|
||||||
|
The xen_pcibk_do_op function performs a switch statements on the op->cmd
|
||||||
|
value which is stored in shared memory. Interestingly this can result
|
||||||
|
in a double fetch vulnerability depending on the performed compiler
|
||||||
|
optimization.
|
||||||
|
|
||||||
|
This patch fixes it by saving the xen_pci_op command before
|
||||||
|
processing it. We also use 'barrier' to make sure that the
|
||||||
|
compiler does not perform any optimization.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
Signed-off-by: Jan Beulich <JBeulich@suse.com>
|
||||||
|
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
---
|
||||||
|
drivers/xen/xen-pciback/pciback.h | 1 +
|
||||||
|
drivers/xen/xen-pciback/pciback_ops.c | 15 ++++++++++++++-
|
||||||
|
2 files changed, 15 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
|
||||||
|
index 58e38d5..4d529f3 100644
|
||||||
|
--- a/drivers/xen/xen-pciback/pciback.h
|
||||||
|
+++ b/drivers/xen/xen-pciback/pciback.h
|
||||||
|
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
|
||||||
|
struct xen_pci_sharedinfo *sh_info;
|
||||||
|
unsigned long flags;
|
||||||
|
struct work_struct op_work;
|
||||||
|
+ struct xen_pci_op op;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct xen_pcibk_dev_data {
|
||||||
|
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
|
||||||
|
index c4a0666..a0e0e3e 100644
|
||||||
|
--- a/drivers/xen/xen-pciback/pciback_ops.c
|
||||||
|
+++ b/drivers/xen/xen-pciback/pciback_ops.c
|
||||||
|
@@ -298,9 +298,11 @@ void xen_pcibk_do_op(struct work_struct *data)
|
||||||
|
container_of(data, struct xen_pcibk_device, op_work);
|
||||||
|
struct pci_dev *dev;
|
||||||
|
struct xen_pcibk_dev_data *dev_data = NULL;
|
||||||
|
- struct xen_pci_op *op = &pdev->sh_info->op;
|
||||||
|
+ struct xen_pci_op *op = &pdev->op;
|
||||||
|
int test_intx = 0;
|
||||||
|
|
||||||
|
+ *op = pdev->sh_info->op;
|
||||||
|
+ barrier();
|
||||||
|
dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
|
||||||
|
|
||||||
|
if (dev == NULL)
|
||||||
|
@@ -342,6 +344,17 @@ void xen_pcibk_do_op(struct work_struct *data)
|
||||||
|
if ((dev_data->enable_intx != test_intx))
|
||||||
|
xen_pcibk_control_isr(dev, 0 /* no reset */);
|
||||||
|
}
|
||||||
|
+ pdev->sh_info->op.err = op->err;
|
||||||
|
+ pdev->sh_info->op.value = op->value;
|
||||||
|
+#ifdef CONFIG_PCI_MSI
|
||||||
|
+ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
|
||||||
|
+ unsigned int i;
|
||||||
|
+
|
||||||
|
+ for (i = 0; i < op->value; i++)
|
||||||
|
+ pdev->sh_info->op.msix_entries[i].vector =
|
||||||
|
+ op->msix_entries[i].vector;
|
||||||
|
+ }
|
||||||
|
+#endif
|
||||||
|
/* Tell the driver domain that we're done. */
|
||||||
|
wmb();
|
||||||
|
clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,121 @@
|
|||||||
|
From ef0d243bfeaf1da8854c26f89536dc1b69c56602 Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
|
||||||
|
<marmarek@invisiblethingslab.com>
|
||||||
|
Date: Wed, 16 Dec 2015 05:51:10 +0100
|
||||||
|
Subject: [PATCH 12/13] xen-blkfront: make local copy of response before using
|
||||||
|
it
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
Organization: Invisible Things Lab
|
||||||
|
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
|
||||||
|
Data on the shared page can be changed at any time by the backend. Make
|
||||||
|
a local copy, which is no longer controlled by the backend. And only
|
||||||
|
then access it.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
---
|
||||||
|
drivers/block/xen-blkfront.c | 34 +++++++++++++++++-----------------
|
||||||
|
1 file changed, 17 insertions(+), 17 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
|
||||||
|
index 2fee2ee..5d7eb04 100644
|
||||||
|
--- a/drivers/block/xen-blkfront.c
|
||||||
|
+++ b/drivers/block/xen-blkfront.c
|
||||||
|
@@ -1296,7 +1296,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
|
||||||
|
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
||||||
|
{
|
||||||
|
struct request *req;
|
||||||
|
- struct blkif_response *bret;
|
||||||
|
+ struct blkif_response bret;
|
||||||
|
RING_IDX i, rp;
|
||||||
|
unsigned long flags;
|
||||||
|
struct blkfront_info *info = (struct blkfront_info *)dev_id;
|
||||||
|
@@ -1316,8 +1316,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
||||||
|
for (i = info->ring.rsp_cons; i != rp; i++) {
|
||||||
|
unsigned long id;
|
||||||
|
|
||||||
|
- bret = RING_GET_RESPONSE(&info->ring, i);
|
||||||
|
- id = bret->id;
|
||||||
|
+ RING_COPY_RESPONSE(&info->ring, i, &bret);
|
||||||
|
+ id = bret.id;
|
||||||
|
/*
|
||||||
|
* The backend has messed up and given us an id that we would
|
||||||
|
* never have given to it (we stamp it up to BLK_RING_SIZE -
|
||||||
|
@@ -1325,29 +1325,29 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
||||||
|
*/
|
||||||
|
if (id >= BLK_RING_SIZE) {
|
||||||
|
WARN(1, "%s: response to %s has incorrect id (%ld)\n",
|
||||||
|
- info->gd->disk_name, op_name(bret->operation), id);
|
||||||
|
+ info->gd->disk_name, op_name(bret.operation), id);
|
||||||
|
/* We can't safely get the 'struct request' as
|
||||||
|
* the id is busted. */
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
req = info->shadow[id].request;
|
||||||
|
|
||||||
|
- if (bret->operation != BLKIF_OP_DISCARD)
|
||||||
|
- blkif_completion(&info->shadow[id], info, bret);
|
||||||
|
+ if (bret.operation != BLKIF_OP_DISCARD)
|
||||||
|
+ blkif_completion(&info->shadow[id], info, &bret);
|
||||||
|
|
||||||
|
if (add_id_to_freelist(info, id)) {
|
||||||
|
WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
|
||||||
|
- info->gd->disk_name, op_name(bret->operation), id);
|
||||||
|
+ info->gd->disk_name, op_name(bret.operation), id);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
- error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
|
||||||
|
- switch (bret->operation) {
|
||||||
|
+ error = (bret.status == BLKIF_RSP_OKAY) ? 0 : -EIO;
|
||||||
|
+ switch (bret.operation) {
|
||||||
|
case BLKIF_OP_DISCARD:
|
||||||
|
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
|
||||||
|
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
|
||||||
|
struct request_queue *rq = info->rq;
|
||||||
|
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
|
||||||
|
- info->gd->disk_name, op_name(bret->operation));
|
||||||
|
+ info->gd->disk_name, op_name(bret.operation));
|
||||||
|
error = -EOPNOTSUPP;
|
||||||
|
info->feature_discard = 0;
|
||||||
|
info->feature_secdiscard = 0;
|
||||||
|
@@ -1358,15 +1358,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
||||||
|
break;
|
||||||
|
case BLKIF_OP_FLUSH_DISKCACHE:
|
||||||
|
case BLKIF_OP_WRITE_BARRIER:
|
||||||
|
- if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
|
||||||
|
+ if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
|
||||||
|
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
|
||||||
|
- info->gd->disk_name, op_name(bret->operation));
|
||||||
|
+ info->gd->disk_name, op_name(bret.operation));
|
||||||
|
error = -EOPNOTSUPP;
|
||||||
|
}
|
||||||
|
- if (unlikely(bret->status == BLKIF_RSP_ERROR &&
|
||||||
|
+ if (unlikely(bret.status == BLKIF_RSP_ERROR &&
|
||||||
|
info->shadow[id].req.u.rw.nr_segments == 0)) {
|
||||||
|
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
|
||||||
|
- info->gd->disk_name, op_name(bret->operation));
|
||||||
|
+ info->gd->disk_name, op_name(bret.operation));
|
||||||
|
error = -EOPNOTSUPP;
|
||||||
|
}
|
||||||
|
if (unlikely(error)) {
|
||||||
|
@@ -1378,9 +1378,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
|
||||||
|
/* fall through */
|
||||||
|
case BLKIF_OP_READ:
|
||||||
|
case BLKIF_OP_WRITE:
|
||||||
|
- if (unlikely(bret->status != BLKIF_RSP_OKAY))
|
||||||
|
+ if (unlikely(bret.status != BLKIF_RSP_OKAY))
|
||||||
|
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
|
||||||
|
- "request: %x\n", bret->status);
|
||||||
|
+ "request: %x\n", bret.status);
|
||||||
|
|
||||||
|
__blk_end_request_all(req, error);
|
||||||
|
break;
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@@ -0,0 +1,179 @@
|
|||||||
|
From 3a1006355114da4b8fc4b935a64928b7f6ae374f Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
|
||||||
|
<marmarek@invisiblethingslab.com>
|
||||||
|
Date: Wed, 16 Dec 2015 05:09:55 +0100
|
||||||
|
Subject: [PATCH 09/13] xen-netfront: copy response out of shared buffer before
|
||||||
|
accessing it
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
Organization: Invisible Things Lab
|
||||||
|
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
|
||||||
|
Make local copy of the response, otherwise backend might modify it while
|
||||||
|
frontend is already processing it - leading to time of check / time of
|
||||||
|
use issue.
|
||||||
|
|
||||||
|
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
---
|
||||||
|
drivers/net/xen-netfront.c | 51 +++++++++++++++++++++++-----------------------
|
||||||
|
1 file changed, 25 insertions(+), 26 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
|
||||||
|
index d6abf19..2af5100 100644
|
||||||
|
--- a/drivers/net/xen-netfront.c
|
||||||
|
+++ b/drivers/net/xen-netfront.c
|
||||||
|
@@ -372,13 +372,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
|
||||||
|
rmb(); /* Ensure we see responses up to 'rp'. */
|
||||||
|
|
||||||
|
for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
|
||||||
|
- struct xen_netif_tx_response *txrsp;
|
||||||
|
+ struct xen_netif_tx_response txrsp;
|
||||||
|
|
||||||
|
- txrsp = RING_GET_RESPONSE(&queue->tx, cons);
|
||||||
|
- if (txrsp->status == XEN_NETIF_RSP_NULL)
|
||||||
|
+ RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
|
||||||
|
+ if (txrsp.status == XEN_NETIF_RSP_NULL)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
- id = txrsp->id;
|
||||||
|
+ id = txrsp.id;
|
||||||
|
skb = queue->tx_skbs[id].skb;
|
||||||
|
if (unlikely(gnttab_query_foreign_access(
|
||||||
|
queue->grant_tx_ref[id]) != 0)) {
|
||||||
|
@@ -721,7 +721,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
|
||||||
|
RING_IDX rp)
|
||||||
|
|
||||||
|
{
|
||||||
|
- struct xen_netif_extra_info *extra;
|
||||||
|
+ struct xen_netif_extra_info extra;
|
||||||
|
struct device *dev = &queue->info->netdev->dev;
|
||||||
|
RING_IDX cons = queue->rx.rsp_cons;
|
||||||
|
int err = 0;
|
||||||
|
@@ -737,24 +737,23 @@ static int xennet_get_extras(struct netfront_queue *queue,
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
- extra = (struct xen_netif_extra_info *)
|
||||||
|
- RING_GET_RESPONSE(&queue->rx, ++cons);
|
||||||
|
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
|
||||||
|
|
||||||
|
- if (unlikely(!extra->type ||
|
||||||
|
- extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
|
||||||
|
+ if (unlikely(!extra.type ||
|
||||||
|
+ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
|
||||||
|
if (net_ratelimit())
|
||||||
|
dev_warn(dev, "Invalid extra type: %d\n",
|
||||||
|
- extra->type);
|
||||||
|
+ extra.type);
|
||||||
|
err = -EINVAL;
|
||||||
|
} else {
|
||||||
|
- memcpy(&extras[extra->type - 1], extra,
|
||||||
|
- sizeof(*extra));
|
||||||
|
+ memcpy(&extras[extra.type - 1], &extra,
|
||||||
|
+ sizeof(extra));
|
||||||
|
}
|
||||||
|
|
||||||
|
skb = xennet_get_rx_skb(queue, cons);
|
||||||
|
ref = xennet_get_rx_ref(queue, cons);
|
||||||
|
xennet_move_rx_slot(queue, skb, ref);
|
||||||
|
- } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
|
||||||
|
+ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
|
||||||
|
|
||||||
|
queue->rx.rsp_cons = cons;
|
||||||
|
return err;
|
||||||
|
@@ -764,28 +763,28 @@ static int xennet_get_responses(struct netfront_queue *queue,
|
||||||
|
struct netfront_rx_info *rinfo, RING_IDX rp,
|
||||||
|
struct sk_buff_head *list)
|
||||||
|
{
|
||||||
|
- struct xen_netif_rx_response *rx = &rinfo->rx;
|
||||||
|
+ struct xen_netif_rx_response rx = rinfo->rx;
|
||||||
|
struct xen_netif_extra_info *extras = rinfo->extras;
|
||||||
|
struct device *dev = &queue->info->netdev->dev;
|
||||||
|
RING_IDX cons = queue->rx.rsp_cons;
|
||||||
|
struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
|
||||||
|
grant_ref_t ref = xennet_get_rx_ref(queue, cons);
|
||||||
|
- int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
|
||||||
|
+ int max = MAX_SKB_FRAGS + (rx.status <= RX_COPY_THRESHOLD);
|
||||||
|
int slots = 1;
|
||||||
|
int err = 0;
|
||||||
|
unsigned long ret;
|
||||||
|
|
||||||
|
- if (rx->flags & XEN_NETRXF_extra_info) {
|
||||||
|
+ if (rx.flags & XEN_NETRXF_extra_info) {
|
||||||
|
err = xennet_get_extras(queue, extras, rp);
|
||||||
|
cons = queue->rx.rsp_cons;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (;;) {
|
||||||
|
- if (unlikely(rx->status < 0 ||
|
||||||
|
- rx->offset + rx->status > PAGE_SIZE)) {
|
||||||
|
+ if (unlikely(rx.status < 0 ||
|
||||||
|
+ rx.offset + rx.status > PAGE_SIZE)) {
|
||||||
|
if (net_ratelimit())
|
||||||
|
dev_warn(dev, "rx->offset: %x, size: %u\n",
|
||||||
|
- rx->offset, rx->status);
|
||||||
|
+ rx.offset, rx.status);
|
||||||
|
xennet_move_rx_slot(queue, skb, ref);
|
||||||
|
err = -EINVAL;
|
||||||
|
goto next;
|
||||||
|
@@ -799,7 +798,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
|
||||||
|
if (ref == GRANT_INVALID_REF) {
|
||||||
|
if (net_ratelimit())
|
||||||
|
dev_warn(dev, "Bad rx response id %d.\n",
|
||||||
|
- rx->id);
|
||||||
|
+ rx.id);
|
||||||
|
err = -EINVAL;
|
||||||
|
goto next;
|
||||||
|
}
|
||||||
|
@@ -812,7 +811,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
|
||||||
|
__skb_queue_tail(list, skb);
|
||||||
|
|
||||||
|
next:
|
||||||
|
- if (!(rx->flags & XEN_NETRXF_more_data))
|
||||||
|
+ if (!(rx.flags & XEN_NETRXF_more_data))
|
||||||
|
break;
|
||||||
|
|
||||||
|
if (cons + slots == rp) {
|
||||||
|
@@ -822,7 +821,7 @@ next:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
- rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
|
||||||
|
+ RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx);
|
||||||
|
skb = xennet_get_rx_skb(queue, cons + slots);
|
||||||
|
ref = xennet_get_rx_ref(queue, cons + slots);
|
||||||
|
slots++;
|
||||||
|
@@ -878,9 +877,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
|
||||||
|
struct sk_buff *nskb;
|
||||||
|
|
||||||
|
while ((nskb = __skb_dequeue(list))) {
|
||||||
|
- struct xen_netif_rx_response *rx =
|
||||||
|
- RING_GET_RESPONSE(&queue->rx, ++cons);
|
||||||
|
+ struct xen_netif_rx_response rx;
|
||||||
|
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
|
||||||
|
+ RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
|
||||||
|
|
||||||
|
if (shinfo->nr_frags == MAX_SKB_FRAGS) {
|
||||||
|
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
|
||||||
|
@@ -891,7 +890,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
|
||||||
|
BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
|
||||||
|
|
||||||
|
skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
|
||||||
|
- rx->offset, rx->status, PAGE_SIZE);
|
||||||
|
+ rx.offset, rx.status, PAGE_SIZE);
|
||||||
|
|
||||||
|
skb_shinfo(nskb)->nr_frags = 0;
|
||||||
|
kfree_skb(nskb);
|
||||||
|
@@ -987,7 +986,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
|
||||||
|
i = queue->rx.rsp_cons;
|
||||||
|
work_done = 0;
|
||||||
|
while ((i != rp) && (work_done < budget)) {
|
||||||
|
- memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
|
||||||
|
+ RING_COPY_RESPONSE(&queue->rx, i, rx);
|
||||||
|
memset(extras, 0, sizeof(rinfo.extras));
|
||||||
|
|
||||||
|
err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@ -0,0 +1,54 @@
|
|||||||
|
From 2adc557330dde5b474d885518d2663180d3c8f45 Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
|
||||||
|
<marmarek@invisiblethingslab.com>
|
||||||
|
Date: Wed, 16 Dec 2015 05:19:37 +0100
|
||||||
|
Subject: [PATCH 10/13] xen-netfront: do not use data already exposed to
|
||||||
|
backend
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
Organization: Invisible Things Lab
|
||||||
|
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
|
||||||
|
The backend may freely modify anything on the shared page, so use the data which was
|
||||||
|
supposed to be written there, instead of reading it back from the shared
|
||||||
|
page.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
---
|
||||||
|
drivers/net/xen-netfront.c | 32 +++++++++++---------------------
|
||||||
|
1 file changed, 11 insertions(+), 21 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
|
||||||
|
index 2af5100..959e479 100644
|
||||||
|
--- a/drivers/net/xen-netfront.c 2015-12-17 05:17:39.600724757 +0100
|
||||||
|
+++ b/drivers/net/xen-netfront.c 2015-12-17 05:19:35.060724757 +0100
|
||||||
|
@@ -429,6 +429,7 @@
|
||||||
|
int frags = skb_shinfo(skb)->nr_frags;
|
||||||
|
unsigned int offset = offset_in_page(data);
|
||||||
|
unsigned int len = skb_headlen(skb);
|
||||||
|
+ unsigned int size;
|
||||||
|
unsigned int id;
|
||||||
|
grant_ref_t ref;
|
||||||
|
int i;
|
||||||
|
@@ -436,10 +437,11 @@
|
||||||
|
/* While the header overlaps a page boundary (including being
|
||||||
|
larger than a page), split it it into page-sized chunks. */
|
||||||
|
while (len > PAGE_SIZE - offset) {
|
||||||
|
- tx->size = PAGE_SIZE - offset;
|
||||||
|
+ size = PAGE_SIZE - offset;
|
||||||
|
+ tx->size = size;
|
||||||
|
tx->flags |= XEN_NETTXF_more_data;
|
||||||
|
- len -= tx->size;
|
||||||
|
- data += tx->size;
|
||||||
|
+ len -= size;
|
||||||
|
+ data += size;
|
||||||
|
offset = 0;
|
||||||
|
|
||||||
|
id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
@ -0,0 +1,154 @@
|
|||||||
|
From 74aaa42e1f25309a163acd00083ecbbc186fbb47 Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
|
||||||
|
<marmarek@invisiblethingslab.com>
|
||||||
|
Date: Wed, 16 Dec 2015 06:07:14 +0100
|
||||||
|
Subject: [PATCH 13/13] xen-blkfront: prepare request locally, only then put it
|
||||||
|
on the shared ring
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
Organization: Invisible Things Lab
|
||||||
|
Cc: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
|
||||||
|
Do not reuse data which theoretically might be already modified by the
|
||||||
|
backend. This is mostly about the private copy of the request
|
||||||
|
(info->shadow[id].req) - make sure the request saved there is really the
|
||||||
|
one just filled.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
|
||||||
|
---
|
||||||
|
drivers/block/xen-blkfront.c | 56 ++++++++++++++++++++++++--------------------
|
||||||
|
1 file changed, 30 insertions(+), 26 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
|
||||||
|
index 5d7eb04..514cf18 100644
|
||||||
|
--- a/drivers/block/xen-blkfront.c
|
||||||
|
+++ b/drivers/block/xen-blkfront.c
|
||||||
|
@@ -389,7 +389,7 @@ static int blkif_ioctl(struct block_devi
|
||||||
|
static int blkif_queue_request(struct request *req)
|
||||||
|
{
|
||||||
|
struct blkfront_info *info = req->rq_disk->private_data;
|
||||||
|
- struct blkif_request *ring_req;
|
||||||
|
+ struct blkif_request ring_req;
|
||||||
|
unsigned long id;
|
||||||
|
unsigned int fsect, lsect;
|
||||||
|
int i, ref, n;
|
||||||
|
@@ -435,7 +435,7 @@
|
||||||
|
new_persistent_gnts = 0;
|
||||||
|
|
||||||
|
/* Fill out a communications ring structure. */
|
||||||
|
- ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
|
||||||
|
+ RING_COPY_REQUEST(&info->ring, info->ring.req_prod_pvt, &ring_req);
|
||||||
|
id = get_id_from_freelist(info);
|
||||||
|
info->shadow[id].request = req;
|
||||||
|
|
||||||
|
@@ -435,37 +435,37 @@ static int blkif_queue_request(struct re
|
||||||
|
info->shadow[id].request = req;
|
||||||
|
|
||||||
|
if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
|
||||||
|
- ring_req->operation = BLKIF_OP_DISCARD;
|
||||||
|
- ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
|
||||||
|
- ring_req->u.discard.id = id;
|
||||||
|
- ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
|
||||||
|
+ ring_req.operation = BLKIF_OP_DISCARD;
|
||||||
|
+ ring_req.u.discard.nr_sectors = blk_rq_sectors(req);
|
||||||
|
+ ring_req.u.discard.id = id;
|
||||||
|
+ ring_req.u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
|
||||||
|
if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
|
||||||
|
- ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
|
||||||
|
+ ring_req.u.discard.flag = BLKIF_DISCARD_SECURE;
|
||||||
|
else
|
||||||
|
- ring_req->u.discard.flag = 0;
|
||||||
|
+ ring_req.u.discard.flag = 0;
|
||||||
|
} else {
|
||||||
|
BUG_ON(info->max_indirect_segments == 0 &&
|
||||||
|
req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
|
||||||
|
BUG_ON(info->max_indirect_segments &&
|
||||||
|
req->nr_phys_segments > info->max_indirect_segments);
|
||||||
|
nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
|
||||||
|
- ring_req->u.rw.id = id;
|
||||||
|
+ ring_req.u.rw.id = id;
|
||||||
|
if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
|
||||||
|
/*
|
||||||
|
* The indirect operation can only be a BLKIF_OP_READ or
|
||||||
|
* BLKIF_OP_WRITE
|
||||||
|
*/
|
||||||
|
BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
|
||||||
|
- ring_req->operation = BLKIF_OP_INDIRECT;
|
||||||
|
- ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
|
||||||
|
+ ring_req.operation = BLKIF_OP_INDIRECT;
|
||||||
|
+ ring_req.u.indirect.indirect_op = rq_data_dir(req) ?
|
||||||
|
BLKIF_OP_WRITE : BLKIF_OP_READ;
|
||||||
|
- ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
|
||||||
|
- ring_req->u.indirect.handle = info->handle;
|
||||||
|
- ring_req->u.indirect.nr_segments = nseg;
|
||||||
|
+ ring_req.u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
|
||||||
|
+ ring_req.u.indirect.handle = info->handle;
|
||||||
|
+ ring_req.u.indirect.nr_segments = nseg;
|
||||||
|
} else {
|
||||||
|
- ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
|
||||||
|
- ring_req->u.rw.handle = info->handle;
|
||||||
|
- ring_req->operation = rq_data_dir(req) ?
|
||||||
|
+ ring_req.u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
|
||||||
|
+ ring_req.u.rw.handle = info->handle;
|
||||||
|
+ ring_req.operation = rq_data_dir(req) ?
|
||||||
|
BLKIF_OP_WRITE : BLKIF_OP_READ;
|
||||||
|
if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
|
||||||
|
/*
|
||||||
|
@@ -475,15 +475,15 @@ static int blkif_queue_request(struct re
|
||||||
|
* way. (It's also a FLUSH+FUA, since it is
|
||||||
|
* guaranteed ordered WRT previous writes.)
|
||||||
|
*/
|
||||||
|
- ring_req->operation = info->flush_op;
|
||||||
|
+ ring_req.operation = info->flush_op;
|
||||||
|
}
|
||||||
|
- ring_req->u.rw.nr_segments = nseg;
|
||||||
|
+ ring_req.u.rw.nr_segments = nseg;
|
||||||
|
}
|
||||||
|
for_each_sg(info->shadow[id].sg, sg, nseg, i) {
|
||||||
|
fsect = sg->offset >> 9;
|
||||||
|
lsect = fsect + (sg->length >> 9) - 1;
|
||||||
|
|
||||||
|
- if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
|
||||||
|
+ if ((ring_req.operation == BLKIF_OP_INDIRECT) &&
|
||||||
|
(i % SEGS_PER_INDIRECT_FRAME == 0)) {
|
||||||
|
unsigned long uninitialized_var(pfn);
|
||||||
|
|
||||||
|
@@ -504,7 +504,7 @@ static int blkif_queue_request(struct re
|
||||||
|
gnt_list_entry = get_grant(&gref_head, pfn, info);
|
||||||
|
info->shadow[id].indirect_grants[n] = gnt_list_entry;
|
||||||
|
segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
|
||||||
|
- ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
|
||||||
|
+ ring_req.u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
|
||||||
|
}
|
||||||
|
|
||||||
|
gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
|
||||||
|
@@ -537,8 +537,8 @@ static int blkif_queue_request(struct re
|
||||||
|
kunmap_atomic(bvec_data);
|
||||||
|
kunmap_atomic(shared_data);
|
||||||
|
}
|
||||||
|
- if (ring_req->operation != BLKIF_OP_INDIRECT) {
|
||||||
|
- ring_req->u.rw.seg[i] =
|
||||||
|
+ if (ring_req.operation != BLKIF_OP_INDIRECT) {
|
||||||
|
+ ring_req.u.rw.seg[i] =
|
||||||
|
(struct blkif_request_segment) {
|
||||||
|
.gref = ref,
|
||||||
|
.first_sect = fsect,
|
||||||
|
@@ -556,10 +556,13 @@ static int blkif_queue_request(struct re
|
||||||
|
kunmap_atomic(segments);
|
||||||
|
}
|
||||||
|
|
||||||
|
+ /* make the request available to the backend */
|
||||||
|
+ *RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt) = ring_req;
|
||||||
|
+ wmb();
|
||||||
|
info->ring.req_prod_pvt++;
|
||||||
|
|
||||||
|
/* Keep a private copy so we can reissue requests when recovering. */
|
||||||
|
- info->shadow[id].req = *ring_req;
|
||||||
|
+ info->shadow[id].req = ring_req;
|
||||||
|
|
||||||
|
if (new_persistent_gnts)
|
||||||
|
gnttab_free_grant_references(gref_head);
|
@ -0,0 +1,65 @@
|
|||||||
|
From d52f00960c1070c683809faddd35a2223e2b8a6e Mon Sep 17 00:00:00 2001
|
||||||
|
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
|
||||||
|
Date: Tue, 3 Nov 2015 16:40:43 +0000
|
||||||
|
Subject: [PATCH 6/7] xen-blkback: read from indirect descriptors only once
|
||||||
|
MIME-Version: 1.0
|
||||||
|
Content-Type: text/plain; charset=UTF-8
|
||||||
|
Content-Transfer-Encoding: 8bit
|
||||||
|
|
||||||
|
Since indirect descriptors are in memory shared with the frontend, the
|
||||||
|
frontend could alter the first_sect and last_sect values after they have
|
||||||
|
been validated but before they are recorded in the request. This may
|
||||||
|
result in I/O requests that overflow the foreign page, possibly
|
||||||
|
overwriting local pages when the I/O request is executed.
|
||||||
|
|
||||||
|
When parsing indirect descriptors, only read first_sect and last_sect
|
||||||
|
once.
|
||||||
|
|
||||||
|
This is part of XSA155.
|
||||||
|
|
||||||
|
CC: stable@vger.kernel.org
|
||||||
|
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
|
||||||
|
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
|
||||||
|
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
|
||||||
|
----
|
||||||
|
v2: This is against v4.3
|
||||||
|
---
|
||||||
|
drivers/block/xen-blkback/blkback.c | 12 +++++++-----
|
||||||
|
1 file changed, 7 insertions(+), 5 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
|
||||||
|
index 6a685ae..f2e7a38 100644
|
||||||
|
--- a/drivers/block/xen-blkback/blkback.c
|
||||||
|
+++ b/drivers/block/xen-blkback/blkback.c
|
||||||
|
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
|
||||||
|
goto unmap;
|
||||||
|
|
||||||
|
for (n = 0, i = 0; n < nseg; n++) {
|
||||||
|
+ uint8_t first_sect, last_sect;
|
||||||
|
+
|
||||||
|
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
|
||||||
|
/* Map indirect segments */
|
||||||
|
if (segments)
|
||||||
|
@@ -958,14 +960,14 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
|
||||||
|
}
|
||||||
|
i = n % SEGS_PER_INDIRECT_FRAME;
|
||||||
|
pending_req->segments[n]->gref = segments[i].gref;
|
||||||
|
- seg[n].nsec = segments[i].last_sect -
|
||||||
|
- segments[i].first_sect + 1;
|
||||||
|
- seg[n].offset = (segments[i].first_sect << 9);
|
||||||
|
- if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
|
||||||
|
- (segments[i].last_sect < segments[i].first_sect)) {
|
||||||
|
+ first_sect = READ_ONCE(segments[i].first_sect);
|
||||||
|
+ last_sect = READ_ONCE(segments[i].last_sect);
|
||||||
|
+ if (last_sect >= (PAGE_SIZE >> 9) || last_sect < first_sect) {
|
||||||
|
rc = -EINVAL;
|
||||||
|
goto unmap;
|
||||||
|
}
|
||||||
|
+ seg[n].nsec = last_sect - first_sect + 1;
|
||||||
|
+ seg[n].offset = first_sect << 9;
|
||||||
|
preq->nr_sects += seg[n].nsec;
|
||||||
|
}
|
||||||
|
|
||||||
|
--
|
||||||
|
2.1.0
|
||||||
|
|
Loading…
Reference in new issue