qubes-linux-kernel/patches.xen/xen-cxgb3
2011-04-19 22:09:59 +02:00

152 lines
5.3 KiB
Plaintext

From: http://xenbits.xen.org/XCP/linux-2.6.32.pq.hg?rev/20e4634f7b7b
Subject: apply xen specific patch to the Chelsio ethernet drivers
as a result of their feedback from the Cowley Beta
Patch-mainline: n/a
* Disable LRO by default. The kernel.org driver does enable it, but it
does not play very well with the bridging layer. (Please note that the
kernel.org driver does now implement GRO)
* Allocate SKBs instead of pages for incoming data. Using pages causes
traffic to stall when the VMs use large MTUs.
* Disable lazy completion to Tx buffers. cxgb3 completion mechanism
coalesces TX completion notifications, but this breaks the VM's
behavior: The VMs' networking stacks rely on the skb being freed in the
hypervisor to open the Tx buffer.
Acked-by: bphilips@suse.de
--- head-2011-01-30.orig/drivers/net/cxgb3/cxgb3_main.c 2011-01-31 12:42:17.000000000 +0100
+++ head-2011-01-30/drivers/net/cxgb3/cxgb3_main.c 2011-02-03 14:45:48.000000000 +0100
@@ -1923,7 +1923,11 @@ static int set_rx_csum(struct net_device
} else {
int i;
+#ifndef CONFIG_XEN
p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
+#else
+ p->rx_offload &= ~(T3_RX_CSUM);
+#endif
for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
set_qset_lro(dev, i, 0);
}
@@ -3298,7 +3302,11 @@ static int __devinit init_one(struct pci
adapter->port[i] = netdev;
pi = netdev_priv(netdev);
pi->adapter = adapter;
+#ifndef CONFIG_XEN
pi->rx_offload = T3_RX_CSUM | T3_LRO;
+#else
+ pi->rx_offload = T3_RX_CSUM;
+#endif
pi->port_id = i;
netif_carrier_off(netdev);
netdev->irq = pdev->irq;
--- head-2011-01-30.orig/drivers/net/cxgb3/sge.c 2011-01-05 01:50:19.000000000 +0100
+++ head-2011-01-30/drivers/net/cxgb3/sge.c 2011-02-03 14:45:48.000000000 +0100
@@ -58,11 +58,24 @@
* It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
* directly.
*/
+#ifndef CONFIG_XEN
#define FL0_PG_CHUNK_SIZE 2048
+#else
+/* Use skbuffs for XEN kernels. LRO is already disabled */
+#define FL0_PG_CHUNK_SIZE 0
+#endif
+
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
+
+#ifndef CONFIG_XEN
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
+#else
+#define FL1_PG_CHUNK_SIZE 0
+#define FL1_PG_ORDER 0
+#endif
+
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
#define SGE_RX_DROP_THRES 16
@@ -1267,7 +1280,27 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *
gen = q->gen;
q->unacked += ndesc;
+#ifdef CONFIG_XEN
+ /*
+ * Some Guest OS clients get terrible performance when they have bad
+ * message size / socket send buffer space parameters. For instance,
+ * if an application selects an 8KB message size and an 8KB send
+ * socket buffer size. This forces the application into a single
+ * packet stop-and-go mode where it's only willing to have a single
+ * message outstanding. The next message is only sent when the
+ * previous message is noted as having been sent. Until we issue a
+ * kfree_skb() against the TX skb, the skb is charged against the
+ * application's send buffer space. We only free up TX skbs when we
+ * get a TX credit return from the hardware / firmware which is fairly
+ * lazy about this. So we request a TX WR Completion Notification on
+ * every TX descriptor in order to accelerate TX credit returns. See
+ * also the change in handle_rsp_cntrl_info() to free up TX skb's when
+ * we receive the TX WR Completion Notifications ...
+ */
+ compl = F_WR_COMPL;
+#else
compl = (q->unacked & 8) << (S_WR_COMPL - 3);
+#endif
q->unacked &= 7;
pidx = q->pidx;
q->pidx += ndesc;
@@ -2176,8 +2209,35 @@ static inline void handle_rsp_cntrl_info
#endif
credits = G_RSPD_TXQ0_CR(flags);
- if (credits)
+ if (credits) {
qs->txq[TXQ_ETH].processed += credits;
+#ifdef CONFIG_XEN
+ /*
+ * In the normal Linux driver t3_eth_xmit() routine, we call
+ * skb_orphan() on unshared TX skb. This results in a call to
+ * the destructor for the skb which frees up the send buffer
+ * space it was holding down. This, in turn, allows the
+ * application to make forward progress generating more data
+ * which is important at 10Gb/s. For Virtual Machine Guest
+ * Operating Systems this doesn't work since the send buffer
+ * space is being held down in the Virtual Machine. Thus we
+ * need to get the TX skb's freed up as soon as possible in
+ * order to prevent applications from stalling.
+ *
+ * This code is largely copied from the corresponding code in
+ * sge_timer_tx() and should probably be kept in sync with any
+ * changes there.
+ */
+ if (__netif_tx_trylock(qs->tx_q)) {
+ struct port_info *pi = netdev_priv(qs->netdev);
+ struct adapter *adap = pi->adapter;
+
+ reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
+ TX_RECLAIM_CHUNK);
+ __netif_tx_unlock(qs->tx_q);
+ }
+#endif
+ }
credits = G_RSPD_TXQ2_CR(flags);
if (credits)
--- head-2011-01-30.orig/drivers/net/cxgb3/version.h 2010-10-20 22:30:22.000000000 +0200
+++ head-2011-01-30/drivers/net/cxgb3/version.h 2011-02-03 14:45:48.000000000 +0100
@@ -35,7 +35,11 @@
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
+#ifndef CONFIG_XEN
#define DRV_VERSION "1.1.4-ko"
+#else
+#define DRV_VERSION "1.1.4-xen-ko"
+#endif
/* Firmware version */
#define FW_VERSION_MAJOR 7