From: Dongxiao Xu <dongxiao.xu@intel.com>
Subject: [PATCH 2/3] Netback: Multiple tasklets support.
Patch-mainline: n/a

Netback currently uses a single pair of tasklets for Tx/Rx data
transfer. A tasklet can only run on one CPU at a time, and this one
pair has to serve all netfronts, so it has become a performance
bottleneck. This patch replaces the single pair with multiple tasklet
pairs in dom0.

Assuming dom0 has CPUNR VCPUs, we define CPUNR tasklet pairs (CPUNR
for Tx and CPUNR for Rx). Each pair of tasklets serves a specific
group of netfronts. The formerly global and static variables are
duplicated for each group as well, to avoid having to protect them
with a spinlock.
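
As a rough, illustrative sketch of the scheme (not part of the patch
itself): each group is a struct xen_netbk instance that owns its own
Tx/Rx tasklet pair plus private copies of the per-group state, and a
netfront coming online is attached to whichever group currently holds
the fewest interfaces. Only xen_netbk, nr_groups and netbk_nr_groups
below correspond to identifiers actually touched by this patch; the
struct layout and helper name are assumptions made for the sketch.

#include <linux/interrupt.h>

struct xen_netbk_sketch {
	struct tasklet_struct net_tx_tasklet;	/* per-group Tx tasklet */
	struct tasklet_struct net_rx_tasklet;	/* per-group Rx tasklet */
	atomic_t nr_groups;		/* netfronts currently in this group */
	/* ... per-group copies of the formerly global/static state ... */
};

static struct xen_netbk_sketch *xen_netbk;	/* one entry per group */
static unsigned int netbk_nr_groups;

/* Attach a netfront coming online to the currently least loaded group. */
static unsigned int netbk_pick_group(void)
{
	unsigned int group = 0, i;
	unsigned int min = atomic_read(&xen_netbk[0].nr_groups);

	for (i = 1; i < netbk_nr_groups; i++) {
		unsigned int n = atomic_read(&xen_netbk[i].nr_groups);

		if (n < min) {
			group = i;
			min = n;
		}
	}
	atomic_inc(&xen_netbk[group].nr_groups);
	return group;
}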

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>

jb: some cleanups
Acked-by: jbeulich@novell.com

--- head-2010-04-29.orig/drivers/xen/netback/common.h 2010-04-30 11:11:33.000000000 +0200
+++ head-2010-04-29/drivers/xen/netback/common.h 2010-04-30 11:32:08.000000000 +0200
@@ -58,6 +58,7 @@
 typedef struct netif_st {
 	/* Unique identifier for this interface. */
 	domid_t domid;
+	unsigned int group;
 	unsigned int handle;
 
 	u8 fe_dev_addr[6];
@@ -260,6 +261,7 @@ struct xen_netbk {
 
 	struct page **mmap_pages;
 
+	atomic_t nr_groups;
 	unsigned int alloc_index;
 
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
@@ -287,4 +289,8 @@ struct xen_netbk {
 
 	unsigned long mfn_list[MAX_MFN_ALLOC];
 };
+
+extern struct xen_netbk *xen_netbk;
+extern unsigned int netbk_nr_groups;
+
 #endif /* __NETIF__BACKEND__COMMON_H__ */
--- head-2010-04-29.orig/drivers/xen/netback/interface.c 2010-01-26 09:03:24.000000000 +0100
+++ head-2010-04-29/drivers/xen/netback/interface.c 2010-04-30 10:42:29.000000000 +0200
@@ -54,14 +54,36 @@ module_param_named(queue_length, netbk_q
 
 static void __netif_up(netif_t *netif)
 {
+	unsigned int group = 0;
+	unsigned int min_groups = atomic_read(&xen_netbk[0].nr_groups);
+	unsigned int i;
+
+	/* Find the list which contains least number of domains. */
+	for (i = 1; i < netbk_nr_groups; i++) {
+		unsigned int nr_groups = atomic_read(&xen_netbk[i].nr_groups);
+
+		if (nr_groups < min_groups) {
+			group = i;
+			min_groups = nr_groups;
+		}
+	}
+
+	atomic_inc(&xen_netbk[group].nr_groups);
+	netif->group = group;
+
 	enable_irq(netif->irq);
 	netif_schedule_work(netif);
 }
 
 static void __netif_down(netif_t *netif)
 {
+	struct xen_netbk *netbk = xen_netbk + netif->group;
+
 	disable_irq(netif->irq);
 	netif_deschedule_work(netif);
+
+	netif->group = UINT_MAX;
+	atomic_dec(&netbk->nr_groups);
 }
 
 static int net_open(struct net_device *dev)
@@ -207,6 +229,7 @@ netif_t *netif_alloc(struct device *pare
 	netif = netdev_priv(dev);
 	memset(netif, 0, sizeof(*netif));
 	netif->domid = domid;
+	netif->group = UINT_MAX;
 	netif->handle = handle;
 	atomic_set(&netif->refcnt, 1);
 	init_waitqueue_head(&netif->waiting_to_free);
--- head-2010-04-29.orig/drivers/xen/netback/netback.c 2010-04-30 11:49:12.000000000 +0200
+++ head-2010-04-29/drivers/xen/netback/netback.c 2010-04-30 11:49:27.000000000 +0200
@@ -41,10 +41,10 @@
 
 /*define NETBE_DEBUG_INTERRUPT*/
 
-static struct xen_netbk *__read_mostly xen_netbk;
-static const unsigned int __read_mostly netbk_nr_groups = 1;
+struct xen_netbk *__read_mostly xen_netbk;
+unsigned int __read_mostly netbk_nr_groups;
 
-#define GET_GROUP_INDEX(netif) (0)
+#define GET_GROUP_INDEX(netif) ((netif)->group)
 
 static void netif_idx_release(struct xen_netbk *, u16 pending_idx);
 static void make_tx_response(netif_t *netif,
@@ -126,6 +126,8 @@ MODULE_PARM_DESC(copy_skb, "Copy data re
 static int MODPARM_permute_returns = 0;
 module_param_named(permute_returns, MODPARM_permute_returns, bool, S_IRUSR|S_IWUSR);
 MODULE_PARM_DESC(permute_returns, "Randomly permute the order in which TX responses are sent to the frontend");
+module_param_named(groups, netbk_nr_groups, uint, 0);
+MODULE_PARM_DESC(groups, "Specify the number of tasklet pairs to use");
 
 int netbk_copy_skb_mode;
 
@@ -1548,9 +1550,20 @@ static void netif_page_release(struct pa
 irqreturn_t netif_be_int(int irq, void *dev_id)
 {
 	netif_t *netif = dev_id;
+	unsigned int group = GET_GROUP_INDEX(netif);
+
+	if (unlikely(group >= netbk_nr_groups)) {
+		/*
+		 * Short of having a way to bind the IRQ in disabled mode
+		 * (IRQ_NOAUTOEN), we have to ignore the first invocation(s)
+		 * (before we got assigned to a group).
+		 */
+		BUG_ON(group != UINT_MAX);
+		return IRQ_HANDLED;
+	}
 
 	add_to_net_schedule_list_tail(netif);
-	maybe_schedule_tx_action(GET_GROUP_INDEX(netif));
+	maybe_schedule_tx_action(group);
 
 	if (netif_schedulable(netif) && !netbk_queue_full(netif))
 		netif_wake_queue(netif->dev);
@@ -1666,8 +1679,13 @@ static int __init netback_init(void)
 	if (!is_running_on_xen())
 		return -ENODEV;
 
+	if (!netbk_nr_groups)
+		netbk_nr_groups = (num_online_cpus() + 1) / 2;
+	if (netbk_nr_groups > MAX_GROUPS)
+		netbk_nr_groups = MAX_GROUPS;
+
 	/* We can increase reservation by this much in net_rx_action(). */
-	balloon_update_driver_allowance(NET_RX_RING_SIZE);
+	balloon_update_driver_allowance(netbk_nr_groups * NET_RX_RING_SIZE);
 
 	xen_netbk = __vmalloc(netbk_nr_groups * sizeof(*xen_netbk),
			      GFP_KERNEL|__GFP_HIGHMEM|__GFP_ZERO, PAGE_KERNEL);