qubes-linux-kernel/patches.xen/xen-netback-kernel-threads

From: Dongxiao Xu <dongxiao.xu@intel.com>
Subject: [PATCH 3/3] Use Kernel thread to replace the tasklet.
Patch-mainline: n/a
A kernel thread gives more control over QoS and can improve the
responsiveness of dom0 userspace.
Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
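
To make the QoS point concrete (an illustrative sketch only, not part of
the patch; the demo_* name is made up): a tasklet always runs at softirq
priority, whereas a netback kernel thread is an ordinary task whose
scheduling policy can be adjusted in-kernel, or reniced from userspace
via its "netback/%u" PID.

#include <linux/kthread.h>
#include <linux/sched.h>

/* Sketch: change the scheduling policy/priority of a netback thread.
 * No equivalent knob exists for a tasklet. */
static int demo_set_netback_prio(struct task_struct *task, int policy,
				 int prio)
{
	struct sched_param sp = { .sched_priority = prio };

	return sched_setscheduler(task, policy, &sp);
}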
Subject: xen: ensure locking gnttab_copy_grant_page is safe against interrupts.
Now that netback processing occurs in a thread instead of a tasklet,
gnttab_copy_grant_page needs to be safe against interrupts.

The code is currently commented out in this tree, but on 2.6.18 we
observed a deadlock: the netback thread called gnttab_copy_grant_page
and took gnttab_dma_lock for writing, was interrupted, and on return
from interrupt the network stack's TX tasklet ended up calling
__gnttab_dma_map_page via the hardware driver->swiotlb path and tried
to take gnttab_dma_lock for reading.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: "Xu, Dongxiao" <dongxiao.xu@intel.com>
jb: changed write_seq{,un}lock_irq() to write_seq{,un}lock_bh(), and
made the use of kernel threads optional (but default)
Acked-by: jbeulich@novell.com
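
To illustrate why the _bh locking below is sufficient (a minimal
standalone sketch under made-up demo_* names, not code from this tree):
write_seqlock_bh() disables bottom halves on the local CPU, so a softirq
reader can no longer interrupt the writer and spin forever on an odd
sequence count.

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_dma_lock);
static unsigned long demo_mfn;

/* Process-context writer (the netback thread path). */
static void demo_update(unsigned long new_mfn)
{
	write_seqlock_bh(&demo_dma_lock);	/* softirqs off on this CPU */
	demo_mfn = new_mfn;
	write_sequnlock_bh(&demo_dma_lock);
}

/* Softirq-context reader (the driver->swiotlb path).  With a plain
 * write_seqlock() in the writer, this loop could preempt the writer on
 * the same CPU and spin forever, because read_seqbegin() waits for an
 * even sequence count. */
static unsigned long demo_read(void)
{
	unsigned long mfn;
	unsigned seq;

	do {
		seq = read_seqbegin(&demo_dma_lock);
		mfn = demo_mfn;
	} while (read_seqretry(&demo_dma_lock, seq));

	return mfn;
}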
--- head-2010-04-29.orig/drivers/xen/core/gnttab.c 2010-04-15 11:42:34.000000000 +0200
+++ head-2010-04-29/drivers/xen/core/gnttab.c 2010-04-15 11:44:26.000000000 +0200
@@ -554,14 +554,14 @@ int gnttab_copy_grant_page(grant_ref_t r
mfn = pfn_to_mfn(pfn);
new_mfn = virt_to_mfn(new_addr);
- write_seqlock(&gnttab_dma_lock);
+ write_seqlock_bh(&gnttab_dma_lock);
/* Make seq visible before checking page_mapped. */
smp_mb();
/* Has the page been DMA-mapped? */
if (unlikely(page_mapped(page))) {
- write_sequnlock(&gnttab_dma_lock);
+ write_sequnlock_bh(&gnttab_dma_lock);
put_page(new_page);
err = -EBUSY;
goto out;
@@ -578,7 +578,7 @@ int gnttab_copy_grant_page(grant_ref_t r
BUG_ON(err);
BUG_ON(unmap.status);
- write_sequnlock(&gnttab_dma_lock);
+ write_sequnlock_bh(&gnttab_dma_lock);
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
--- head-2010-04-29.orig/drivers/xen/netback/common.h 2010-04-30 11:32:08.000000000 +0200
+++ head-2010-04-29/drivers/xen/netback/common.h 2010-04-30 11:32:26.000000000 +0200
@@ -239,8 +239,16 @@ struct netbk_tx_pending_inuse {
#define MAX_MFN_ALLOC 64
struct xen_netbk {
- struct tasklet_struct net_tx_tasklet;
- struct tasklet_struct net_rx_tasklet;
+ union {
+ struct {
+ struct tasklet_struct net_tx_tasklet;
+ struct tasklet_struct net_rx_tasklet;
+ };
+ struct {
+ wait_queue_head_t netbk_action_wq;
+ struct task_struct *task;
+ };
+ };
struct sk_buff_head rx_queue;
struct sk_buff_head tx_queue;
--- head-2010-04-29.orig/drivers/xen/netback/netback.c 2010-04-30 11:49:27.000000000 +0200
+++ head-2010-04-29/drivers/xen/netback/netback.c 2010-04-30 11:49:32.000000000 +0200
@@ -35,6 +35,7 @@
*/
#include "common.h"
+#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <xen/balloon.h>
#include <xen/interface/memory.h>
@@ -43,6 +44,8 @@
struct xen_netbk *__read_mostly xen_netbk;
unsigned int __read_mostly netbk_nr_groups;
+static bool __read_mostly use_kthreads = true;
+static bool __initdata bind_threads;
#define GET_GROUP_INDEX(netif) ((netif)->group)
@@ -127,7 +130,11 @@ static int MODPARM_permute_returns = 0;
module_param_named(permute_returns, MODPARM_permute_returns, bool, S_IRUSR|S_IWUSR);
MODULE_PARM_DESC(permute_returns, "Randomly permute the order in which TX responses are sent to the frontend");
module_param_named(groups, netbk_nr_groups, uint, 0);
-MODULE_PARM_DESC(groups, "Specify the number of tasklet pairs to use");
+MODULE_PARM_DESC(groups, "Specify the number of tasklet pairs/threads to use");
+module_param_named(tasklets, use_kthreads, invbool, 0);
+MODULE_PARM_DESC(tasklets, "Use tasklets instead of kernel threads");
+module_param_named(bind, bind_threads, bool, 0);
+MODULE_PARM_DESC(bind, "Bind kernel threads to (v)CPUs");
int netbk_copy_skb_mode;
@@ -164,8 +171,12 @@ static inline void maybe_schedule_tx_act
smp_mb();
if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
- !list_empty(&netbk->net_schedule_list))
- tasklet_schedule(&netbk->net_tx_tasklet);
+ !list_empty(&netbk->net_schedule_list)) {
+ if (use_kthreads)
+ wake_up(&netbk->netbk_action_wq);
+ else
+ tasklet_schedule(&netbk->net_tx_tasklet);
+ }
}
static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
@@ -326,7 +337,10 @@ int netif_be_start_xmit(struct sk_buff *
netbk = &xen_netbk[GET_GROUP_INDEX(netif)];
skb_queue_tail(&netbk->rx_queue, skb);
- tasklet_schedule(&netbk->net_rx_tasklet);
+ if (use_kthreads)
+ wake_up(&netbk->netbk_action_wq);
+ else
+ tasklet_schedule(&netbk->net_rx_tasklet);
return NETDEV_TX_OK;
@@ -779,8 +793,12 @@ static void net_rx_action(unsigned long
/* More work to do? */
if (!skb_queue_empty(&netbk->rx_queue) &&
- !timer_pending(&netbk->net_timer))
- tasklet_schedule(&netbk->net_rx_tasklet);
+ !timer_pending(&netbk->net_timer)) {
+ if (use_kthreads)
+ wake_up(&netbk->netbk_action_wq);
+ else
+ tasklet_schedule(&netbk->net_rx_tasklet);
+ }
#if 0
else
xen_network_done_notify();
@@ -789,12 +807,18 @@ static void net_rx_action(unsigned long
static void net_alarm(unsigned long group)
{
- tasklet_schedule(&xen_netbk[group].net_rx_tasklet);
+ if (use_kthreads)
+ wake_up(&xen_netbk[group].netbk_action_wq);
+ else
+ tasklet_schedule(&xen_netbk[group].net_rx_tasklet);
}
static void netbk_tx_pending_timeout(unsigned long group)
{
- tasklet_schedule(&xen_netbk[group].net_tx_tasklet);
+ if (use_kthreads)
+ wake_up(&xen_netbk[group].netbk_action_wq);
+ else
+ tasklet_schedule(&xen_netbk[group].net_tx_tasklet);
}
struct net_device_stats *netif_be_get_stats(struct net_device *dev)
@@ -1506,7 +1530,10 @@ static void net_tx_action(unsigned long
continue;
}
- netif_rx(skb);
+ if (use_kthreads)
+ netif_rx_ni(skb);
+ else
+ netif_rx(skb);
netif->dev->last_rx = jiffies;
}
@@ -1532,7 +1559,10 @@ static void netif_idx_release(struct xen
netbk->dealloc_prod++;
spin_unlock_irqrestore(&netbk->release_lock, flags);
- tasklet_schedule(&netbk->net_tx_tasklet);
+ if (use_kthreads)
+ wake_up(&netbk->netbk_action_wq);
+ else
+ tasklet_schedule(&netbk->net_tx_tasklet);
}
static void netif_page_release(struct page *page, unsigned int order)
@@ -1670,6 +1700,46 @@ static struct irqaction netif_be_dbg_act
};
#endif
+static inline int rx_work_todo(struct xen_netbk *netbk)
+{
+ return !skb_queue_empty(&netbk->rx_queue);
+}
+
+static inline int tx_work_todo(struct xen_netbk *netbk)
+{
+ if (netbk->dealloc_cons != netbk->dealloc_prod)
+ return 1;
+
+ if (nr_pending_reqs(netbk) + MAX_SKB_FRAGS < MAX_PENDING_REQS &&
+ !list_empty(&netbk->net_schedule_list))
+ return 1;
+
+ return 0;
+}
+
+static int netbk_action_thread(void *index)
+{
+ unsigned long group = (unsigned long)index;
+ struct xen_netbk *netbk = &xen_netbk[group];
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(netbk->netbk_action_wq,
+ rx_work_todo(netbk) ||
+ tx_work_todo(netbk) ||
+ kthread_should_stop());
+ cond_resched();
+
+ if (rx_work_todo(netbk))
+ net_rx_action(group);
+
+ if (tx_work_todo(netbk))
+ net_tx_action(group);
+ }
+
+ return 0;
+}
+
+
static int __init netback_init(void)
{
unsigned int i, group;
@@ -1697,8 +1767,26 @@ static int __init netback_init(void)
for (group = 0; group < netbk_nr_groups; group++) {
struct xen_netbk *netbk = &xen_netbk[group];
- tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group);
- tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group);
+ if (use_kthreads) {
+ init_waitqueue_head(&netbk->netbk_action_wq);
+ netbk->task = kthread_create(netbk_action_thread,
+ (void *)(long)group,
+ "netback/%u", group);
+
+ if (!IS_ERR(netbk->task)) {
+ if (bind_threads)
+ kthread_bind(netbk->task, group);
+ wake_up_process(netbk->task);
+ } else {
+ printk(KERN_ALERT
+ "kthread_create() fails at netback\n");
+ rc = PTR_ERR(netbk->task);
+ goto failed_init;
+ }
+ } else {
+ tasklet_init(&netbk->net_tx_tasklet, net_tx_action, group);
+ tasklet_init(&netbk->net_rx_tasklet, net_rx_action, group);
+ }
skb_queue_head_init(&netbk->rx_queue);
skb_queue_head_init(&netbk->tx_queue);
@@ -1762,8 +1850,11 @@ failed_init:
while (group-- > 0) {
struct xen_netbk *netbk = &xen_netbk[group];
- free_empty_pages_and_pagevec(netbk->mmap_pages,
- MAX_PENDING_REQS);
+ if (use_kthreads && netbk->task && !IS_ERR(netbk->task))
+ kthread_stop(netbk->task);
+ if (netbk->mmap_pages)
+ free_empty_pages_and_pagevec(netbk->mmap_pages,
+ MAX_PENDING_REQS);
del_timer(&netbk->tx_pending_timer);
del_timer(&netbk->net_timer);
}
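
Usage note (assuming the backend is built as the netbk module, the name
this tree's netback Makefile uses): "groups" sizes the tasklet-pair or
thread pool, "tasklets" is an inverted bool (tasklets=1 clears
use_kthreads and restores the old tasklet behavior), and "bind" pins
thread N to (v)CPU N at creation time.  For example, on the dom0 kernel
command line:

	netbk.groups=4 netbk.bind=1

or, to fall back to tasklets:

	netbk.tasklets=1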