From: jbeulich@novell.com
Subject: fold IPIs onto a single IRQ each
Patch-mainline: n/a
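
Instead of binding a separate event channel (and hence IRQ) per IPI
vector on each CPU, use a single IRQ shared by all IPI vectors and
backed by one event channel per CPU. The sending side sets the vector's
bit in the target CPU's ipi_pending bitmap (notify_remote_via_ipi())
and notifies the event channel only if it wasn't already pending; the
receiving side (ipi_interrupt()) drains the bitmap and dispatches to
the individual smp_*_interrupt() handlers. The shared IRQ uses the
percpu flow handler and is marked IRQ_PER_CPU (hence the new Kconfig
select), and fixup_irqs() now leaves per-CPU IRQs alone when migrating
interrupts off a CPU being brought down.
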
--- head-2011-02-17.orig/arch/x86/include/asm/hw_irq.h	2011-02-01 15:09:47.000000000 +0100
+++ head-2011-02-17/arch/x86/include/asm/hw_irq.h	2011-02-02 15:09:42.000000000 +0100
@@ -132,7 +132,6 @@ extern void smp_error_interrupt(struct p
 extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
 #endif
 #ifdef CONFIG_SMP
-#ifndef CONFIG_XEN
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
 extern void smp_call_function_single_interrupt(struct pt_regs *);
@@ -141,13 +140,9 @@ extern void smp_invalidate_interrupt(str
 #else
 extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
 #endif
-#else
-#include <linux/irqreturn.h>
-extern irqreturn_t smp_reschedule_interrupt(int, void *);
-extern irqreturn_t smp_call_function_interrupt(int, void *);
-extern irqreturn_t smp_call_function_single_interrupt(int, void *);
-extern irqreturn_t smp_reboot_interrupt(int, void *);
-extern irqreturn_t smp_irq_work_interrupt(int, void *);
+extern void smp_irq_work_interrupt(struct pt_regs *);
+#ifdef CONFIG_XEN
+extern void smp_reboot_interrupt(struct pt_regs *);
 #endif
 #endif
 
--- head-2011-02-17.orig/arch/x86/kernel/apic/ipi-xen.c	2011-02-21 13:57:40.000000000 +0100
+++ head-2011-02-17/arch/x86/kernel/apic/ipi-xen.c	2011-02-21 13:58:00.000000000 +0100
@@ -6,25 +6,6 @@
 
 #include <xen/evtchn.h>
 
-DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
-
-static inline void __send_IPI_one(unsigned int cpu, int vector)
-{
-	int irq = per_cpu(ipi_to_irq, cpu)[vector];
-
-	if (vector == NMI_VECTOR) {
-		static int __read_mostly printed;
-		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
-
-		if (rc && !printed)
-			pr_warning("Unable (%d) to send NMI to CPU#%u\n",
-				   printed = rc, cpu);
-		return;
-	}
-	BUG_ON(irq < 0);
-	notify_remote_via_irq(irq);
-}
-
 void xen_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
 {
 	unsigned int cpu, this_cpu = smp_processor_id();
@@ -32,7 +13,7 @@ void xen_send_IPI_mask_allbutself(const
 	WARN_ON(!cpumask_subset(cpumask, cpu_online_mask));
 	for_each_cpu_and(cpu, cpumask, cpu_online_mask)
 		if (cpu != this_cpu)
-			__send_IPI_one(cpu, vector);
+			notify_remote_via_ipi(vector, cpu);
 }
 
 void xen_send_IPI_mask(const struct cpumask *cpumask, int vector)
@@ -41,7 +22,7 @@ void xen_send_IPI_mask(const struct cpum
 
 	WARN_ON(!cpumask_subset(cpumask, cpu_online_mask));
 	for_each_cpu_and(cpu, cpumask, cpu_online_mask)
-		__send_IPI_one(cpu, vector);
+		notify_remote_via_ipi(vector, cpu);
 }
 
 void xen_send_IPI_allbutself(int vector)
@@ -56,5 +37,5 @@ void xen_send_IPI_all(int vector)
 
 void xen_send_IPI_self(int vector)
 {
-	__send_IPI_one(smp_processor_id(), vector);
+	notify_remote_via_ipi(vector, smp_processor_id());
 }
--- head-2011-02-17.orig/arch/x86/kernel/irq-xen.c	2011-02-18 15:17:23.000000000 +0100
+++ head-2011-02-17/arch/x86/kernel/irq-xen.c	2011-02-02 15:09:43.000000000 +0100
@@ -331,6 +331,7 @@ void fixup_irqs(void)
 		data = &desc->irq_data;
 		affinity = data->affinity;
 		if (!irq_has_action(irq) ||
+		    (desc->status & IRQ_PER_CPU) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
 			continue;
--- head-2011-02-17.orig/arch/x86/kernel/irq_work-xen.c	2011-02-03 11:19:35.000000000 +0100
+++ head-2011-02-17/arch/x86/kernel/irq_work-xen.c	2011-02-03 13:56:43.000000000 +0100
@@ -8,12 +8,10 @@
 #include <asm/ipi.h>
 
 #ifdef CONFIG_SMP
-irqreturn_t smp_irq_work_interrupt(int irq, void *dev_id)
+void smp_irq_work_interrupt(struct pt_regs *regs)
 {
 	inc_irq_stat(apic_irq_work_irqs);
 	irq_work_run();
-
-	return IRQ_HANDLED;
 }
 
 void arch_irq_work_raise(void)
--- head-2011-02-17.orig/arch/x86/kernel/smp-xen.c	2011-02-01 15:09:47.000000000 +0100
+++ head-2011-02-17/arch/x86/kernel/smp-xen.c	2011-02-02 15:09:43.000000000 +0100
@@ -136,11 +136,9 @@ void xen_send_call_func_ipi(const struct
  * this function calls the 'stop' function on all other CPUs in the system.
  */
 
-irqreturn_t smp_reboot_interrupt(int irq, void *dev_id)
+void smp_reboot_interrupt(struct pt_regs *regs)
 {
 	stop_this_cpu(NULL);
-
-	return IRQ_HANDLED;
 }
 
 void xen_stop_other_cpus(int wait)
@@ -179,24 +177,19 @@ void xen_stop_other_cpus(int wait)
  * all the work is done automatically when
  * we return from the interrupt.
  */
-irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	inc_irq_stat(irq_resched_count);
-	return IRQ_HANDLED;
 }
 
-irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+void smp_call_function_interrupt(struct pt_regs *regs)
 {
 	generic_smp_call_function_interrupt();
 	inc_irq_stat(irq_call_count);
-
-	return IRQ_HANDLED;
 }
 
-irqreturn_t smp_call_function_single_interrupt(int irq, void *dev_id)
+void smp_call_function_single_interrupt(struct pt_regs *regs)
 {
 	generic_smp_call_function_single_interrupt();
 	inc_irq_stat(irq_call_count);
-
-	return IRQ_HANDLED;
 }
--- head-2011-02-17.orig/drivers/xen/Kconfig	2011-02-03 14:48:57.000000000 +0100
+++ head-2011-02-17/drivers/xen/Kconfig	2011-02-03 14:49:15.000000000 +0100
@@ -4,6 +4,7 @@
 
 config XEN
 	bool
+	select IRQ_PER_CPU if SMP
 
 if XEN
 config XEN_INTERFACE_VERSION
--- head-2011-02-17.orig/drivers/xen/core/evtchn.c	2011-02-10 16:18:00.000000000 +0100
+++ head-2011-02-17/drivers/xen/core/evtchn.c	2011-02-15 17:52:39.000000000 +0100
@@ -59,6 +59,20 @@ static DEFINE_SPINLOCK(irq_mapping_updat
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1 };
 
+/* IRQ <-> IPI mapping. */
+#if defined(CONFIG_SMP) && defined(CONFIG_X86)
+static int __read_mostly ipi_irq = -1;
+DEFINE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending);
+static DEFINE_PER_CPU_READ_MOSTLY(evtchn_port_t, ipi_evtchn);
+#else
+#define PER_CPU_IPI_IRQ
+#endif
+#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
+#define BUG_IF_IPI(irq_cfg) BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_IPI)
+#else
+#define BUG_IF_IPI(irq_cfg) ((void)0)
+#endif
+
 /* Binding types. */
 enum {
 	IRQT_UNBOUND,
@@ -108,7 +122,9 @@ static inline u32 mk_irq_info(u32 type,
 
 	BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
 	BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
+#if defined(PER_CPU_IPI_IRQ) && defined(NR_IPIS)
 	BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
+#endif
 	BUG_ON(index >> _INDEX_BITS);
 
 	BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));
@@ -120,25 +136,6 @@ static inline u32 mk_irq_info(u32 type,
  * Accessors for packed IRQ information.
  */
 
-static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg)
-{
-	return cfg->info & ((1U << _EVTCHN_BITS) - 1);
-}
-
-static inline unsigned int evtchn_from_irq_data(struct irq_data *data)
-{
-	const struct irq_cfg *cfg = irq_data_cfg(data);
-
-	return cfg ? evtchn_from_irq_cfg(cfg) : 0;
-}
-
-static inline unsigned int evtchn_from_irq(int irq)
-{
-	struct irq_data *data = irq_get_irq_data(irq);
-
-	return data ? evtchn_from_irq_data(data) : 0;
-}
-
 static inline unsigned int index_from_irq_cfg(const struct irq_cfg *cfg)
 {
 	return (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
@@ -163,6 +160,38 @@ static inline unsigned int type_from_irq
 	return cfg ? type_from_irq_cfg(cfg) : IRQT_UNBOUND;
 }
 
+#ifndef PER_CPU_IPI_IRQ
+static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg,
+						   unsigned int cpu)
+{
+	BUG_ON(type_from_irq_cfg(cfg) != IRQT_IPI);
+	return per_cpu(ipi_evtchn, cpu);
+}
+#endif
+
+static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg)
+{
+#ifndef PER_CPU_IPI_IRQ
+	if (type_from_irq_cfg(cfg) == IRQT_IPI)
+		return evtchn_from_per_cpu_irq(cfg, smp_processor_id());
+#endif
+	return cfg->info & ((1U << _EVTCHN_BITS) - 1);
+}
+
+static inline unsigned int evtchn_from_irq_data(struct irq_data *data)
+{
+	const struct irq_cfg *cfg = irq_data_cfg(data);
+
+	return cfg ? evtchn_from_irq_cfg(cfg) : 0;
+}
+
+static inline unsigned int evtchn_from_irq(int irq)
+{
+	struct irq_data *data = irq_get_irq_data(irq);
+
+	return data ? evtchn_from_irq_data(data) : 0;
+}
+
 unsigned int irq_from_evtchn(unsigned int port)
 {
 	return evtchn_to_irq[port];
@@ -172,11 +201,13 @@ EXPORT_SYMBOL_GPL(irq_from_evtchn);
 /* IRQ <-> VIRQ mapping. */
 DEFINE_PER_CPU(int[NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 /* IRQ <-> IPI mapping. */
 #ifndef NR_IPIS
 #define NR_IPIS 1
 #endif
 DEFINE_PER_CPU(int[NR_IPIS], ipi_to_irq) = {[0 ... NR_IPIS-1] = -1};
+#endif
 
 #ifdef CONFIG_SMP
 
@@ -204,8 +235,14 @@ static void bind_evtchn_to_cpu(unsigned
 
 	BUG_ON(!test_bit(chn, s->evtchn_mask));
 
-	if (irq != -1)
-		cpumask_copy(irq_get_irq_data(irq)->affinity, cpumask_of(cpu));
+	if (irq != -1) {
+		struct irq_desc *desc = irq_to_desc(irq);
+
+		if (!(desc->status & IRQ_PER_CPU))
+			cpumask_copy(desc->irq_data.affinity, cpumask_of(cpu));
+		else
+			cpumask_set_cpu(cpu, desc->irq_data.affinity);
+	}
 
 	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn]));
 	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
@@ -370,7 +407,10 @@ asmlinkage void __irq_entry evtchn_do_up
 				port = (l1i * BITS_PER_LONG) + l2i;
 				mask_evtchn(port);
 				if ((irq = evtchn_to_irq[port]) != -1) {
-					clear_evtchn(port);
+#ifndef PER_CPU_IPI_IRQ
+					if (port != percpu_read(ipi_evtchn))
+#endif
+						clear_evtchn(port);
 					handled = handle_irq(irq, regs);
 				}
 				if (!handled && printk_ratelimit())
@@ -404,7 +444,7 @@ asmlinkage void __irq_entry evtchn_do_up
 }
 
 static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg,
-			    struct irq_chip *chip)
+			    struct irq_chip *chip, bool percpu)
 {
 	static int warned;
 	int irq;
@@ -420,11 +460,20 @@ static int find_unbound_irq(unsigned int
 			continue;
 
 		if (!cfg->bindcount) {
+			irq_flow_handler_t handle;
+			const char *name;
+
 			*pcfg = cfg;
 			desc->status |= IRQ_NOPROBE;
+			if (!percpu) {
+				handle = handle_fasteoi_irq;
+				name = "fasteoi";
+			} else {
+				handle = handle_percpu_irq;
+				name = "percpu";
+			}
 			set_irq_chip_and_handler_name(irq, chip,
-						      handle_fasteoi_irq,
-						      "fasteoi");
+						      handle, name);
 			return irq;
 		}
 	}
@@ -449,7 +498,7 @@ static int bind_caller_port_to_irq(unsig
 
 	if ((irq = evtchn_to_irq[caller_port]) == -1) {
 		if ((irq = find_unbound_irq(numa_node_id(), &cfg,
-					    &dynirq_chip)) < 0)
+					    &dynirq_chip, false)) < 0)
 			goto out;
 
 		evtchn_to_irq[caller_port] = irq;
@@ -473,7 +522,8 @@ static int bind_local_port_to_irq(unsign
 
 	BUG_ON(evtchn_to_irq[local_port] != -1);
 
-	if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip)) < 0) {
+	if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip,
+				    false)) < 0) {
 		if (close_evtchn(local_port))
 			BUG();
 		goto out;
@@ -527,7 +577,7 @@ static int bind_virq_to_irq(unsigned int
 
 	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
 		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
-					    &dynirq_chip)) < 0)
+					    &dynirq_chip, false)) < 0)
 			goto out;
 
 		bind_virq.virq = virq;
@@ -553,6 +603,7 @@ static int bind_virq_to_irq(unsigned int
 	return irq;
 }
 
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
 	struct evtchn_bind_ipi bind_ipi;
@@ -563,7 +614,7 @@ static int bind_ipi_to_irq(unsigned int
 
 	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
 		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
-					    &dynirq_chip)) < 0)
+					    &dynirq_chip, false)) < 0)
 			goto out;
 
 		bind_ipi.vcpu = cpu;
@@ -587,6 +638,7 @@ static int bind_ipi_to_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 	return irq;
 }
+#endif
 
 static void unbind_from_irq(unsigned int irq)
 {
@@ -594,6 +646,7 @@ static void unbind_from_irq(unsigned int
 	struct irq_cfg *cfg = irq_cfg(irq);
 	int evtchn = evtchn_from_irq_cfg(cfg);
 
+	BUG_IF_IPI(cfg);
 	spin_lock(&irq_mapping_update_lock);
 
 	if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) {
@@ -606,10 +659,12 @@ static void unbind_from_irq(unsigned int
 			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq_cfg(cfg)] = -1;
 			break;
+#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
 		case IRQT_IPI:
 			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
 				[index_from_irq_cfg(cfg)] = -1;
 			break;
+#endif
 		default:
 			break;
 		}
@@ -636,6 +691,46 @@ static void unbind_from_irq(unsigned int
 	spin_unlock(&irq_mapping_update_lock);
 }
 
+#ifndef PER_CPU_IPI_IRQ
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu)
+{
+	struct evtchn_close close;
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_cfg *cfg = irq_data_cfg(data);
+	int evtchn = evtchn_from_per_cpu_irq(cfg, cpu);
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(evtchn)) {
+		mask_evtchn(evtchn);
+
+		BUG_ON(cfg->bindcount <= 1);
+		cfg->bindcount--;
+		cpumask_clear_cpu(cpu, data->affinity);
+
+		close.port = evtchn;
+		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
+			BUG();
+
+		switch (type_from_irq_cfg(cfg)) {
+		case IRQT_IPI:
+			per_cpu(ipi_evtchn, cpu) = 0;
+			break;
+		default:
+			BUG();
+			break;
+		}
+
+		/* Closed ports are implicitly re-bound to VCPU0. */
+		bind_evtchn_to_cpu(evtchn, 0);
+
+		evtchn_to_irq[evtchn] = -1;
+	}
+
+	spin_unlock(&irq_mapping_update_lock);
+}
+#endif /* !PER_CPU_IPI_IRQ */
+
 int bind_caller_port_to_irqhandler(
 	unsigned int caller_port,
 	irq_handler_t handler,
@@ -730,6 +825,8 @@ int bind_virq_to_irqhandler(
 }
 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
 
+#ifdef CONFIG_SMP
+#ifdef PER_CPU_IPI_IRQ
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -753,7 +850,71 @@ int bind_ipi_to_irqhandler(
 
 	return irq;
 }
-EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
+#else
+int __cpuinit bind_ipi_to_irqaction(
+	unsigned int cpu,
+	struct irqaction *action)
+{
+	struct evtchn_bind_ipi bind_ipi;
+	struct irq_cfg *cfg;
+	int evtchn, retval = 0;
+
+	spin_lock(&irq_mapping_update_lock);
+
+	if (VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) {
+		spin_unlock(&irq_mapping_update_lock);
+		return -EBUSY;
+	}
+
+	if (ipi_irq < 0) {
+		if ((ipi_irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
+						&dynirq_chip, true)) < 0) {
+			spin_unlock(&irq_mapping_update_lock);
+			return ipi_irq;
+		}
+
+		/* Extra reference so count will never drop to zero. */
+		cfg->bindcount++;
+
+		cfg->info = mk_irq_info(IRQT_IPI, 0, 0);
+		retval = 1;
+	} else
+		cfg = irq_cfg(ipi_irq);
+
+	bind_ipi.vcpu = cpu;
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi))
+		BUG();
+
+	evtchn = bind_ipi.port;
+	evtchn_to_irq[evtchn] = ipi_irq;
+	per_cpu(ipi_evtchn, cpu) = evtchn;
+
+	bind_evtchn_to_cpu(evtchn, cpu);
+
+	cfg->bindcount++;
+
+	spin_unlock(&irq_mapping_update_lock);
+
+	if (retval == 0) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		unmask_evtchn(evtchn);
+		local_irq_restore(flags);
+	} else {
+		action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND;
+		retval = setup_irq(ipi_irq, action);
+		if (retval) {
+			unbind_from_per_cpu_irq(ipi_irq, cpu);
+			BUG_ON(retval > 0);
+			ipi_irq = retval;
+		}
+	}
+
+	return ipi_irq;
+}
+#endif /* PER_CPU_IPI_IRQ */
+#endif /* CONFIG_SMP */
 
 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
 {
@@ -777,8 +938,10 @@ void rebind_evtchn_to_cpu(int port, unsi
 
 static void rebind_irq_to_cpu(struct irq_data *data, unsigned int tcpu)
 {
-	int evtchn = evtchn_from_irq_data(data);
+	const struct irq_cfg *cfg = irq_data_cfg(data);
+	int evtchn = evtchn_from_irq_cfg(cfg);
 
+	BUG_IF_IPI(cfg);
 	if (VALID_EVTCHN(evtchn))
 		rebind_evtchn_to_cpu(evtchn, tcpu);
 }
@@ -1031,10 +1194,47 @@ int irq_ignore_unhandled(unsigned int ir
 	return !!(irq_status.flags & XENIRQSTAT_shared);
 }
 
+#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
+{
+	int evtchn = per_cpu(ipi_evtchn, cpu);
+
+#ifdef NMI_VECTOR
+	if (ipi == NMI_VECTOR) {
+		static int __read_mostly printed;
+		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
+
+		if (rc && !printed)
+			pr_warning("Unable (%d) to send NMI to CPU#%u\n",
+				   printed = rc, cpu);
+		return;
+	}
+#endif
+
+	if (VALID_EVTCHN(evtchn)
+	    && !test_and_set_bit(ipi, per_cpu(ipi_pending, cpu))
+	    && !test_evtchn(evtchn))
+		notify_remote_via_evtchn(evtchn);
+}
+
+void clear_ipi_evtchn(void)
+{
+	int evtchn = percpu_read(ipi_evtchn);
+
+	BUG_ON(!VALID_EVTCHN(evtchn));
+	clear_evtchn(evtchn);
+}
+#endif
+
 void notify_remote_via_irq(int irq)
 {
-	int evtchn = evtchn_from_irq(irq);
+	const struct irq_cfg *cfg = irq_cfg(irq);
+	int evtchn;
 
+	if (WARN_ON_ONCE(!cfg))
+		return;
+	BUG_IF_IPI(cfg);
+	evtchn = evtchn_from_irq_cfg(cfg);
 	if (VALID_EVTCHN(evtchn))
 		notify_remote_via_evtchn(evtchn);
 }
@@ -1042,7 +1242,12 @@ EXPORT_SYMBOL_GPL(notify_remote_via_irq)
 
 int irq_to_evtchn_port(int irq)
 {
-	return evtchn_from_irq(irq);
+	const struct irq_cfg *cfg = irq_cfg(irq);
+
+	if (!cfg)
+		return 0;
+	BUG_IF_IPI(cfg);
+	return evtchn_from_irq_cfg(cfg);
 }
 EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
 
@@ -1130,12 +1335,22 @@ static void restore_cpu_virqs(unsigned i
 
 static void restore_cpu_ipis(unsigned int cpu)
 {
+#ifdef CONFIG_SMP
 	struct evtchn_bind_ipi bind_ipi;
-	int ipi, irq, evtchn;
+	int evtchn;
+#ifdef PER_CPU_IPI_IRQ
+	int ipi, irq;
 
 	for (ipi = 0; ipi < NR_IPIS; ipi++) {
 		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
 			continue;
+#else
+#define ipi 0
+#define irq ipi_irq
+	if (irq == -1
+	    || !VALID_EVTCHN(per_cpu(ipi_evtchn, cpu)))
+		return;
+#endif
 
 		BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_IPI, ipi, 0));
 
@@ -1148,13 +1363,23 @@ static void restore_cpu_ipis(unsigned in
 
 		/* Record the new mapping. */
 		evtchn_to_irq[evtchn] = irq;
+#ifdef PER_CPU_IPI_IRQ
 		irq_cfg(irq)->info = mk_irq_info(IRQT_IPI, ipi, evtchn);
+#else
+		per_cpu(ipi_evtchn, cpu) = evtchn;
+#endif
 		bind_evtchn_to_cpu(evtchn, cpu);
 
 		/* Ready for use. */
 		if (!(irq_to_desc(irq)->status & IRQ_DISABLED))
 			unmask_evtchn(evtchn);
+#ifdef PER_CPU_IPI_IRQ
 	}
+#else
+#undef irq
+#undef ipi
+#endif
+#endif /* CONFIG_SMP */
 }
 
 static int evtchn_resume(struct sys_device *dev)
@@ -1358,7 +1583,8 @@ int evtchn_map_pirq(int irq, int xen_pir
 		struct irq_cfg *cfg;
 
 		spin_lock(&irq_mapping_update_lock);
-		irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip);
+		irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip,
+				       false);
 		if (irq >= 0) {
 			BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND);
 			cfg->bindcount++;
--- head-2011-02-17.orig/drivers/xen/core/smpboot.c	2011-03-03 16:13:04.000000000 +0100
+++ head-2011-02-17/drivers/xen/core/smpboot.c	2011-03-03 16:14:20.000000000 +0100
@@ -36,19 +36,7 @@ cpumask_var_t vcpu_initialized_mask;
 DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
-static DEFINE_PER_CPU(int, call1func_irq);
-static DEFINE_PER_CPU(int, reboot_irq);
-static char resched_name[NR_CPUS][15];
-static char callfunc_name[NR_CPUS][15];
-static char call1func_name[NR_CPUS][15];
-static char reboot_name[NR_CPUS][15];
-
-#ifdef CONFIG_IRQ_WORK
-static DEFINE_PER_CPU(int, irq_work_irq);
-static char irq_work_name[NR_CPUS][15];
-#endif
+static int __read_mostly ipi_irq = -1;
 
 void __init prefill_possible_map(void)
 {
@@ -75,76 +63,59 @@ void __init prefill_possible_map(void)
 		++total_cpus;
 }
 
-static int __cpuinit xen_smp_intr_init(unsigned int cpu)
+static irqreturn_t ipi_interrupt(int irq, void *dev_id)
 {
-	int rc;
-
-	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) =
+	static void (*const handlers[])(struct pt_regs *) = {
+		[RESCHEDULE_VECTOR] = smp_reschedule_interrupt,
+		[CALL_FUNCTION_VECTOR] = smp_call_function_interrupt,
+		[CALL_FUNC_SINGLE_VECTOR] = smp_call_function_single_interrupt,
+		[REBOOT_VECTOR] = smp_reboot_interrupt,
 #ifdef CONFIG_IRQ_WORK
-	per_cpu(irq_work_irq, cpu) =
+		[IRQ_WORK_VECTOR] = smp_irq_work_interrupt,
 #endif
-	per_cpu(call1func_irq, cpu) = per_cpu(reboot_irq, cpu) = -1;
-
-	sprintf(resched_name[cpu], "resched%u", cpu);
-	rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
-				    cpu,
-				    smp_reschedule_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    resched_name[cpu],
-				    NULL);
-	if (rc < 0)
-		goto fail;
-	per_cpu(resched_irq, cpu) = rc;
-
-	sprintf(callfunc_name[cpu], "callfunc%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
-				    cpu,
-				    smp_call_function_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    callfunc_name[cpu],
-				    NULL);
-	if (rc < 0)
-		goto fail;
-	per_cpu(callfunc_irq, cpu) = rc;
-
-	sprintf(call1func_name[cpu], "call1func%u", cpu);
-	rc = bind_ipi_to_irqhandler(CALL_FUNC_SINGLE_VECTOR,
-				    cpu,
-				    smp_call_function_single_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    call1func_name[cpu],
-				    NULL);
-	if (rc < 0)
-		goto fail;
-	per_cpu(call1func_irq, cpu) = rc;
+	};
+	unsigned long *pending = __get_cpu_var(ipi_pending);
+	struct pt_regs *regs = get_irq_regs();
+	irqreturn_t ret = IRQ_NONE;
+
+	for (;;) {
+		unsigned int ipi = find_first_bit(pending, NR_IPIS);
+
+		if (ipi >= NR_IPIS) {
+			clear_ipi_evtchn();
+			ipi = find_first_bit(pending, NR_IPIS);
+		}
+		if (ipi >= NR_IPIS)
+			return ret;
+		ret = IRQ_HANDLED;
+		do {
+			clear_bit(ipi, pending);
+			handlers[ipi](regs);
+			ipi = find_next_bit(pending, NR_IPIS, ipi);
+		} while (ipi < NR_IPIS);
+	}
+}
 
-	sprintf(reboot_name[cpu], "reboot%u", cpu);
-	rc = bind_ipi_to_irqhandler(REBOOT_VECTOR,
-				    cpu,
-				    smp_reboot_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    reboot_name[cpu],
-				    NULL);
-	if (rc < 0)
-		goto fail;
-	per_cpu(reboot_irq, cpu) = rc;
+static int __cpuinit xen_smp_intr_init(unsigned int cpu)
+{
+	static struct irqaction ipi_action = {
+		.handler = ipi_interrupt,
+		.flags = IRQF_DISABLED,
+		.name = "ipi"
+	};
+	int rc;
 
-#ifdef CONFIG_IRQ_WORK
-	sprintf(irq_work_name[cpu], "irqwork%u", cpu);
-	rc = bind_ipi_to_irqhandler(IRQ_WORK_VECTOR,
-				    cpu,
-				    smp_irq_work_interrupt,
-				    IRQF_DISABLED|IRQF_NOBALANCING,
-				    irq_work_name[cpu],
-				    NULL);
+	rc = bind_ipi_to_irqaction(cpu, &ipi_action);
 	if (rc < 0)
-		goto fail;
-	per_cpu(irq_work_irq, cpu) = rc;
-#endif
+		return rc;
+	if (ipi_irq < 0)
+		ipi_irq = rc;
+	else
+		BUG_ON(ipi_irq != rc);
 
 	rc = xen_spinlock_init(cpu);
 	if (rc < 0)
-		goto fail;
+		goto unbind_ipi;
 
 	if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
 		goto fail;
@@ -152,19 +123,9 @@ static int __cpuinit xen_smp_intr_init(u
 	return 0;
 
  fail:
-	if (per_cpu(resched_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	if (per_cpu(callfunc_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	if (per_cpu(call1func_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
-	if (per_cpu(reboot_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL);
-#ifdef CONFIG_IRQ_WORK
-	if (per_cpu(irq_work_irq, cpu) >= 0)
-		unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL);
-#endif
 	xen_spinlock_cleanup(cpu);
+ unbind_ipi:
+	unbind_from_per_cpu_irq(ipi_irq, cpu);
 	return rc;
 }
 
@@ -174,13 +135,7 @@ static void __cpuinit xen_smp_intr_exit(
 	if (cpu != 0)
 		local_teardown_timer(cpu);
 
-	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(call1func_irq, cpu), NULL);
-	unbind_from_irqhandler(per_cpu(reboot_irq, cpu), NULL);
-#ifdef CONFIG_IRQ_WORK
-	unbind_from_irqhandler(per_cpu(irq_work_irq, cpu), NULL);
-#endif
+	unbind_from_per_cpu_irq(ipi_irq, cpu);
 	xen_spinlock_cleanup(cpu);
 }
 #endif
--- head-2011-02-17.orig/include/xen/evtchn.h	2010-11-23 15:07:01.000000000 +0100
+++ head-2011-02-17/include/xen/evtchn.h	2011-02-02 15:09:43.000000000 +0100
@@ -94,6 +94,8 @@ int bind_virq_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#if defined(CONFIG_SMP) && !defined(MODULE)
+#ifndef CONFIG_X86
 int bind_ipi_to_irqhandler(
 	unsigned int ipi,
 	unsigned int cpu,
@@ -101,6 +103,13 @@ int bind_ipi_to_irqhandler(
 	unsigned long irqflags,
 	const char *devname,
 	void *dev_id);
+#else
+int bind_ipi_to_irqaction(
+	unsigned int cpu,
+	struct irqaction *action);
+DECLARE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending);
+#endif
+#endif
 
 /*
  * Common unbind function for all event sources. Takes IRQ to unbind from.
@@ -109,6 +118,11 @@ int bind_ipi_to_irqhandler(
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+/* Specialized unbind function for per-CPU IRQs. */
+void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu);
+#endif
+
 #ifndef CONFIG_XEN
 void irq_resume(void);
 #endif
@@ -180,5 +194,10 @@ int xen_test_irq_pending(int irq);
 void notify_remote_via_irq(int irq);
 int irq_to_evtchn_port(int irq);
 
+#if defined(CONFIG_SMP) && !defined(MODULE) && defined(CONFIG_X86)
+void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu);
+void clear_ipi_evtchn(void);
+#endif
+
 #endif /* __ASM_EVTCHN_H__ */
 #endif /* CONFIG_PARAVIRT_XEN */