qubes-linux-kernel/patches.xen/xen-spinlock-poll-early

From: jbeulich@novell.com
Subject: Go into polling mode early if lock owner is not running
Patch-mainline: n/a
This could be merged into the original ticket spinlock code once
validated, were it not for the dependency on smp-processor-id.h, which
only gets introduced with the 2.6.32 merge.
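
The idea in brief: a waiter spinning on a ticket lock whose holder's
vCPU has been preempted by the hypervisor can only burn its timeslice,
since the holder cannot release the lock while it is off-CPU. The patch
therefore records the owning CPU in each lock and, via vcpu_running()
(backed by the runstate area registered in xen_spinlock_init()),
collapses the spin count from 1 << 10 down to 1 when the owner is not
running, so the waiter drops into the event-channel poll in
xen_spin_wait() almost immediately. The userspace C sketch below
illustrates only this adaptive-spin pattern; owner_running() is a
hypothetical stand-in for vcpu_running(), and sched_yield() stands in
for blocking in the hypervisor:

#include <stdatomic.h>
#include <stdbool.h>
#include <sched.h>

struct lock {
        atomic_flag locked;     /* init with ATOMIC_FLAG_INIT */
        atomic_int owner;       /* id of the current holder */
};

/* Hypothetical stand-in for vcpu_running(); the kernel consults the
 * per-vCPU runstate area shared with the hypervisor instead. */
static bool owner_running(int owner)
{
        (void)owner;
        return true;            /* conservative placeholder */
}

static void lock_acquire(struct lock *l, int self)
{
        for (;;) {
                /* The patch's central idea: spin up to 1 << 10 times
                 * only while the holder can make progress; if it has
                 * been descheduled, give up after a single attempt. */
                int count = owner_running(atomic_load_explicit(&l->owner,
                                memory_order_relaxed)) ? 1 << 10 : 1;

                while (count--)
                        if (!atomic_flag_test_and_set_explicit(&l->locked,
                                        memory_order_acquire)) {
                                atomic_store_explicit(&l->owner, self,
                                                memory_order_relaxed);
                                return;
                        }

                sched_yield();  /* kernel: poll the per-CPU event channel */
        }
}

static void lock_release(struct lock *l)
{
        atomic_flag_clear_explicit(&l->locked, memory_order_release);
}

Accordingly, xen_spin_wait() no longer returns a bool but the next spin
count to use: 0 on success, UINT_MAX while the poll event channel is
not yet set up, and __ticket_spin_count(lock) otherwise, which the
reworked do/while loops in the __ticket_spin_lock*() variants consume.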
--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock.h 2011-02-01 15:09:47.000000000 +0100
+++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock.h 2011-01-18 15:47:44.000000000 +0100
@@ -41,11 +41,12 @@
 #ifdef TICKET_SHIFT
 
 #include <asm/irqflags.h>
+#include <asm/smp-processor-id.h>
 
 int xen_spinlock_init(unsigned int cpu);
 void xen_spinlock_cleanup(unsigned int cpu);
-bool xen_spin_wait(arch_spinlock_t *, unsigned int *token,
-                   unsigned int flags);
+unsigned int xen_spin_wait(arch_spinlock_t *, unsigned int *token,
+                           unsigned int flags);
 unsigned int xen_spin_adjust(const arch_spinlock_t *, unsigned int token);
 void xen_spin_kick(arch_spinlock_t *, unsigned int token);
@@ -113,6 +114,9 @@ static __always_inline int __ticket_spin
                      :
                      : "memory", "cc");
 
+        if (tmp)
+                lock->owner = raw_smp_processor_id();
+
         return tmp;
 }
 #elif TICKET_SHIFT == 16
@@ -179,10 +183,15 @@ static __always_inline int __ticket_spin
                      :
                      : "memory", "cc");
 
+        if (tmp)
+                lock->owner = raw_smp_processor_id();
+
         return tmp;
 }
 #endif
 
+#define __ticket_spin_count(lock) (vcpu_running((lock)->owner) ? 1 << 10 : 1)
+
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
         int tmp = ACCESS_ONCE(lock->slock);
@@ -204,16 +213,18 @@ static __always_inline void __ticket_spi
         bool free;
 
         __ticket_spin_lock_preamble;
-        if (likely(free)) {
+        if (likely(free))
                 arch_local_irq_restore(flags);
-                return;
+        else {
+                token = xen_spin_adjust(lock, token);
+                arch_local_irq_restore(flags);
+                count = __ticket_spin_count(lock);
+                do {
+                        __ticket_spin_lock_body;
+                } while (unlikely(!count)
+                         && (count = xen_spin_wait(lock, &token, flags)));
         }
-        token = xen_spin_adjust(lock, token);
-        arch_local_irq_restore(flags);
-        do {
-                count = 1 << 10;
-                __ticket_spin_lock_body;
-        } while (unlikely(!count) && !xen_spin_wait(lock, &token, flags));
+        lock->owner = raw_smp_processor_id();
 }
 
 static __always_inline void __ticket_spin_lock_flags(arch_spinlock_t *lock,
@@ -223,13 +234,15 @@ static __always_inline void __ticket_spi
         bool free;
 
         __ticket_spin_lock_preamble;
-        if (likely(free))
-                return;
-        token = xen_spin_adjust(lock, token);
-        do {
-                count = 1 << 10;
-                __ticket_spin_lock_body;
-        } while (unlikely(!count) && !xen_spin_wait(lock, &token, flags));
+        if (unlikely(!free)) {
+                token = xen_spin_adjust(lock, token);
+                count = __ticket_spin_count(lock);
+                do {
+                        __ticket_spin_lock_body;
+                } while (unlikely(!count)
+                         && (count = xen_spin_wait(lock, &token, flags)));
+        }
+        lock->owner = raw_smp_processor_id();
 }
 
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
@@ -246,6 +259,7 @@ static __always_inline void __ticket_spi
 #undef __ticket_spin_lock_preamble
 #undef __ticket_spin_lock_body
 #undef __ticket_spin_unlock_body
+#undef __ticket_spin_count
 
 #endif
 #define __arch_spin(n) __ticket_spin_##n
--- head-2011-03-11.orig/arch/x86/include/mach-xen/asm/spinlock_types.h 2011-02-01 14:55:46.000000000 +0100
+++ head-2011-03-11/arch/x86/include/mach-xen/asm/spinlock_types.h 2010-01-26 11:27:24.000000000 +0100
@@ -26,6 +26,11 @@ typedef union {
 # define TICKET_SHIFT 16
         u16 cur, seq;
 #endif
+#if CONFIG_NR_CPUS <= 256
+        u8 owner;
+#else
+        u16 owner;
+#endif
 #else
         /*
          * This differs from the pre-2.6.24 spinlock by always using xchgb
--- head-2011-03-11.orig/drivers/xen/core/spinlock.c 2011-03-15 16:18:37.000000000 +0100
+++ head-2011-03-11/drivers/xen/core/spinlock.c 2011-03-15 16:19:26.000000000 +0100
@@ -39,6 +39,8 @@ int __cpuinit xen_spinlock_init(unsigned
         struct evtchn_bind_ipi bind_ipi;
         int rc;
 
+        setup_runstate_area(cpu);
+
         WARN_ON(per_cpu(poll_evtchn, cpu));
         bind_ipi.vcpu = cpu;
         rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
@@ -124,18 +126,17 @@ unsigned int xen_spin_adjust(const arch_
         return spin_adjust(percpu_read(_spinning), lock, token);
 }
 
-bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok,
-                   unsigned int flags)
+unsigned int xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok,
+                           unsigned int flags)
 {
+        unsigned int rm_idx, cpu = raw_smp_processor_id();
         bool rc;
         typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask;
-        unsigned int rm_idx;
         struct spinning spinning, *other;
 
         /* If kicker interrupt not initialized yet, just spin. */
-        if (unlikely(!cpu_online(raw_smp_processor_id()))
-            || unlikely(!percpu_read(poll_evtchn)))
-                return false;
+        if (unlikely(!cpu_online(cpu)) || unlikely(!percpu_read(poll_evtchn)))
+                return UINT_MAX;
 
         /* announce we're spinning */
         spinning.ticket = *ptok >> TICKET_SHIFT;
@@ -155,6 +156,7 @@ bool xen_spin_wait(arch_spinlock_t *lock
          * we weren't looking.
          */
         if (lock->cur == spinning.ticket) {
+                lock->owner = cpu;
                 /*
                  * If we interrupted another spinlock while it was
                  * blocking, make sure it doesn't block (again)
@@ -251,6 +253,8 @@ bool xen_spin_wait(arch_spinlock_t *lock
                         if (!free)
                                 token = spin_adjust(other->prev, lock, token);
                         other->ticket = token >> TICKET_SHIFT;
+                        if (lock->cur == other->ticket)
+                                lock->owner = cpu;
                 } while ((other = other->prev) != NULL);
                 lock = spinning.lock;
         }
@@ -261,7 +265,7 @@ bool xen_spin_wait(arch_spinlock_t *lock
         arch_local_irq_restore(upcall_mask);
 
         *ptok = lock->cur | (spinning.ticket << TICKET_SHIFT);
-        return rc;
+        return rc ? 0 : __ticket_spin_count(lock);
 }
 
 void xen_spin_kick(arch_spinlock_t *lock, unsigned int token)