From: jbeulich@novell.com
Subject: Go into polling mode early if lock owner is not running
Patch-mainline: n/a

This could be merged into the original ticket spinlock code once
validated, were it not for the dependency on smp-processor-id.h, which
only gets introduced in the 2.6.32 merge.

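The idea, in short: the lock records which CPU took it (the new "owner"
field), and a contending CPU checks that vCPU's runstate before deciding
how long to busy-wait.  A minimal illustrative sketch of the heuristic the
__ticket_spin_count() macro below implements (spin_count_for() is a
hypothetical name used only for this example, not part of the patch):

	/*
	 * If the vCPU owning the lock is currently running, busy-waiting
	 * for a while is worthwhile; if the hypervisor has preempted it,
	 * spin only once and fall through to xen_spin_wait(), which
	 * blocks in polling mode until the lock holder kicks us.
	 */
	static inline unsigned int spin_count_for(const arch_spinlock_t *lock)
	{
		return per_cpu(runstate.state, lock->owner) == RUNSTATE_running
		       ? 1 << 10	/* owner running: up to 1024 spins */
		       : 1;		/* owner preempted: poll immediately */
	}

The lock slow paths below re-evaluate this count on each retry, so a waiter
whose lock owner has been descheduled falls back to xen_spin_wait() almost
immediately instead of burning its full spin budget.
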
--- head-2010-03-15.orig/arch/x86/include/mach-xen/asm/spinlock.h	2010-02-24 12:25:27.000000000 +0100
+++ head-2010-03-15/arch/x86/include/mach-xen/asm/spinlock.h	2010-02-24 12:39:22.000000000 +0100
@@ -41,6 +41,10 @@
 #ifdef TICKET_SHIFT
 
 #include <asm/irqflags.h>
+#include <asm/smp-processor-id.h>
+#include <xen/interface/vcpu.h>
+
+DECLARE_PER_CPU(struct vcpu_runstate_info, runstate);
 
 int xen_spinlock_init(unsigned int cpu);
 void xen_spinlock_cleanup(unsigned int cpu);
@@ -113,6 +117,9 @@ static __always_inline int __ticket_spin
 	:
 	: "memory", "cc");
 
+	if (tmp)
+		lock->owner = raw_smp_processor_id();
+
 	return tmp;
 }
 #elif TICKET_SHIFT == 16
@@ -179,10 +186,17 @@ static __always_inline int __ticket_spin
 	:
 	: "memory", "cc");
 
+	if (tmp)
+		lock->owner = raw_smp_processor_id();
+
 	return tmp;
 }
 #endif
 
+#define __ticket_spin_count(lock) \
+	(per_cpu(runstate.state, (lock)->owner) == RUNSTATE_running \
+	 ? 1 << 10 : 1)
+
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
@@ -204,16 +218,18 @@ static __always_inline void __ticket_spi
 	bool free;
 
 	__ticket_spin_lock_preamble;
-	if (likely(free)) {
+	if (likely(free))
+		raw_local_irq_restore(flags);
+	else {
+		token = xen_spin_adjust(lock, token);
 		raw_local_irq_restore(flags);
-		return;
+		do {
+			count = __ticket_spin_count(lock);
+			__ticket_spin_lock_body;
+		} while (unlikely(!count)
+			 && !xen_spin_wait(lock, &token, flags));
 	}
-	token = xen_spin_adjust(lock, token);
-	raw_local_irq_restore(flags);
-	do {
-		count = 1 << 10;
-		__ticket_spin_lock_body;
-	} while (unlikely(!count) && !xen_spin_wait(lock, &token, flags));
+	lock->owner = raw_smp_processor_id();
 }
 
 static __always_inline void __ticket_spin_lock_flags(arch_spinlock_t *lock,
@@ -223,13 +239,15 @@ static __always_inline void __ticket_spi
 	bool free;
 
 	__ticket_spin_lock_preamble;
-	if (likely(free))
-		return;
-	token = xen_spin_adjust(lock, token);
-	do {
-		count = 1 << 10;
-		__ticket_spin_lock_body;
-	} while (unlikely(!count) && !xen_spin_wait(lock, &token, flags));
+	if (unlikely(!free)) {
+		token = xen_spin_adjust(lock, token);
+		do {
+			count = __ticket_spin_count(lock);
+			__ticket_spin_lock_body;
+		} while (unlikely(!count)
+			 && !xen_spin_wait(lock, &token, flags));
+	}
+	lock->owner = raw_smp_processor_id();
 }
 
 static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
@@ -246,6 +264,7 @@ static __always_inline void __ticket_spi
 #undef __ticket_spin_lock_preamble
 #undef __ticket_spin_lock_body
 #undef __ticket_spin_unlock_body
+#undef __ticket_spin_count
 #endif
 
 #define __arch_spin(n) __ticket_spin_##n
--- head-2010-03-15.orig/arch/x86/include/mach-xen/asm/spinlock_types.h	2010-01-28 10:38:23.000000000 +0100
+++ head-2010-03-15/arch/x86/include/mach-xen/asm/spinlock_types.h	2010-01-26 11:27:24.000000000 +0100
@@ -26,6 +26,11 @@ typedef union {
 # define TICKET_SHIFT 16
 	u16 cur, seq;
 #endif
+#if CONFIG_NR_CPUS <= 256
+	u8 owner;
+#else
+	u16 owner;
+#endif
 #else
 /*
  * This differs from the pre-2.6.24 spinlock by always using xchgb
--- head-2010-03-15.orig/arch/x86/kernel/time-xen.c	2010-03-02 10:20:07.000000000 +0100
+++ head-2010-03-15/arch/x86/kernel/time-xen.c	2010-03-02 10:20:42.000000000 +0100
@@ -58,7 +58,7 @@ static u32 shadow_tv_version;
 static u64 jiffies_bias, system_time_bias;
 
 /* Current runstate of each CPU (updated automatically by the hypervisor). */
-static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
+DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
 
 /* Must be signed, as it's compared with s64 quantities which can be -ve. */
 #define NS_PER_TICK (1000000000LL/HZ)
--- head-2010-03-15.orig/drivers/xen/core/spinlock.c	2010-02-26 15:34:33.000000000 +0100
+++ head-2010-03-15/drivers/xen/core/spinlock.c	2010-03-19 08:48:35.000000000 +0100
@@ -39,6 +39,8 @@ int __cpuinit xen_spinlock_init(unsigned
 	};
 	int rc;
 
+	setup_runstate_area(cpu);
+
 	rc = bind_ipi_to_irqaction(SPIN_UNLOCK_VECTOR,
 				   cpu,
 				   &spinlock_action);
@@ -86,6 +88,7 @@ unsigned int xen_spin_adjust(const arch_
 bool xen_spin_wait(arch_spinlock_t *lock, unsigned int *ptok,
 		   unsigned int flags)
 {
+	unsigned int cpu = raw_smp_processor_id();
 	int irq = spinlock_irq;
 	bool rc;
 	typeof(vcpu_info(0)->evtchn_upcall_mask) upcall_mask;
@@ -93,7 +96,7 @@ bool xen_spin_wait(arch_spinlock_t *lock
 	struct spinning spinning, *other;
 
 	/* If kicker interrupt not initialized yet, just spin. */
-	if (unlikely(irq < 0) || unlikely(!cpu_online(raw_smp_processor_id())))
+	if (unlikely(irq < 0) || unlikely(!cpu_online(cpu)))
 		return false;
 
 	/* announce we're spinning */
@@ -114,6 +117,7 @@ bool xen_spin_wait(arch_spinlock_t *lock
 	 * we weren't looking.
 	 */
 	if (lock->cur == spinning.ticket) {
+		lock->owner = cpu;
 		/*
 		 * If we interrupted another spinlock while it was
 		 * blocking, make sure it doesn't block (again)
@@ -207,6 +211,8 @@ bool xen_spin_wait(arch_spinlock_t *lock
 		if (!free)
 			token = spin_adjust(other->prev, lock, token);
 		other->ticket = token >> TICKET_SHIFT;
+		if (lock->cur == other->ticket)
+			lock->owner = cpu;
 	}
 	raw_local_irq_restore(upcall_mask);
 