From: www.kernel.org
Subject: Linux 2.6.18
Patch-mainline: 2.6.18

Automatically created from "patches.kernel.org/patch-2.6.18" by xen-port-patches.py

Acked-by: jbeulich@novell.com

--- head-2010-04-29.orig/arch/x86/Kconfig	2010-03-24 15:02:14.000000000 +0100
+++ head-2010-04-29/arch/x86/Kconfig	2010-03-24 15:06:08.000000000 +0100
@@ -70,7 +70,6 @@ config ARCH_DEFCONFIG
 
 config GENERIC_TIME
 	def_bool y
-	depends on !X86_XEN
 
 config GENERIC_CMOS_UPDATE
 	def_bool y
@@ -1665,7 +1664,7 @@ config KEXEC_JUMP
 	  code in physical address mode via KEXEC
 
 config PHYSICAL_START
-	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP || XEN)
 	default "0x1000000"
 	---help---
 	  This gives the physical address where the kernel is loaded.
--- head-2010-04-29.orig/arch/x86/kernel/Makefile	2010-03-24 15:01:37.000000000 +0100
+++ head-2010-04-29/arch/x86/kernel/Makefile	2010-03-24 15:06:08.000000000 +0100
@@ -138,5 +138,5 @@ ifeq ($(CONFIG_X86_64),y)
 pci-dma_64-$(CONFIG_XEN) += pci-dma_32.o
 endif
 
-disabled-obj-$(CONFIG_XEN) := i8259_$(BITS).o reboot.o smpboot_$(BITS).o
+disabled-obj-$(CONFIG_XEN) := i8253.o i8259_$(BITS).o reboot.o smpboot_$(BITS).o tsc_$(BITS).o
 %/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
--- head-2010-04-29.orig/arch/x86/kernel/setup64-xen.c	2008-01-28 12:24:19.000000000 +0100
+++ head-2010-04-29/arch/x86/kernel/setup64-xen.c	2010-03-24 15:06:08.000000000 +0100
@@ -363,5 +363,7 @@ void __cpuinit cpu_init (void)
 
 	fpu_init();
 
-	raw_local_save_flags(kernel_eflags);
+	asm ("pushfq; popq %0" : "=rm" (kernel_eflags));
+	if (raw_irqs_disabled())
+		kernel_eflags &= ~X86_EFLAGS_IF;
 }
--- head-2010-04-29.orig/arch/x86/kernel/time-xen.c	2010-02-24 11:50:47.000000000 +0100
+++ head-2010-04-29/arch/x86/kernel/time-xen.c	2010-03-24 15:06:08.000000000 +0100
@@ -45,7 +45,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -76,8 +75,13 @@
 
 #if defined (__i386__)
 #include
+#include
+DEFINE_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
 #endif
 
+#define XEN_SHIFT 22
+
 int pit_latch_buggy;		/* extern */
 
 #if defined(__x86_64__)
@@ -97,10 +101,6 @@ extern unsigned long wall_jiffies;
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-extern struct init_timer_opts timer_tsc_init;
-extern struct timer_opts timer_tsc;
-#define timer_none timer_tsc
-
 /* These are peridically updated in shared_info, and then copied here. */
 struct shadow_time_info {
 	u64 tsc_timestamp;	/* TSC at last update of time vals. */
@@ -175,24 +175,6 @@ static int __init __permitted_clock_jitt
 }
 __setup("permitted_clock_jitter=", __permitted_clock_jitter);
 
-#if 0
-static void delay_tsc(unsigned long loops)
-{
-	unsigned long bclock, now;
-
-	rdtscl(bclock);
-	do {
-		rep_nop();
-		rdtscl(now);
-	} while ((now - bclock) < loops);
-}
-
-struct timer_opts timer_tsc = {
-	.name = "tsc",
-	.delay = delay_tsc,
-};
-#endif
-
 /*
  * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
  * yielding a 64-bit result.
@@ -229,14 +211,6 @@ static inline u64 scale_delta(u64 delta,
 	return product;
 }
 
-#if 0 /* defined (__i386__) */
-int read_current_timer(unsigned long *timer_val)
-{
-	rdtscl(*timer_val);
-	return 0;
-}
-#endif
-
 void init_cpu_khz(void)
 {
 	u64 __cpu_khz = 1000000ULL << 32;
@@ -256,6 +230,7 @@ static u64 get_nsec_offset(struct shadow
 	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
 }
 
+#ifdef CONFIG_X86_64
 static unsigned long get_usec_offset(struct shadow_time_info *shadow)
 {
 	u64 now, delta;
@@ -263,6 +238,7 @@ static unsigned long get_usec_offset(str
 	delta = now - shadow->tsc_timestamp;
 	return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
 }
+#endif
 
 static void __update_wallclock(time_t sec, long nsec)
 {
@@ -377,6 +353,8 @@ void rtc_cmos_write(unsigned char val, u
 }
 EXPORT_SYMBOL(rtc_cmos_write);
 
+#ifdef CONFIG_X86_64
+
 /*
  * This version of gettimeofday has microsecond resolution
  * and better than microsecond precision on fast x86 machines with TSC.
@@ -515,6 +493,8 @@ int do_settimeofday(struct timespec *tv)
 
 EXPORT_SYMBOL(do_settimeofday);
 
+#endif
+
 static void sync_xen_wallclock(unsigned long dummy);
 static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
 static void sync_xen_wallclock(unsigned long dummy)
@@ -566,11 +546,15 @@ static int set_rtc_mmss(unsigned long no
 	return retval;
 }
 
+#ifdef CONFIG_X86_64
 /* monotonic_clock(): returns # of nanoseconds passed since time_init()
  * Note: This function is required to return accurate
  * time even in the absence of multiple timer ticks.
  */
 unsigned long long monotonic_clock(void)
+#else
+unsigned long long sched_clock(void)
+#endif
 {
 	unsigned int cpu = get_cpu();
 	struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
@@ -590,9 +574,9 @@ unsigned long long monotonic_clock(void)
 	return time;
 }
+#ifdef CONFIG_X86_64
 EXPORT_SYMBOL(monotonic_clock);
 
-#ifdef __x86_64__
 unsigned long long sched_clock(void)
 {
 	return monotonic_clock();
 }
@@ -762,6 +746,89 @@ irqreturn_t timer_interrupt(int irq, voi
 	return IRQ_HANDLED;
 }
 
+#ifndef CONFIG_X86_64
+
+void tsc_init(void)
+{
+	init_cpu_khz();
+	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
+	       cpu_khz / 1000, cpu_khz % 1000);
+
+	use_tsc_delay();
+}
+
+#include
+
+void mark_tsc_unstable(void)
+{
+#ifndef CONFIG_XEN /* XXX Should tell the hypervisor about this fact. */
+	tsc_unstable = 1;
+#endif
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+static cycle_t xen_clocksource_read(void)
+{
+#ifdef CONFIG_SMP
+	static cycle_t last_ret;
+#ifndef CONFIG_64BIT
+	cycle_t last = cmpxchg64(&last_ret, 0, 0);
+#else
+	cycle_t last = last_ret;
+#define cmpxchg64 cmpxchg
+#endif
+	cycle_t ret = sched_clock();
+
+	if (unlikely((s64)(ret - last) < 0)) {
+		if (last - ret > permitted_clock_jitter
+		    && printk_ratelimit()) {
+			unsigned int cpu = get_cpu();
+			struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
+
+			printk(KERN_WARNING "clocksource/%u: "
+			       "Time went backwards: "
+			       "ret=%Lx delta=%Ld shadow=%Lx offset=%Lx\n",
+			       cpu, ret, ret - last, shadow->system_timestamp,
+			       get_nsec_offset(shadow));
+			put_cpu();
+		}
+		return last;
+	}
+
+	for (;;) {
+		cycle_t cur = cmpxchg64(&last_ret, last, ret);
+
+		if (cur == last || (s64)(ret - cur) < 0)
+			return ret;
+		last = cur;
+	}
+#else
+	return sched_clock();
+#endif
+}
+
+static struct clocksource clocksource_xen = {
+	.name			= "xen",
+	.rating			= 400,
+	.read			= xen_clocksource_read,
+	.mask			= CLOCKSOURCE_MASK(64),
+	.mult			= 1 << XEN_SHIFT,	/* time directly in nanoseconds */
+	.shift			= XEN_SHIFT,
+	.is_continuous		= 1,
+};
+
+static int __init init_xen_clocksource(void)
+{
+	clocksource_xen.mult = clocksource_khz2mult(cpu_khz,
+						clocksource_xen.shift);
+
+	return clocksource_register(&clocksource_xen);
+}
+
+module_init(init_xen_clocksource);
+
+#endif
+
 static void init_missing_ticks_accounting(unsigned int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
@@ -908,7 +975,7 @@ static void setup_cpu0_timer_irq(void)
 		VIRQ_TIMER,
 		0,
 		timer_interrupt,
-		SA_INTERRUPT,
+		IRQF_DISABLED|IRQF_TIMER,
 		"timer0",
 		NULL);
 	BUG_ON(per_cpu(timer_irq, 0) < 0);
@@ -950,11 +1017,11 @@ void __init time_init(void)
 
 	update_wallclock();
 
+#ifdef CONFIG_X86_64
 	init_cpu_khz();
 	printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
 	       cpu_khz / 1000, cpu_khz % 1000);
 
-#if defined(__x86_64__)
 	vxtime.mode = VXTIME_TSC;
 	vxtime.quot = (1000000L << 32) / vxtime_hz;
 	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
@@ -1129,7 +1196,7 @@ int __cpuinit local_setup_timer(unsigned
 		irq = bind_virq_to_irqhandler(VIRQ_TIMER,
 					      cpu,
 					      timer_interrupt,
-					      SA_INTERRUPT,
+					      IRQF_DISABLED|IRQF_TIMER,
 					      timer_name[cpu],
 					      NULL);
 		if (irq < 0)
--- head-2010-04-29.orig/drivers/acpi/processor_perflib.c	2010-04-15 09:43:05.000000000 +0200
+++ head-2010-04-29/drivers/acpi/processor_perflib.c	2010-05-06 14:22:32.000000000 +0200
@@ -578,6 +578,8 @@ end:
 	return result;
 }
 
+#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
+
 int acpi_processor_preregister_performance(
 		struct acpi_processor_performance __percpu *performance)
 {
@@ -793,3 +795,5 @@ acpi_processor_unregister_performance(st
 }
 
 EXPORT_SYMBOL(acpi_processor_unregister_performance);
+
+#endif /* !CONFIG_PROCESSOR_EXTERNAL_CONTROL */
--- head-2010-04-29.orig/drivers/char/agp/intel-agp.c	2010-04-15 09:43:13.000000000 +0200
+++ head-2010-04-29/drivers/char/agp/intel-agp.c	2010-04-15 09:52:07.000000000 +0200
@@ -452,6 +452,10 @@ static struct page *i8xx_alloc_pages(voi
 
 	if (set_pages_uc(page, 4) < 0) {
 		set_pages_wb(page, 4);
+#ifdef CONFIG_XEN
+		xen_destroy_contiguous_region((unsigned long)page_address(page),
+					      2);
+#endif
 		__free_pages(page, 2);
 		return NULL;
 	}
--- head-2010-04-29.orig/drivers/xen/console/console.c	2009-03-18 10:39:31.000000000 +0100
+++ head-2010-04-29/drivers/xen/console/console.c	2010-03-24 15:06:08.000000000 +0100
@@ -94,7 +94,6 @@ static int __init xencons_setup(char *st
 {
 	char *q;
 	int n;
-	extern int console_use_vt;
 
 	console_use_vt = 1;
 	if (!strncmp(str, "ttyS", 4)) {
--- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/pgtable_64.h	2009-06-23 09:28:21.000000000 +0200
+++ head-2010-04-29/arch/x86/include/mach-xen/asm/pgtable_64.h	2010-03-24 15:06:08.000000000 +0100
@@ -394,7 +394,6 @@ static inline int pmd_large(pmd_t pte) {
 
 /*
  * Level 4 access.
- * Never use these in the common code.
 */
 #define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
--- head-2010-04-29.orig/arch/x86/include/mach-xen/asm/processor_32.h	2008-01-28 12:24:19.000000000 +0100
+++ head-2010-04-29/arch/x86/include/mach-xen/asm/processor_32.h	2010-03-24 15:06:08.000000000 +0100
@@ -23,7 +23,7 @@
 #include
 
 /* flag for disabling the tsc */
-extern int tsc_disable;
+#define tsc_disable 0
 
 struct desc_struct {
 	unsigned long a,b;
--- head-2010-04-29.orig/arch/x86/include/asm/thread_info.h	2010-05-06 14:21:27.000000000 +0200
+++ head-2010-04-29/arch/x86/include/asm/thread_info.h	2010-03-24 15:06:08.000000000 +0100
@@ -146,11 +146,15 @@ struct thread_info {
 	 _TIF_USER_RETURN_NOTIFY)
 
 /* flags to check in __switch_to() */
+#ifndef CONFIG_XEN
 #define _TIF_WORK_CTXSW							\
 	(_TIF_IO_BITMAP|_TIF_DEBUGCTLMSR|_TIF_DS_AREA_MSR|_TIF_NOTSC)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
+#else
+#define _TIF_WORK_CTXSW _TIF_DEBUG
+#endif
 
 #define PREEMPT_ACTIVE		0x10000000