From: www.kernel.org
Subject: Update to 2.6.23
Patch-mainline: 2.6.23

Automatically created from "patches.kernel.org/patch-2.6.23" by xen-port-patches.py

Acked-by: jbeulich@novell.com

--- head-2011-03-17.orig/arch/x86/Kbuild 2011-03-17 13:45:28.000000000 +0100
+++ head-2011-03-17/arch/x86/Kbuild 2011-01-31 17:49:31.000000000 +0100
@@ -2,7 +2,7 @@
 obj-$(CONFIG_KVM) += kvm/
 
 # Xen paravirtualization support
-obj-$(CONFIG_XEN) += xen/
+obj-$(CONFIG_PARAVIRT_XEN) += xen/
 
 # lguest paravirtualization support
 obj-$(CONFIG_LGUEST_GUEST) += lguest/
--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200
+++ head-2011-03-17/arch/x86/kernel/acpi/sleep_32-xen.c 2011-01-31 17:49:31.000000000 +0100
@@ -15,7 +15,7 @@
 #ifndef CONFIG_ACPI_PV_SLEEP
 /* address in low memory of the wakeup routine. */
 unsigned long acpi_wakeup_address = 0;
-unsigned long acpi_video_flags;
+unsigned long acpi_realmode_flags;
 extern char wakeup_start, wakeup_end;
 
 extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
@@ -74,9 +74,11 @@ static int __init acpi_sleep_setup(char
 {
 while ((str != NULL) && (*str != '\0')) {
 if (strncmp(str, "s3_bios", 7) == 0)
- acpi_video_flags = 1;
+ acpi_realmode_flags |= 1;
 if (strncmp(str, "s3_mode", 7) == 0)
- acpi_video_flags |= 2;
+ acpi_realmode_flags |= 2;
+ if (strncmp(str, "s3_beep", 7) == 0)
+ acpi_realmode_flags |= 4;
 str = strchr(str, ',');
 if (str != NULL)
 str += strspn(str, ", \t");
@@ -86,9 +88,11 @@ static int __init acpi_sleep_setup(char
 
 __setup("acpi_sleep=", acpi_sleep_setup);
 
+/* Ouch, we want to delete this. We already have better version in userspace, in
+ s2ram from suspend.sf.net project */
 static __init int reset_videomode_after_s3(struct dmi_system_id *d)
 {
- acpi_video_flags |= 2;
+ acpi_realmode_flags |= 2;
 return 0;
 }
 
--- head-2011-03-17.orig/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:32:29.000000000 +0100
+++ head-2011-03-17/arch/x86/kernel/asm-offsets_32.c 2011-01-31 17:49:31.000000000 +0100
@@ -20,7 +20,9 @@
 #include <asm/elf.h>
 #include <asm/suspend.h>
 
+#if defined(CONFIG_XEN) || defined(CONFIG_PARAVIRT_XEN)
 #include <xen/interface/xen.h>
+#endif
 
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
@@ -55,7 +57,6 @@ void foo(void)
 OFFSET(TI_exec_domain, thread_info, exec_domain);
 OFFSET(TI_flags, thread_info, flags);
 OFFSET(TI_status, thread_info, status);
- OFFSET(TI_cpu, thread_info, cpu);
 OFFSET(TI_preempt_count, thread_info, preempt_count);
 OFFSET(TI_addr_limit, thread_info, addr_limit);
 OFFSET(TI_restart_block, thread_info, restart_block);
@@ -121,7 +122,7 @@ void foo(void)
 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 #endif
 
-#ifdef CONFIG_XEN
+#ifdef CONFIG_PARAVIRT_XEN
 BLANK();
 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
 OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
--- head-2011-03-17.orig/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:32:29.000000000 +0100
+++ head-2011-03-17/arch/x86/kernel/cpu/common-xen.c 2011-01-31 17:49:31.000000000 +0100
@@ -362,6 +362,8 @@ static void __cpuinit generic_identify(s
 if ( xlvl >= 0x80000004 )
 get_model_name(c); /* Default name */
 }
+
+ init_scattered_cpuid_features(c);
 }
 
 early_intel_workaround(c);
@@ -613,7 +615,6 @@ extern int nsc_init_cpu(void);
 extern int amd_init_cpu(void);
 extern int centaur_init_cpu(void);
 extern int transmeta_init_cpu(void);
-extern int rise_init_cpu(void);
 extern int nexgen_init_cpu(void);
 extern int umc_init_cpu(void);
 
@@ -625,7 +626,6 @@ void __init early_cpu_init(void)
 amd_init_cpu();
 centaur_init_cpu();
 transmeta_init_cpu();
- rise_init_cpu();
 nexgen_init_cpu();
 umc_init_cpu();
 early_cpu_detect();
--- head-2011-03-17.orig/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:32:29.000000000 +0100
+++ head-2011-03-17/arch/x86/kernel/cpu/mtrr/main-xen.c 2011-01-31 17:49:31.000000000 +0100
@@ -167,7 +167,7 @@ mtrr_del(int reg, unsigned long base, un
 EXPORT_SYMBOL(mtrr_add);
 EXPORT_SYMBOL(mtrr_del);
 
-__init void mtrr_bp_init(void)
+void __init mtrr_bp_init(void)
 {
 }
 
--- head-2011-03-17.orig/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:32:29.000000000 +0100
+++ head-2011-03-17/arch/x86/kernel/e820_32-xen.c 2011-01-31 17:49:31.000000000 +0100
@@ -10,6 +10,7 @@
 #include <linux/efi.h>
 #include <linux/pfn.h>
 #include <linux/uaccess.h>
+#include <linux/suspend.h>
 
 #include <asm/pgtable.h>
 #include <asm/page.h>
@@ -343,6 +344,37 @@ static int __init request_standard_resou
 
 subsys_initcall(request_standard_resources);
 
+#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
+/**
+ * e820_mark_nosave_regions - Find the ranges of physical addresses that do not
+ * correspond to e820 RAM areas and mark the corresponding pages as nosave for
+ * hibernation.
+ *
+ * This function requires the e820 map to be sorted and without any
+ * overlapping entries and assumes the first e820 area to be RAM.
+ */
+void __init e820_mark_nosave_regions(void)
+{
+ int i;
+ unsigned long pfn;
+
+ pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
+ for (i = 1; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
+
+ if (pfn < PFN_UP(ei->addr))
+ register_nosave_region(pfn, PFN_UP(ei->addr));
+
+ pfn = PFN_DOWN(ei->addr + ei->size);
+ if (ei->type != E820_RAM)
+ register_nosave_region(PFN_UP(ei->addr), pfn);
+
+ if (pfn >= max_low_pfn)
+ break;
+ }
+}
+#endif
+
 void __init add_memory_region(unsigned long long start,
 unsigned long long size, int type)
 {
@@ -791,7 +823,7 @@ void __init print_memory_map(char *who)
 case E820_NVS:
 printk("(ACPI NVS)\n");
 break;
- default: printk("type %lu\n", e820.map[i].type);
+ default: printk("type %u\n", e820.map[i].type);
 break;
 }
 }
--- head-2011-03-17.orig/arch/x86/kernel/entry_32.S 2011-02-01 14:10:27.000000000 +0100
+++ head-2011-03-17/arch/x86/kernel/entry_32.S 2011-02-01 14:10:55.000000000 +0100
@@ -1047,7 +1047,7 @@ ENTRY(kernel_thread_helper)
 CFI_ENDPROC
 ENDPROC(kernel_thread_helper)
 
-#ifdef CONFIG_XEN
+#ifdef CONFIG_PARAVIRT_XEN
 /* Xen doesn't set %esp to be precisely what the normal sysenter
 entrypoint expects, so fix it up before using the normal path. */
 ENTRY(xen_sysenter_target)
@@ -1139,7 +1139,7 @@ ENDPROC(xen_failsafe_callback)
 BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
 xen_evtchn_do_upcall)
 
-#endif /* CONFIG_XEN */
+#endif /* CONFIG_PARAVIRT_XEN */
 
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
--- head-2011-03-17.orig/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:32:29.000000000 +0100
+++ head-2011-03-17/arch/x86/kernel/entry_32-xen.S 2011-01-31 17:49:31.000000000 +0100
@@ -452,9 +452,6 @@ restore_nocheck_notrace:
 1: INTERRUPT_RETURN
 .section .fixup,"ax"
 iret_exc:
-#ifndef CONFIG_XEN
- ENABLE_INTERRUPTS(CLBR_NONE)
-#endif
 pushl $0 # no error code
 pushl $do_iret_error
 jmp error_code
--- head-2011-03-17.orig/arch/x86/kernel/head_32-xen.S 2011-01-31 17:32:29.000000000 +0100
+++ head-2011-03-17/arch/x86/kernel/head_32-xen.S 2011-01-31 17:49:31.000000000 +0100
@@ -86,7 +86,10 @@ ENTRY(_stext)
 /*
 * BSS section
 */
-.section ".bss.page_aligned","w"
+.section ".bss.page_aligned","wa"
+ .align PAGE_SIZE_asm
+ENTRY(swapper_pg_pmd)
+ .fill 1024,4,0
 ENTRY(empty_zero_page)
 .fill 4096,1,0
 
@@ -136,25 +139,25 @@ ENTRY(empty_zero_page)
 #endif /* CONFIG_XEN_COMPAT <= 0x030002 */
 
 
- ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
- ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
- ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
- ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long __PAGE_OFFSET)
 #if CONFIG_XEN_COMPAT <= 0x030002
- ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long __PAGE_OFFSET)
 #else
- ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long 0)
 #endif
- ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
- ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
- ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
- ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long startup_32)
+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long hypercall_page)
+ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long HYPERVISOR_VIRT_START)
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
 #ifdef CONFIG_X86_PAE
- ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
- ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT, _PAGE_PRESENT)
 #else
- ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
- ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "no")
+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long _PAGE_PRESENT, _PAGE_PRESENT)
 #endif
- ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
- ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
+ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
--- head-2011-03-17.orig/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/io_apic_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -402,14 +402,6 @@ static void set_ioapic_affinity_irq(unsi
|
|
|
|
# include <linux/slab.h> /* kmalloc() */
|
|
|
|
# include <linux/timer.h> /* time_after() */
|
|
|
|
|
|
|
|
-#ifdef CONFIG_BALANCED_IRQ_DEBUG
|
|
|
|
-# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
|
|
|
|
-# define Dprintk(x...) do { TDprintk(x); } while (0)
|
|
|
|
-# else
|
|
|
|
-# define TDprintk(x...)
|
|
|
|
-# define Dprintk(x...)
|
|
|
|
-# endif
|
|
|
|
-
|
|
|
|
#define IRQBALANCE_CHECK_ARCH -999
|
|
|
|
#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
|
|
|
|
#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
|
|
|
|
@@ -492,7 +484,7 @@ static inline void balance_irq(int cpu,
|
|
|
|
static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
|
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
- Dprintk("Rotating IRQs among CPUs.\n");
|
|
|
|
+
|
|
|
|
for_each_online_cpu(i) {
|
|
|
|
for (j = 0; j < NR_IRQS; j++) {
|
|
|
|
if (!irq_desc[j].action)
|
|
|
|
@@ -609,19 +601,11 @@ tryanothercpu:
|
|
|
|
max_loaded = tmp_loaded; /* processor */
|
|
|
|
imbalance = (max_cpu_irq - min_cpu_irq) / 2;
|
|
|
|
|
|
|
|
- Dprintk("max_loaded cpu = %d\n", max_loaded);
|
|
|
|
- Dprintk("min_loaded cpu = %d\n", min_loaded);
|
|
|
|
- Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
|
|
|
|
- Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
|
|
|
|
- Dprintk("load imbalance = %lu\n", imbalance);
|
|
|
|
-
|
|
|
|
/* if imbalance is less than approx 10% of max load, then
|
|
|
|
* observe diminishing returns action. - quit
|
|
|
|
*/
|
|
|
|
- if (imbalance < (max_cpu_irq >> 3)) {
|
|
|
|
- Dprintk("Imbalance too trivial\n");
|
|
|
|
+ if (imbalance < (max_cpu_irq >> 3))
|
|
|
|
goto not_worth_the_effort;
|
|
|
|
- }
|
|
|
|
|
|
|
|
tryanotherirq:
|
|
|
|
/* if we select an IRQ to move that can't go where we want, then
|
|
|
|
@@ -678,9 +662,6 @@ tryanotherirq:
|
|
|
|
cpus_and(tmp, target_cpu_mask, allowed_mask);
|
|
|
|
|
|
|
|
if (!cpus_empty(tmp)) {
|
|
|
|
-
|
|
|
|
- Dprintk("irq = %d moved to cpu = %d\n",
|
|
|
|
- selected_irq, min_loaded);
|
|
|
|
/* mark for change destination */
|
|
|
|
set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
|
|
|
|
|
|
|
|
@@ -700,7 +681,6 @@ not_worth_the_effort:
|
|
|
|
*/
|
|
|
|
balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
|
|
|
|
balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
|
|
|
|
- Dprintk("IRQ worth rotating not found\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -716,6 +696,7 @@ static int balanced_irq(void *unused)
|
|
|
|
set_pending_irq(i, cpumask_of_cpu(0));
|
|
|
|
}
|
|
|
|
|
|
|
|
+ set_freezable();
|
|
|
|
for ( ; ; ) {
|
|
|
|
time_remaining = schedule_timeout_interruptible(time_remaining);
|
|
|
|
try_to_freeze();
|
|
|
|
@@ -825,14 +806,6 @@ static int pirq_entries [MAX_PIRQS];
|
|
|
|
static int pirqs_enabled;
|
|
|
|
int skip_ioapic_setup;
|
|
|
|
|
|
|
|
-static int __init ioapic_setup(char *str)
|
|
|
|
-{
|
|
|
|
- skip_ioapic_setup = 1;
|
|
|
|
- return 1;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-__setup("noapic", ioapic_setup);
|
|
|
|
-
|
|
|
|
static int __init ioapic_pirq_setup(char *str)
|
|
|
|
{
|
|
|
|
int i, max;
|
|
|
|
@@ -1323,12 +1296,15 @@ static struct irq_chip ioapic_chip;
|
|
|
|
static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
|
|
|
|
{
|
|
|
|
if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
|
|
|
|
- trigger == IOAPIC_LEVEL)
|
|
|
|
+ trigger == IOAPIC_LEVEL) {
|
|
|
|
+ irq_desc[irq].status |= IRQ_LEVEL;
|
|
|
|
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
|
|
|
handle_fasteoi_irq, "fasteoi");
|
|
|
|
- else
|
|
|
|
+ } else {
|
|
|
|
+ irq_desc[irq].status &= ~IRQ_LEVEL;
|
|
|
|
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
|
|
|
handle_edge_irq, "edge");
|
|
|
|
+ }
|
|
|
|
set_intr_gate(vector, interrupt[irq]);
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
@@ -1957,7 +1933,7 @@ __setup("no_timer_check", notimercheck);
|
|
|
|
* - if this function detects that timer IRQs are defunct, then we fall
|
|
|
|
* back to ISA timer IRQs
|
|
|
|
*/
|
|
|
|
-int __init timer_irq_works(void)
|
|
|
|
+static int __init timer_irq_works(void)
|
|
|
|
{
|
|
|
|
unsigned long t1 = jiffies;
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/microcode-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/microcode-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -33,6 +33,7 @@
|
|
|
|
#include <linux/miscdevice.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
+#include <linux/fs.h>
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/cpu.h>
|
|
|
|
#include <linux/firmware.h>
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/pci-dma-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -24,7 +24,7 @@
|
|
|
|
#include <asm/bug.h>
|
|
|
|
|
|
|
|
#ifdef __x86_64__
|
|
|
|
-#include <asm/proto.h>
|
|
|
|
+#include <asm/iommu.h>
|
|
|
|
|
|
|
|
int iommu_merge __read_mostly = 0;
|
|
|
|
EXPORT_SYMBOL(iommu_merge);
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/process_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/process_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -241,6 +241,7 @@ early_param("idle", idle_setup);
|
|
|
|
void show_regs(struct pt_regs * regs)
|
|
|
|
{
|
|
|
|
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
|
|
|
|
+ unsigned long d0, d1, d2, d3, d6, d7;
|
|
|
|
|
|
|
|
printk("\n");
|
|
|
|
printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
|
|
|
|
@@ -265,6 +266,17 @@ void show_regs(struct pt_regs * regs)
|
|
|
|
cr3 = read_cr3();
|
|
|
|
cr4 = read_cr4_safe();
|
|
|
|
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
|
|
|
|
+
|
|
|
|
+ get_debugreg(d0, 0);
|
|
|
|
+ get_debugreg(d1, 1);
|
|
|
|
+ get_debugreg(d2, 2);
|
|
|
|
+ get_debugreg(d3, 3);
|
|
|
|
+ printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
|
|
|
|
+ d0, d1, d2, d3);
|
|
|
|
+ get_debugreg(d6, 6);
|
|
|
|
+ get_debugreg(d7, 7);
|
|
|
|
+ printk("DR6: %08lx DR7: %08lx\n", d6, d7);
|
|
|
|
+
|
|
|
|
show_trace(NULL, regs, ®s->esp);
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -473,7 +485,30 @@ int dump_task_regs(struct task_struct *t
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
-static noinline void __switch_to_xtra(struct task_struct *next_p)
|
|
|
|
+#ifdef CONFIG_SECCOMP
|
|
|
|
+void hard_disable_TSC(void)
|
|
|
|
+{
|
|
|
|
+ write_cr4(read_cr4() | X86_CR4_TSD);
|
|
|
|
+}
|
|
|
|
+void disable_TSC(void)
|
|
|
|
+{
|
|
|
|
+ preempt_disable();
|
|
|
|
+ if (!test_and_set_thread_flag(TIF_NOTSC))
|
|
|
|
+ /*
|
|
|
|
+ * Must flip the CPU state synchronously with
|
|
|
|
+ * TIF_NOTSC in the current running context.
|
|
|
|
+ */
|
|
|
|
+ hard_disable_TSC();
|
|
|
|
+ preempt_enable();
|
|
|
|
+}
|
|
|
|
+void hard_enable_TSC(void)
|
|
|
|
+{
|
|
|
|
+ write_cr4(read_cr4() & ~X86_CR4_TSD);
|
|
|
|
+}
|
|
|
|
+#endif /* CONFIG_SECCOMP */
|
|
|
|
+
|
|
|
|
+static noinline void
|
|
|
|
+__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
|
|
|
|
{
|
|
|
|
struct thread_struct *next;
|
|
|
|
|
|
|
|
@@ -488,33 +523,17 @@ static noinline void __switch_to_xtra(st
|
|
|
|
set_debugreg(next->debugreg[6], 6);
|
|
|
|
set_debugreg(next->debugreg[7], 7);
|
|
|
|
}
|
|
|
|
-}
|
|
|
|
|
|
|
|
-/*
|
|
|
|
- * This function selects if the context switch from prev to next
|
|
|
|
- * has to tweak the TSC disable bit in the cr4.
|
|
|
|
- */
|
|
|
|
-static inline void disable_tsc(struct task_struct *prev_p,
|
|
|
|
- struct task_struct *next_p)
|
|
|
|
-{
|
|
|
|
- struct thread_info *prev, *next;
|
|
|
|
-
|
|
|
|
- /*
|
|
|
|
- * gcc should eliminate the ->thread_info dereference if
|
|
|
|
- * has_secure_computing returns 0 at compile time (SECCOMP=n).
|
|
|
|
- */
|
|
|
|
- prev = task_thread_info(prev_p);
|
|
|
|
- next = task_thread_info(next_p);
|
|
|
|
-
|
|
|
|
- if (has_secure_computing(prev) || has_secure_computing(next)) {
|
|
|
|
- /* slow path here */
|
|
|
|
- if (has_secure_computing(prev) &&
|
|
|
|
- !has_secure_computing(next)) {
|
|
|
|
- write_cr4(read_cr4() & ~X86_CR4_TSD);
|
|
|
|
- } else if (!has_secure_computing(prev) &&
|
|
|
|
- has_secure_computing(next))
|
|
|
|
- write_cr4(read_cr4() | X86_CR4_TSD);
|
|
|
|
+#ifdef CONFIG_SECCOMP
|
|
|
|
+ if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
|
|
|
|
+ test_tsk_thread_flag(next_p, TIF_NOTSC)) {
|
|
|
|
+ /* prev and next are different */
|
|
|
|
+ if (test_tsk_thread_flag(next_p, TIF_NOTSC))
|
|
|
|
+ hard_disable_TSC();
|
|
|
|
+ else
|
|
|
|
+ hard_enable_TSC();
|
|
|
|
}
|
|
|
|
+#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -649,10 +668,9 @@ struct task_struct fastcall * __switch_t
|
|
|
|
/*
|
|
|
|
* Now maybe handle debug registers
|
|
|
|
*/
|
|
|
|
- if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
|
|
|
|
- __switch_to_xtra(next_p);
|
|
|
|
-
|
|
|
|
- disable_tsc(prev_p, next_p);
|
|
|
|
+ if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
|
|
|
|
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
|
|
|
|
+ __switch_to_xtra(prev_p, next_p);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Leave lazy mode, flushing any hypercalls made here.
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/setup_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -114,19 +114,10 @@ static unsigned int highmem_pages = -1;
|
|
|
|
/*
|
|
|
|
* Setup options
|
|
|
|
*/
|
|
|
|
-struct drive_info_struct { char dummy[32]; } drive_info;
|
|
|
|
-#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
|
|
|
|
- defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
|
|
|
|
-EXPORT_SYMBOL(drive_info);
|
|
|
|
-#endif
|
|
|
|
struct screen_info screen_info;
|
|
|
|
EXPORT_SYMBOL(screen_info);
|
|
|
|
struct apm_info apm_info;
|
|
|
|
EXPORT_SYMBOL(apm_info);
|
|
|
|
-struct sys_desc_table_struct {
|
|
|
|
- unsigned short length;
|
|
|
|
- unsigned char table[0];
|
|
|
|
-};
|
|
|
|
struct edid_info edid_info;
|
|
|
|
EXPORT_SYMBOL_GPL(edid_info);
|
|
|
|
#ifndef CONFIG_XEN
|
|
|
|
@@ -149,7 +140,7 @@ unsigned long saved_videomode;
|
|
|
|
|
|
|
|
static char __initdata command_line[COMMAND_LINE_SIZE];
|
|
|
|
|
|
|
|
-unsigned char __initdata boot_params[PARAM_SIZE];
|
|
|
|
+struct boot_params __initdata boot_params;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Point at the empty zero page to start with. We map the real shared_info
|
|
|
|
@@ -316,18 +307,18 @@ unsigned long __init find_max_low_pfn(vo
|
|
|
|
printk(KERN_WARNING "Warning only %ldMB will be used.\n",
|
|
|
|
MAXMEM>>20);
|
|
|
|
if (max_pfn > MAX_NONPAE_PFN)
|
|
|
|
- printk(KERN_WARNING "Use a PAE enabled kernel.\n");
|
|
|
|
+ printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
|
|
|
|
else
|
|
|
|
printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
|
|
|
|
max_pfn = MAXMEM_PFN;
|
|
|
|
#else /* !CONFIG_HIGHMEM */
|
|
|
|
-#ifndef CONFIG_X86_PAE
|
|
|
|
+#ifndef CONFIG_HIGHMEM64G
|
|
|
|
if (max_pfn > MAX_NONPAE_PFN) {
|
|
|
|
max_pfn = MAX_NONPAE_PFN;
|
|
|
|
printk(KERN_WARNING "Warning only 4GB will be used.\n");
|
|
|
|
- printk(KERN_WARNING "Use a PAE enabled kernel.\n");
|
|
|
|
+ printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
|
|
|
|
}
|
|
|
|
-#endif /* !CONFIG_X86_PAE */
|
|
|
|
+#endif /* !CONFIG_HIGHMEM64G */
|
|
|
|
#endif /* !CONFIG_HIGHMEM */
|
|
|
|
} else {
|
|
|
|
if (highmem_pages == -1)
|
|
|
|
@@ -516,7 +507,7 @@ void __init setup_bootmem_allocator(void
|
|
|
|
*
|
|
|
|
* This should all compile down to nothing when NUMA is off.
|
|
|
|
*/
|
|
|
|
-void __init remapped_pgdat_init(void)
|
|
|
|
+static void __init remapped_pgdat_init(void)
|
|
|
|
{
|
|
|
|
int nid;
|
|
|
|
|
|
|
|
@@ -591,7 +582,6 @@ void __init setup_arch(char **cmdline_p)
|
|
|
|
properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
|
|
|
|
*/
|
|
|
|
ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
|
|
|
|
- drive_info = DRIVE_INFO;
|
|
|
|
screen_info = SCREEN_INFO;
|
|
|
|
copy_edid();
|
|
|
|
apm_info.bios = APM_BIOS_INFO;
|
|
|
|
@@ -769,6 +759,8 @@ void __init setup_arch(char **cmdline_p)
|
|
|
|
* NOTE: at this point the bootmem allocator is fully available.
|
|
|
|
*/
|
|
|
|
|
|
|
|
+ paravirt_post_allocator_init();
|
|
|
|
+
|
|
|
|
if (is_initial_xendomain())
|
|
|
|
dmi_scan_machine();
|
|
|
|
|
|
|
|
@@ -816,6 +808,7 @@ void __init setup_arch(char **cmdline_p)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
e820_register_memory();
|
|
|
|
+ e820_mark_nosave_regions();
|
|
|
|
|
|
|
|
if (is_initial_xendomain()) {
|
|
|
|
#ifdef CONFIG_VT
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/smp_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -22,6 +22,7 @@
|
|
|
|
|
|
|
|
#include <asm/mtrr.h>
|
|
|
|
#include <asm/tlbflush.h>
|
|
|
|
+#include <asm/mmu_context.h>
|
|
|
|
#if 0
|
|
|
|
#include <mach_apic.h>
|
|
|
|
#endif
|
|
|
|
@@ -217,13 +218,13 @@ static unsigned long flush_va;
|
|
|
|
static DEFINE_SPINLOCK(tlbstate_lock);
|
|
|
|
|
|
|
|
/*
|
|
|
|
- * We cannot call mmdrop() because we are in interrupt context,
|
|
|
|
+ * We cannot call mmdrop() because we are in interrupt context,
|
|
|
|
* instead update mm->cpu_vm_mask.
|
|
|
|
*
|
|
|
|
* We need to reload %cr3 since the page tables may be going
|
|
|
|
* away from under us..
|
|
|
|
*/
|
|
|
|
-static inline void leave_mm (unsigned long cpu)
|
|
|
|
+void leave_mm(unsigned long cpu)
|
|
|
|
{
|
|
|
|
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
|
|
|
|
BUG();
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/time-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/time-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -75,11 +75,12 @@
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
#include <xen/interface/vcpu.h>
|
|
|
|
|
|
|
|
-#ifdef CONFIG_X86_32
|
|
|
|
#include <asm/i8253.h>
|
|
|
|
DEFINE_SPINLOCK(i8253_lock);
|
|
|
|
EXPORT_SYMBOL(i8253_lock);
|
|
|
|
-#else
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_X86_64
|
|
|
|
+#include <asm/vsyscall.h>
|
|
|
|
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
|
|
|
|
#endif
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -760,56 +761,10 @@ unsigned long read_persistent_clock(void
|
2010-07-07 11:12:45 +00:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
-static void sync_cmos_clock(unsigned long dummy);
|
|
|
|
-
|
|
|
|
-static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
|
|
|
|
-int no_sync_cmos_clock;
|
|
|
|
-
|
|
|
|
-static void sync_cmos_clock(unsigned long dummy)
|
|
|
|
-{
|
|
|
|
- struct timeval now, next;
|
|
|
|
- int fail = 1;
|
|
|
|
-
|
|
|
|
- /*
|
|
|
|
- * If we have an externally synchronized Linux clock, then update
|
|
|
|
- * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
|
|
|
|
- * called as close as possible to 500 ms before the new second starts.
|
|
|
|
- * This code is run on a timer. If the clock is set, that timer
|
|
|
|
- * may not expire at the correct time. Thus, we adjust...
|
|
|
|
- */
|
|
|
|
- if (!ntp_synced())
|
|
|
|
- /*
|
|
|
|
- * Not synced, exit, do not restart a timer (if one is
|
|
|
|
- * running, let it run out).
|
|
|
|
- */
|
|
|
|
- return;
|
|
|
|
-
|
|
|
|
- do_gettimeofday(&now);
|
|
|
|
- if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
|
|
|
|
- now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
|
|
|
|
- fail = set_rtc_mmss(now.tv_sec);
|
|
|
|
-
|
|
|
|
- next.tv_usec = USEC_AFTER - now.tv_usec;
|
|
|
|
- if (next.tv_usec <= 0)
|
|
|
|
- next.tv_usec += USEC_PER_SEC;
|
|
|
|
-
|
|
|
|
- if (!fail)
|
|
|
|
- next.tv_sec = 659;
|
|
|
|
- else
|
|
|
|
- next.tv_sec = 0;
|
|
|
|
-
|
|
|
|
- if (next.tv_usec >= USEC_PER_SEC) {
|
|
|
|
- next.tv_sec++;
|
|
|
|
- next.tv_usec -= USEC_PER_SEC;
|
|
|
|
- }
|
|
|
|
- mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-void notify_arch_cmos_timer(void)
|
|
|
|
+int update_persistent_clock(struct timespec now)
|
|
|
|
{
|
|
|
|
- if (!no_sync_cmos_clock)
|
|
|
|
- mod_timer(&sync_cmos_timer, jiffies + 1);
|
|
|
|
mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
|
|
|
|
+ return set_rtc_mmss(now.tv_sec);
|
|
|
|
}
|
|
|
|
|
|
|
|
extern void (*late_time_init)(void);
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/traps_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -41,6 +41,10 @@
|
|
|
|
#include <linux/mca.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
+#if defined(CONFIG_EDAC)
|
|
|
|
+#include <linux/edac.h>
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
#include <asm/processor.h>
|
|
|
|
#include <asm/system.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
@@ -102,36 +106,45 @@ asmlinkage void machine_check(void);
|
|
|
|
int kstack_depth_to_print = 24;
|
|
|
|
static unsigned int code_bytes = 64;
|
|
|
|
|
|
|
|
-static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
|
|
|
|
+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p, unsigned size)
|
|
|
|
{
|
|
|
|
return p > (void *)tinfo &&
|
|
|
|
- p < (void *)tinfo + THREAD_SIZE - 3;
|
|
|
|
+ p <= (void *)tinfo + THREAD_SIZE - size;
|
|
|
|
}
|
|
|
|
|
|
|
|
+/* The form of the top of the frame on the stack */
|
|
|
|
+struct stack_frame {
|
|
|
|
+ struct stack_frame *next_frame;
|
|
|
|
+ unsigned long return_address;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
static inline unsigned long print_context_stack(struct thread_info *tinfo,
|
|
|
|
unsigned long *stack, unsigned long ebp,
|
|
|
|
struct stacktrace_ops *ops, void *data)
|
|
|
|
{
|
|
|
|
- unsigned long addr;
|
|
|
|
-
|
|
|
|
#ifdef CONFIG_FRAME_POINTER
|
|
|
|
- while (valid_stack_ptr(tinfo, (void *)ebp)) {
|
|
|
|
- unsigned long new_ebp;
|
|
|
|
- addr = *(unsigned long *)(ebp + 4);
|
|
|
|
+ struct stack_frame *frame = (struct stack_frame *)ebp;
|
|
|
|
+ while (valid_stack_ptr(tinfo, frame, sizeof(*frame))) {
|
|
|
|
+ struct stack_frame *next;
|
|
|
|
+ unsigned long addr;
|
|
|
|
+
|
|
|
|
+ addr = frame->return_address;
|
|
|
|
ops->address(data, addr);
|
|
|
|
/*
|
|
|
|
* break out of recursive entries (such as
|
|
|
|
* end_of_stack_stop_unwind_function). Also,
|
|
|
|
* we can never allow a frame pointer to
|
|
|
|
* move downwards!
|
|
|
|
- */
|
|
|
|
- new_ebp = *(unsigned long *)ebp;
|
|
|
|
- if (new_ebp <= ebp)
|
|
|
|
+ */
|
|
|
|
+ next = frame->next_frame;
|
|
|
|
+ if (next <= frame)
|
|
|
|
break;
|
|
|
|
- ebp = new_ebp;
|
|
|
|
+ frame = next;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
- while (valid_stack_ptr(tinfo, stack)) {
|
|
|
|
+ while (valid_stack_ptr(tinfo, stack, sizeof(*stack))) {
|
|
|
|
+ unsigned long addr;
|
|
|
|
+
|
|
|
|
addr = *stack++;
|
|
|
|
if (__kernel_text_address(addr))
|
|
|
|
ops->address(data, addr);
|
|
|
|
@@ -154,7 +167,7 @@ void dump_trace(struct task_struct *task
|
|
|
|
if (!stack) {
|
|
|
|
unsigned long dummy;
|
|
|
|
stack = &dummy;
|
|
|
|
- if (task && task != current)
|
|
|
|
+ if (task != current)
|
|
|
|
stack = (unsigned long *)task->thread.esp;
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -213,6 +226,7 @@ static void print_trace_address(void *da
|
|
|
|
{
|
|
|
|
printk("%s [<%08lx>] ", (char *)data, addr);
|
|
|
|
print_symbol("%s\n", addr);
|
|
|
|
+ touch_nmi_watchdog();
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct stacktrace_ops print_trace_ops = {
|
|
|
|
@@ -396,7 +410,7 @@ void die(const char * str, struct pt_reg
|
|
|
|
unsigned long esp;
|
|
|
|
unsigned short ss;
|
|
|
|
|
|
|
|
- report_bug(regs->eip);
|
|
|
|
+ report_bug(regs->eip, regs);
|
|
|
|
|
|
|
|
printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
|
|
|
|
#ifdef CONFIG_PREEMPT
|
|
|
|
@@ -439,6 +453,7 @@ void die(const char * str, struct pt_reg
|
|
|
|
|
|
|
|
bust_spinlocks(0);
|
|
|
|
die.lock_owner = -1;
|
|
|
|
+ add_taint(TAINT_DIE);
|
|
|
|
spin_unlock_irqrestore(&die.lock, flags);
|
|
|
|
|
|
|
|
if (!regs)
|
|
|
|
@@ -523,10 +538,12 @@ fastcall void do_##name(struct pt_regs *
|
|
|
|
do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
|
|
|
|
}
|
|
|
|
|
|
|
|
-#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
|
|
|
|
+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr, irq) \
|
|
|
|
fastcall void do_##name(struct pt_regs * regs, long error_code) \
|
|
|
|
{ \
|
|
|
|
siginfo_t info; \
|
|
|
|
+ if (irq) \
|
|
|
|
+ local_irq_enable(); \
|
|
|
|
info.si_signo = signr; \
|
|
|
|
info.si_errno = 0; \
|
|
|
|
info.si_code = sicode; \
|
|
|
|
@@ -566,13 +583,13 @@ DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
|
|
|
|
#endif
|
|
|
|
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
|
|
|
|
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
|
|
|
|
-DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
|
|
|
|
+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip, 0)
|
|
|
|
DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
|
|
|
|
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
|
|
|
|
DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
|
|
|
|
DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
|
|
|
|
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
|
|
|
|
-DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
|
|
|
|
+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0, 0)
|
|
|
|
+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0, 1)
|
|
|
|
|
|
|
|
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
|
|
|
|
long error_code)
|
|
|
|
@@ -585,6 +602,13 @@ fastcall void __kprobes do_general_prote
|
|
|
|
|
|
|
|
current->thread.error_code = error_code;
|
|
|
|
current->thread.trap_no = 13;
|
|
|
|
+ if (show_unhandled_signals && unhandled_signal(current, SIGSEGV) &&
|
|
|
|
+ printk_ratelimit())
|
|
|
|
+ printk(KERN_INFO
|
|
|
|
+ "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
|
|
|
|
+ current->comm, current->pid,
|
|
|
|
+ regs->eip, regs->esp, error_code);
|
|
|
|
+
|
|
|
|
force_sig(SIGSEGV, current);
|
|
|
|
return;
|
|
|
|
|
|
|
|
@@ -610,6 +634,14 @@ mem_parity_error(unsigned char reason, s
|
|
|
|
printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
|
|
|
|
"CPU %d.\n", reason, smp_processor_id());
|
|
|
|
printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
|
|
|
|
+
|
|
|
|
+#if defined(CONFIG_EDAC)
|
|
|
|
+ if(edac_handler_set()) {
|
|
|
|
+ edac_atomic_assert_error();
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
if (panic_on_unrecovered_nmi)
|
|
|
|
panic("NMI: Not continuing");
|
|
|
|
|
|
|
|
@@ -720,6 +752,8 @@ static __kprobes void default_do_nmi(str
|
|
|
|
reassert_nmi();
|
|
|
|
}
|
|
|
|
|
|
|
|
+static int ignore_nmis;
|
|
|
|
+
|
|
|
|
fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
@@ -730,11 +764,24 @@ fastcall __kprobes void do_nmi(struct pt
|
|
|
|
|
|
|
|
++nmi_count(cpu);
|
|
|
|
|
|
|
|
- default_do_nmi(regs);
|
|
|
|
+ if (!ignore_nmis)
|
|
|
|
+ default_do_nmi(regs);
|
|
|
|
|
|
|
|
nmi_exit();
|
|
|
|
}
|
|
|
|
|
|
|
|
+void stop_nmi(void)
|
|
|
|
+{
|
|
|
|
+ acpi_nmi_disable();
|
|
|
|
+ ignore_nmis++;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void restart_nmi(void)
|
|
|
|
+{
|
|
|
|
+ ignore_nmis--;
|
|
|
|
+ acpi_nmi_enable();
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
#ifdef CONFIG_KPROBES
|
|
|
|
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
|
|
|
|
{
|
|
|
|
@@ -1023,6 +1070,7 @@ asmlinkage void math_state_restore(void)
|
|
|
|
thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
|
|
|
|
tsk->fpu_counter++;
|
|
|
|
}
|
|
|
|
+EXPORT_SYMBOL_GPL(math_state_restore);
|
|
|
|
|
|
|
|
#ifndef CONFIG_MATH_EMULATION
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mach-xen/setup.c 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mach-xen/setup.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -12,6 +12,7 @@
|
|
|
|
#include <asm/e820.h>
|
|
|
|
#include <asm/setup.h>
|
|
|
|
#include <asm/fixmap.h>
|
|
|
|
+#include <asm/pgtable.h>
|
|
|
|
|
|
|
|
#include <xen/interface/callback.h>
|
|
|
|
#include <xen/interface/memory.h>
|
|
|
|
@@ -101,7 +102,7 @@ void __init pre_setup_arch_hook(void)
|
|
|
|
|
|
|
|
init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;
|
|
|
|
|
|
|
|
- setup_xen_features();
|
|
|
|
+ xen_setup_features();
|
|
|
|
|
|
|
|
if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
|
|
|
|
hypervisor_virt_start = pp.virt_start;
|
|
|
|
@@ -157,4 +158,18 @@ void __init machine_specific_arch_setup(
|
|
|
|
HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
+
|
|
|
|
+ /* Do an early initialization of the fixmap area */
|
|
|
|
+ {
|
|
|
|
+ extern pte_t swapper_pg_pmd[PTRS_PER_PTE];
|
|
|
|
+ unsigned long addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
|
|
|
|
+ pgd_t *pgd = (pgd_t *)xen_start_info->pt_base;
|
|
|
|
+ pud_t *pud = pud_offset(pgd + pgd_index(addr), addr);
|
|
|
|
+ pmd_t *pmd = pmd_offset(pud, addr);
|
|
|
|
+
|
|
|
|
+ swapper_pg_dir = pgd;
|
|
|
|
+ init_mm.pgd = pgd;
|
|
|
|
+ make_lowmem_page_readonly(swapper_pg_pmd, XENFEAT_writable_page_tables);
|
|
|
|
+ set_pmd(pmd, __pmd(__pa_symbol(swapper_pg_pmd) | _PAGE_TABLE));
|
|
|
|
+ }
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/fault_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/fault_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -346,7 +346,10 @@ static inline pmd_t *vmalloc_sync_one(pg
|
|
|
|
pmd_k = pmd_offset(pud_k, address);
|
|
|
|
if (!pmd_present(*pmd_k))
|
|
|
|
return NULL;
|
|
|
|
- if (!pmd_present(*pmd))
|
|
|
|
+ if (!pmd_present(*pmd)) {
|
|
|
|
+ bool lazy = x86_read_percpu(xen_lazy_mmu);
|
|
|
|
+
|
|
|
|
+ x86_write_percpu(xen_lazy_mmu, false);
|
|
|
|
#if CONFIG_XEN_COMPAT > 0x030002
|
|
|
|
set_pmd(pmd, *pmd_k);
|
|
|
|
#else
|
|
|
|
@@ -356,7 +359,8 @@ static inline pmd_t *vmalloc_sync_one(pg
|
|
|
|
*/
|
|
|
|
set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
|
|
|
|
#endif
|
|
|
|
- else
|
|
|
|
+ x86_write_percpu(xen_lazy_mmu, lazy);
|
|
|
|
+ } else
|
|
|
|
BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
|
|
|
|
return pmd_k;
|
|
|
|
}
|
|
|
|
@@ -388,6 +392,8 @@ static inline int vmalloc_fault(unsigned
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
+int show_unhandled_signals = 1;
|
|
|
|
+
|
|
|
|
/*
|
|
|
|
* This routine handles page faults. It determines the address,
|
|
|
|
* and the problem, and then passes it off to one of the appropriate
|
|
|
|
@@ -408,6 +414,7 @@ fastcall void __kprobes do_page_fault(st
|
|
|
|
struct vm_area_struct * vma;
|
|
|
|
unsigned long address;
|
|
|
|
int write, si_code;
|
|
|
|
+ int fault;
|
|
|
|
|
|
|
|
/* get the address */
|
|
|
|
address = read_cr2();
|
|
|
|
@@ -541,20 +548,18 @@ good_area:
|
|
|
|
* make sure we exit gracefully rather than endlessly redo
|
|
|
|
* the fault.
|
|
|
|
*/
|
|
|
|
- switch (handle_mm_fault(mm, vma, address, write)) {
|
|
|
|
- case VM_FAULT_MINOR:
|
|
|
|
- tsk->min_flt++;
|
|
|
|
- break;
|
|
|
|
- case VM_FAULT_MAJOR:
|
|
|
|
- tsk->maj_flt++;
|
|
|
|
- break;
|
|
|
|
- case VM_FAULT_SIGBUS:
|
|
|
|
- goto do_sigbus;
|
|
|
|
- case VM_FAULT_OOM:
|
|
|
|
+ fault = handle_mm_fault(mm, vma, address, write);
|
|
|
|
+ if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
|
|
+ if (fault & VM_FAULT_OOM)
|
|
|
|
goto out_of_memory;
|
|
|
|
- default:
|
|
|
|
- BUG();
|
|
|
|
+ else if (fault & VM_FAULT_SIGBUS)
|
|
|
|
+ goto do_sigbus;
|
|
|
|
+ BUG();
|
|
|
|
}
|
|
|
|
+ if (fault & VM_FAULT_MAJOR)
|
|
|
|
+ tsk->maj_flt++;
|
|
|
|
+ else
|
|
|
|
+ tsk->min_flt++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Did it hit the DOS screen memory VA from vm86 mode?
|
|
|
|
@@ -589,6 +594,14 @@ bad_area_nosemaphore:
|
|
|
|
if (is_prefetch(regs, address, error_code))
|
|
|
|
return;
|
|
|
|
|
|
|
|
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
|
|
|
|
+ printk_ratelimit()) {
|
|
|
|
+ printk("%s%s[%d]: segfault at %08lx eip %08lx "
|
|
|
|
+ "esp %08lx error %lx\n",
|
|
|
|
+ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
|
|
|
|
+ tsk->comm, tsk->pid, address, regs->eip,
|
|
|
|
+ regs->esp, error_code);
|
|
|
|
+ }
|
|
|
|
tsk->thread.cr2 = address;
|
|
|
|
/* Kernel addresses are always protection faults */
|
|
|
|
tsk->thread.error_code = error_code | (address >= TASK_SIZE);
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/highmem_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -34,17 +34,16 @@ void *kmap_atomic_prot(struct page *page
|
|
|
|
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
|
|
|
|
pagefault_disable();
|
|
|
|
|
|
|
|
- idx = type + KM_TYPE_NR*smp_processor_id();
|
|
|
|
- BUG_ON(!pte_none(*(kmap_pte-idx)));
|
|
|
|
-
|
|
|
|
if (!PageHighMem(page))
|
|
|
|
return page_address(page);
|
|
|
|
|
|
|
|
+ idx = type + KM_TYPE_NR*smp_processor_id();
|
|
|
|
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
|
|
|
|
+ BUG_ON(!pte_none(*(kmap_pte-idx)));
|
|
|
|
set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
|
|
|
|
/*arch_flush_lazy_mmu_mode();*/
|
|
|
|
|
|
|
|
- return (void*) vaddr;
|
|
|
|
+ return (void *)vaddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
void *kmap_atomic(struct page *page, enum km_type type)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -96,7 +96,7 @@ static pte_t * __init one_page_table_ini
|
|
|
|
#endif
|
|
|
|
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
|
|
|
|
|
|
|
|
- paravirt_alloc_pt(__pa(page_table) >> PAGE_SHIFT);
|
|
|
|
+ paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
|
|
|
|
make_lowmem_page_readonly(page_table,
|
|
|
|
XENFEAT_writable_page_tables);
|
|
|
|
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
|
|
|
|
@@ -453,7 +453,7 @@ static void __init pagetable_init (void)
|
|
|
|
xen_pagetable_setup_done(pgd_base);
|
|
|
|
}
|
|
|
|
|
|
|
|
-#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
|
|
|
|
+#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
|
|
|
|
/*
|
|
|
|
* Swap suspend & friends need this for resume because things like the intel-agp
|
|
|
|
* driver might have split up a kernel 4MB mapping.
|
|
|
|
@@ -492,9 +492,13 @@ void zap_low_mappings (void)
|
|
|
|
flush_tlb_all();
|
|
|
|
}
|
|
|
|
|
|
|
|
+int nx_enabled = 0;
|
|
|
|
+
|
|
|
|
+#ifdef CONFIG_X86_PAE
|
|
|
|
+
|
|
|
|
static int disable_nx __initdata = 0;
|
|
|
|
u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
|
|
|
|
-EXPORT_SYMBOL(__supported_pte_mask);
|
|
|
|
+EXPORT_SYMBOL_GPL(__supported_pte_mask);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* noexec = on|off
|
|
|
|
@@ -521,9 +525,6 @@ static int __init noexec_setup(char *str
|
|
|
|
}
|
|
|
|
early_param("noexec", noexec_setup);
|
|
|
|
|
|
|
|
-int nx_enabled = 0;
|
|
|
|
-#ifdef CONFIG_X86_PAE
|
|
|
|
-
|
|
|
|
static void __init set_nx(void)
|
|
|
|
{
|
|
|
|
unsigned int v[4], l, h;
|
|
|
|
@@ -770,7 +771,7 @@ void __init mem_init(void)
|
|
|
|
zap_low_mappings();
|
|
|
|
#endif
|
|
|
|
|
|
|
|
- set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
|
|
|
|
+ SetPagePinned(virt_to_page(init_mm.pgd));
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
|
|
|
@@ -802,8 +803,7 @@ void __init pgtable_cache_init(void)
|
|
|
|
PTRS_PER_PMD*sizeof(pmd_t),
|
|
|
|
PTRS_PER_PMD*sizeof(pmd_t),
|
|
|
|
SLAB_PANIC,
|
|
|
|
- pmd_ctor,
|
|
|
|
- NULL);
|
|
|
|
+ pmd_ctor);
|
|
|
|
if (!SHARED_KERNEL_PMD) {
|
|
|
|
/* If we're in PAE mode and have a non-shared
|
|
|
|
kernel pmd, then the pgd size must be a
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/ioremap-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/ioremap-xen.c 2011-02-07 15:38:30.000000000 +0100
|
|
|
|
@@ -344,9 +344,8 @@ void iounmap(volatile void __iomem *addr
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
/* Reset the direct mapping. Can block */
|
|
|
|
if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
|
|
|
|
- /* p->size includes the guard page, but cpa doesn't like that */
|
|
|
|
change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
|
|
|
|
- (p->size - PAGE_SIZE) >> PAGE_SHIFT,
|
|
|
|
+ get_vm_area_size(p) >> PAGE_SHIFT,
|
|
|
|
PAGE_KERNEL);
|
|
|
|
global_flush_tlb();
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/pgtable_32-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -198,7 +198,7 @@ void pte_free(struct page *pte)
|
|
|
|
va, pfn_pte(pfn, PAGE_KERNEL), 0))
|
|
|
|
BUG();
|
|
|
|
} else
|
|
|
|
- clear_bit(PG_pinned, &pte->flags);
|
|
|
|
+ ClearPagePinned(pte);
|
|
|
|
|
|
|
|
ClearPageForeign(pte);
|
|
|
|
init_page_count(pte);
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -249,7 +249,7 @@ static inline void pgd_list_del(pgd_t *p
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
#if (PTRS_PER_PMD == 1)
|
|
|
|
/* Non-PAE pgd constructor */
|
|
|
|
-void pgd_ctor(void *pgd)
|
|
|
|
+static void pgd_ctor(void *pgd)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -272,7 +272,7 @@ void pgd_ctor(void *pgd)
|
2010-07-07 11:12:45 +00:00
|
|
|
}
|
|
|
|
#else /* PTRS_PER_PMD > 1 */
|
|
|
|
/* PAE pgd constructor */
|
|
|
|
-void pgd_ctor(void *pgd)
|
|
|
|
+static void pgd_ctor(void *pgd)
|
|
|
|
{
|
|
|
|
/* PAE, kernel PMD may be shared */
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -286,7 +286,7 @@ void pgd_ctor(void *pgd)
|
2010-07-07 11:12:45 +00:00
|
|
|
}
|
|
|
|
#endif /* PTRS_PER_PMD */
|
|
|
|
|
|
|
|
-void pgd_dtor(void *pgd)
|
|
|
|
+static void pgd_dtor(void *pgd)
|
|
|
|
{
|
|
|
|
unsigned long flags; /* can be called from interrupt context */
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -644,9 +644,9 @@ static inline unsigned int pgd_walk_set_
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
if (PageHighMem(page)) {
|
|
|
|
if (pgprot_val(flags) & _PAGE_RW)
|
|
|
|
- clear_bit(PG_pinned, &page->flags);
|
|
|
|
+ ClearPagePinned(page);
|
|
|
|
else
|
|
|
|
- set_bit(PG_pinned, &page->flags);
|
|
|
|
+ SetPagePinned(page);
|
|
|
|
} else {
|
|
|
|
MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
|
|
|
|
(unsigned long)__va(pfn << PAGE_SHIFT),
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -716,19 +716,19 @@ static void __pgd_pin(pgd_t *pgd)
|
2010-07-07 11:12:45 +00:00
|
|
|
pgd_walk(pgd, PAGE_KERNEL_RO);
|
|
|
|
kmap_flush_unused();
|
|
|
|
xen_pgd_pin(__pa(pgd));
|
|
|
|
- set_bit(PG_pinned, &virt_to_page(pgd)->flags);
|
|
|
|
+ SetPagePinned(virt_to_page(pgd));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __pgd_unpin(pgd_t *pgd)
|
|
|
|
{
|
|
|
|
xen_pgd_unpin(__pa(pgd));
|
|
|
|
pgd_walk(pgd, PAGE_KERNEL);
|
|
|
|
- clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
|
|
|
|
+ ClearPagePinned(virt_to_page(pgd));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pgd_test_and_unpin(pgd_t *pgd)
|
|
|
|
{
|
|
|
|
- if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
|
|
|
|
+ if (PagePinned(virt_to_page(pgd)))
|
|
|
|
__pgd_unpin(pgd);
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -766,7 +766,7 @@ void mm_pin_all(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
*/
|
|
|
|
spin_lock_irqsave(&pgd_lock, flags);
|
|
|
|
for (page = pgd_list; page; page = (struct page *)page->index) {
|
|
|
|
- if (!test_bit(PG_pinned, &page->flags))
|
|
|
|
+ if (!PagePinned(page))
|
|
|
|
__pgd_pin((pgd_t *)page_address(page));
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&pgd_lock, flags);
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -774,7 +774,7 @@ void mm_pin_all(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
- if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
|
|
|
|
+ if (!PagePinned(virt_to_page(mm->pgd)))
|
|
|
|
mm_pin(mm);
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -800,7 +800,7 @@ void arch_exit_mmap(struct mm_struct *mm
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
task_unlock(tsk);
|
|
|
|
|
|
|
|
- if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
|
|
|
|
+ if (PagePinned(virt_to_page(mm->pgd)) &&
|
|
|
|
(atomic_read(&mm->mm_count) == 1) &&
|
|
|
|
!mm->context.has_foreign_mappings)
|
|
|
|
mm_unpin(mm);
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/pci/irq-xen.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/pci/irq-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -142,8 +142,9 @@ static void __init pirq_peer_trick(void)
|
|
|
|
for(i = 1; i < 256; i++) {
|
|
|
|
if (!busmap[i] || pci_find_bus(0, i))
|
|
|
|
continue;
|
|
|
|
- if (pci_scan_bus(i, &pci_root_ops, NULL))
|
|
|
|
- printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
|
|
|
|
+ if (pci_scan_bus_with_sysdata(i))
|
|
|
|
+ printk(KERN_INFO "PCI: Discovered primary peer "
|
|
|
|
+ "bus %02x [IRQ]\n", i);
|
|
|
|
}
|
|
|
|
pcibios_last_bus = -1;
|
|
|
|
}
|
|
|
|
@@ -553,6 +554,7 @@ static __init int intel_router_probe(str
|
|
|
|
case PCI_DEVICE_ID_INTEL_ICH9_3:
|
|
|
|
case PCI_DEVICE_ID_INTEL_ICH9_4:
|
|
|
|
case PCI_DEVICE_ID_INTEL_ICH9_5:
|
|
|
|
+ case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
|
|
|
|
r->name = "PIIX/ICH";
|
|
|
|
r->get = pirq_piix_get;
|
|
|
|
r->set = pirq_piix_set;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/xen/Kconfig 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/xen/Kconfig 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -2,7 +2,7 @@
|
|
|
|
# This Kconfig describes xen options
|
|
|
|
#
|
|
|
|
|
|
|
|
-config XEN
|
|
|
|
+config PARAVIRT_XEN
|
|
|
|
bool "Xen guest support"
|
2011-04-19 20:09:59 +00:00
|
|
|
select PARAVIRT
|
2010-07-07 11:12:45 +00:00
|
|
|
select PARAVIRT_CLOCK
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/xen/xen-head.S 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/xen/xen-head.S 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -1,7 +1,7 @@
|
|
|
|
/* Xen-specific pieces of head.S, intended to be included in the right
|
|
|
|
place in head.S */
|
|
|
|
|
|
|
|
-#ifdef CONFIG_XEN
|
|
|
|
+#ifdef CONFIG_PARAVIRT_XEN
|
|
|
|
|
|
|
|
#include <linux/elfnote.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
@@ -52,4 +52,4 @@ ENTRY(hypercall_page)
|
|
|
|
ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, _ASM_PTR __HYPERVISOR_VIRT_START)
|
|
|
|
ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, _ASM_PTR 0)
|
|
|
|
|
|
|
|
-#endif /*CONFIG_XEN */
|
|
|
|
+#endif /* CONFIG_PARAVIRT_XEN */
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/ia32/ia32entry-xen.S 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -105,7 +105,7 @@ ENTRY(ia32_sysenter_target)
|
|
|
|
movl $VSYSCALL32_SYSEXIT,8(%rsp)
|
|
|
|
movq %rax,(%rsp)
|
|
|
|
cld
|
|
|
|
- SAVE_ARGS 0,0,0
|
|
|
|
+ SAVE_ARGS 0,0,1
|
|
|
|
/* no need to do an access_ok check here because rbp has been
|
|
|
|
32bit zero extended */
|
|
|
|
1: movl (%rbp),%r9d
|
|
|
|
@@ -244,7 +244,7 @@ ia32_badarg:
|
|
|
|
*/
|
|
|
|
|
|
|
|
ENTRY(ia32_syscall)
|
|
|
|
- CFI_STARTPROC simple
|
|
|
|
+ CFI_STARTPROC32 simple
|
|
|
|
CFI_SIGNAL_FRAME
|
|
|
|
CFI_DEF_CFA rsp,SS+8-RIP+16
|
|
|
|
/*CFI_REL_OFFSET ss,SS-RIP+16*/
|
|
|
|
@@ -280,6 +280,7 @@ ia32_sysret:
|
|
|
|
|
|
|
|
ia32_tracesys:
|
|
|
|
SAVE_REST
|
|
|
|
+ CLEAR_RREGS
|
|
|
|
movq $-ENOSYS,RAX(%rsp) /* really needed? */
|
|
|
|
movq %rsp,%rdi /* &pt_regs -> arg1 */
|
|
|
|
call syscall_trace_enter
|
|
|
|
@@ -476,7 +477,7 @@ ia32_sys_call_table:
|
|
|
|
.quad sys_init_module
|
|
|
|
.quad sys_delete_module
|
|
|
|
.quad quiet_ni_syscall /* 130 get_kernel_syms */
|
|
|
|
- .quad sys_quotactl
|
|
|
|
+ .quad sys32_quotactl
|
|
|
|
.quad sys_getpgid
|
|
|
|
.quad sys_fchdir
|
|
|
|
.quad quiet_ni_syscall /* bdflush */
|
|
|
|
@@ -669,4 +670,5 @@ ia32_sys_call_table:
|
|
|
|
.quad compat_sys_signalfd
|
|
|
|
.quad compat_sys_timerfd
|
|
|
|
.quad sys_eventfd
|
|
|
|
+ .quad sys32_fallocate
|
|
|
|
ia32_syscall_end:
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/acpi/sleep_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -51,12 +51,10 @@
|
|
|
|
Low-Level Sleep Support
|
|
|
|
-------------------------------------------------------------------------- */
|
|
|
|
|
|
|
|
-#ifdef CONFIG_ACPI_SLEEP
|
|
|
|
-
|
|
|
|
#ifndef CONFIG_ACPI_PV_SLEEP
|
|
|
|
/* address in low memory of the wakeup routine. */
|
|
|
|
unsigned long acpi_wakeup_address = 0;
|
|
|
|
-unsigned long acpi_video_flags;
|
|
|
|
+unsigned long acpi_realmode_flags;
|
|
|
|
extern char wakeup_start, wakeup_end;
|
|
|
|
|
|
|
|
extern unsigned long acpi_copy_wakeup_routine(unsigned long);
|
|
|
|
@@ -109,9 +107,11 @@ static int __init acpi_sleep_setup(char
|
|
|
|
{
|
|
|
|
while ((str != NULL) && (*str != '\0')) {
|
|
|
|
if (strncmp(str, "s3_bios", 7) == 0)
|
|
|
|
- acpi_video_flags = 1;
|
|
|
|
+ acpi_realmode_flags |= 1;
|
|
|
|
if (strncmp(str, "s3_mode", 7) == 0)
|
|
|
|
- acpi_video_flags |= 2;
|
|
|
|
+ acpi_realmode_flags |= 2;
|
|
|
|
+ if (strncmp(str, "s3_beep", 7) == 0)
|
|
|
|
+ acpi_realmode_flags |= 4;
|
|
|
|
str = strchr(str, ',');
|
|
|
|
if (str != NULL)
|
|
|
|
str += strspn(str, ", \t");
|
|
|
|
@@ -123,8 +123,6 @@ static int __init acpi_sleep_setup(char
|
|
|
|
__setup("acpi_sleep=", acpi_sleep_setup);
|
|
|
|
#endif /* CONFIG_ACPI_PV_SLEEP */
|
|
|
|
|
|
|
|
-#endif /*CONFIG_ACPI_SLEEP */
|
|
|
|
-
|
|
|
|
void acpi_pci_link_exit(void)
|
|
|
|
{
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/e820_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -224,37 +224,6 @@ unsigned long __init e820_end_of_ram(voi
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
- * Find the hole size in the range.
|
|
|
|
- */
|
|
|
|
-unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
|
|
|
|
-{
|
|
|
|
- unsigned long ram = 0;
|
|
|
|
- int i;
|
|
|
|
-
|
|
|
|
- for (i = 0; i < e820.nr_map; i++) {
|
|
|
|
- struct e820entry *ei = &e820.map[i];
|
|
|
|
- unsigned long last, addr;
|
|
|
|
-
|
|
|
|
- if (ei->type != E820_RAM ||
|
|
|
|
- ei->addr+ei->size <= start ||
|
|
|
|
- ei->addr >= end)
|
|
|
|
- continue;
|
|
|
|
-
|
|
|
|
- addr = round_up(ei->addr, PAGE_SIZE);
|
|
|
|
- if (addr < start)
|
|
|
|
- addr = start;
|
|
|
|
-
|
|
|
|
- last = round_down(ei->addr + ei->size, PAGE_SIZE);
|
|
|
|
- if (last >= end)
|
|
|
|
- last = end;
|
|
|
|
-
|
|
|
|
- if (last > addr)
|
|
|
|
- ram += last - addr;
|
|
|
|
- }
|
|
|
|
- return ((end - start) - ram);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
* Mark e820 reserved areas as busy for the resource manager.
|
|
|
|
*/
|
|
|
|
void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
|
|
|
|
@@ -327,54 +296,66 @@ void __init e820_mark_nosave_regions(voi
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
+/*
|
|
|
|
+ * Finds an active region in the address range from start_pfn to end_pfn and
|
|
|
|
+ * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
|
|
|
|
+ */
|
|
|
|
+static int __init e820_find_active_region(const struct e820entry *ei,
|
|
|
|
+ unsigned long start_pfn,
|
|
|
|
+ unsigned long end_pfn,
|
|
|
|
+ unsigned long *ei_startpfn,
|
|
|
|
+ unsigned long *ei_endpfn)
|
|
|
|
+{
|
|
|
|
+#ifdef CONFIG_XEN
|
|
|
|
+ if (end_pfn > xen_start_info->nr_pages)
|
|
|
|
+ end_pfn = xen_start_info->nr_pages;
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
+ *ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
|
|
|
|
+ *ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE) >> PAGE_SHIFT;
|
|
|
|
+
|
|
|
|
+ /* Skip map entries smaller than a page */
|
|
|
|
+ if (*ei_startpfn >= *ei_endpfn)
|
|
|
|
+ return 0;
|
|
|
|
+
|
|
|
|
+ /* Check if end_pfn_map should be updated */
|
|
|
|
+ if (ei->type != E820_RAM && *ei_endpfn > end_pfn_map)
|
|
|
|
+ end_pfn_map = *ei_endpfn;
|
|
|
|
+
|
|
|
|
+ /* Skip if map is outside the node */
|
|
|
|
+ if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
|
|
|
|
+ *ei_startpfn >= end_pfn)
|
|
|
|
+ return 0;
|
|
|
|
+
|
|
|
|
+ /* Check for overlaps */
|
|
|
|
+ if (*ei_startpfn < start_pfn)
|
|
|
|
+ *ei_startpfn = start_pfn;
|
|
|
|
+ if (*ei_endpfn > end_pfn)
|
|
|
|
+ *ei_endpfn = end_pfn;
|
|
|
|
+
|
|
|
|
+ /* Obey end_user_pfn to save on memmap */
|
|
|
|
+ if (*ei_startpfn >= end_user_pfn)
|
|
|
|
+ return 0;
|
|
|
|
+ if (*ei_endpfn > end_user_pfn)
|
|
|
|
+ *ei_endpfn = end_user_pfn;
|
|
|
|
+
|
|
|
|
+ return 1;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
/* Walk the e820 map and register active regions within a node */
|
|
|
|
void __init
|
|
|
|
e820_register_active_regions(int nid, unsigned long start_pfn,
|
|
|
|
unsigned long end_pfn)
|
|
|
|
{
|
|
|
|
+ unsigned long ei_startpfn;
|
|
|
|
+ unsigned long ei_endpfn;
|
|
|
|
int i;
|
|
|
|
- unsigned long ei_startpfn, ei_endpfn;
|
|
|
|
- for (i = 0; i < e820.nr_map; i++) {
|
|
|
|
- struct e820entry *ei = &e820.map[i];
|
|
|
|
- ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
|
|
|
|
- ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE)
|
|
|
|
- >> PAGE_SHIFT;
|
|
|
|
-
|
|
|
|
- /* Skip map entries smaller than a page */
|
|
|
|
- if (ei_startpfn >= ei_endpfn)
|
|
|
|
- continue;
|
|
|
|
-
|
|
|
|
- /* Check if end_pfn_map should be updated */
|
|
|
|
- if (ei->type != E820_RAM && ei_endpfn > end_pfn_map)
|
|
|
|
- end_pfn_map = ei_endpfn;
|
|
|
|
-
|
|
|
|
- /* Skip if map is outside the node */
|
|
|
|
- if (ei->type != E820_RAM ||
|
|
|
|
- ei_endpfn <= start_pfn ||
|
|
|
|
- ei_startpfn >= end_pfn)
|
|
|
|
- continue;
|
|
|
|
-
|
|
|
|
- /* Check for overlaps */
|
|
|
|
- if (ei_startpfn < start_pfn)
|
|
|
|
- ei_startpfn = start_pfn;
|
|
|
|
- if (ei_endpfn > end_pfn)
|
|
|
|
- ei_endpfn = end_pfn;
|
|
|
|
-
|
|
|
|
- /* Obey end_user_pfn to save on memmap */
|
|
|
|
- if (ei_startpfn >= end_user_pfn)
|
|
|
|
- continue;
|
|
|
|
- if (ei_endpfn > end_user_pfn)
|
|
|
|
- ei_endpfn = end_user_pfn;
|
|
|
|
-
|
|
|
|
-#ifdef CONFIG_XEN
|
|
|
|
- if (ei_startpfn >= xen_start_info->nr_pages)
|
|
|
|
- continue;
|
|
|
|
- if (ei_endpfn > xen_start_info->nr_pages)
|
|
|
|
- ei_endpfn = xen_start_info->nr_pages;
|
|
|
|
-#endif
|
|
|
|
|
|
|
|
- add_active_range(nid, ei_startpfn, ei_endpfn);
|
|
|
|
- }
|
|
|
|
+ for (i = 0; i < e820.nr_map; i++)
|
|
|
|
+ if (e820_find_active_region(&e820.map[i],
|
|
|
|
+ start_pfn, end_pfn,
|
|
|
|
+ &ei_startpfn, &ei_endpfn))
|
|
|
|
+ add_active_range(nid, ei_startpfn, ei_endpfn);
|
|
|
|
#ifdef CONFIG_XEN
|
|
|
|
BUG_ON(nid);
|
|
|
|
add_active_range(nid, end_pfn, end_pfn);
|
|
|
|
@@ -399,12 +380,35 @@ void __init add_memory_region(unsigned l
|
|
|
|
e820.nr_map++;
|
|
|
|
}
|
|
|
|
|
|
|
|
+/*
|
|
|
|
+ * Find the hole size (in bytes) in the memory range.
|
|
|
|
+ * @start: starting address of the memory range to scan
|
|
|
|
+ * @end: ending address of the memory range to scan
|
|
|
|
+ */
|
|
|
|
+unsigned long __init e820_hole_size(unsigned long start, unsigned long end)
|
|
|
|
+{
|
|
|
|
+ unsigned long start_pfn = start >> PAGE_SHIFT;
|
|
|
|
+ unsigned long end_pfn = end >> PAGE_SHIFT;
|
|
|
|
+ unsigned long ei_startpfn;
|
|
|
|
+ unsigned long ei_endpfn;
|
|
|
|
+ unsigned long ram = 0;
|
|
|
|
+ int i;
|
|
|
|
+
|
|
|
|
+ for (i = 0; i < e820.nr_map; i++) {
|
|
|
|
+ if (e820_find_active_region(&e820.map[i],
|
|
|
|
+ start_pfn, end_pfn,
|
|
|
|
+ &ei_startpfn, &ei_endpfn))
|
|
|
|
+ ram += ei_endpfn - ei_startpfn;
|
|
|
|
+ }
|
|
|
|
+ return end - start - (ram << PAGE_SHIFT);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
void __init e820_print_map(char *who)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < e820.nr_map; i++) {
|
|
|
|
- printk(" %s: %016Lx - %016Lx ", who,
|
|
|
|
+ printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
|
|
|
|
(unsigned long long) e820.map[i].addr,
|
|
|
|
(unsigned long long) (e820.map[i].addr + e820.map[i].size));
|
|
|
|
switch (e820.map[i].type) {
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/early_printk-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -174,6 +174,7 @@ static __init void early_serial_init(cha
|
|
|
|
* mappings. Someone should fix this for domain 0. For now, use fake serial.
|
|
|
|
*/
|
|
|
|
#define early_vga_console early_serial_console
|
|
|
|
+#define xenboot_console early_serial_console
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
@@ -261,20 +262,22 @@ static int __init setup_early_printk(cha
|
|
|
|
} else if (!strncmp(buf, "ttyS", 4)) {
|
|
|
|
early_serial_init(buf);
|
|
|
|
early_console = &early_serial_console;
|
|
|
|
- } else if (!strncmp(buf, "vga", 3)
|
|
|
|
+ } else if (!strncmp(buf, "vga", 3)) {
|
|
|
|
#ifndef CONFIG_XEN
|
|
|
|
&& SCREEN_INFO.orig_video_isVGA == 1) {
|
|
|
|
max_xpos = SCREEN_INFO.orig_video_cols;
|
|
|
|
max_ypos = SCREEN_INFO.orig_video_lines;
|
|
|
|
current_ypos = SCREEN_INFO.orig_y;
|
|
|
|
-#else
|
|
|
|
- || !strncmp(buf, "xen", 3)) {
|
|
|
|
#endif
|
|
|
|
early_console = &early_vga_console;
|
|
|
|
} else if (!strncmp(buf, "simnow", 6)) {
|
|
|
|
simnow_init(buf + 6);
|
|
|
|
early_console = &simnow_console;
|
|
|
|
keep_early = 1;
|
|
|
|
+#ifdef CONFIG_XEN
|
|
|
|
+ } else if (!strncmp(buf, "xen", 3)) {
|
|
|
|
+ early_console = &xenboot_console;
|
|
|
|
+#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
if (keep_early)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/entry_64-xen.S 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -310,7 +310,7 @@ sysret_signal:
|
|
|
|
TRACE_IRQS_ON
|
|
|
|
/* sti */
|
|
|
|
XEN_UNBLOCK_EVENTS(%rsi)
|
|
|
|
- testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
|
|
|
|
+ testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
|
|
|
|
jz 1f
|
|
|
|
|
|
|
|
/* Really a signal */
|
|
|
|
@@ -409,7 +409,7 @@ int_very_careful:
|
|
|
|
jmp int_restore_rest
|
|
|
|
|
|
|
|
int_signal:
|
|
|
|
- testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
|
|
|
|
+ testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
|
|
|
|
jz 1f
|
|
|
|
movq %rsp,%rdi # &ptregs -> arg1
|
|
|
|
xorl %esi,%esi # oldset -> arg2
|
|
|
|
@@ -552,7 +552,7 @@ retint_careful:
|
|
|
|
jmp retint_check
|
|
|
|
|
|
|
|
retint_signal:
|
|
|
|
- testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
|
|
|
|
+ testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx
|
|
|
|
jz retint_restore_args
|
|
|
|
TRACE_IRQS_ON
|
|
|
|
XEN_UNBLOCK_EVENTS(%rsi)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/head_64-xen.S 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/head_64-xen.S 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -23,7 +23,7 @@
|
|
|
|
#include <asm/dwarf2.h>
|
|
|
|
#include <xen/interface/elfnote.h>
|
|
|
|
|
|
|
|
- .section .bootstrap.text, "ax", @progbits
|
|
|
|
+ .section .text.head, "ax", @progbits
|
|
|
|
.code64
|
|
|
|
.globl startup_64
|
|
|
|
startup_64:
|
|
|
|
@@ -39,7 +39,7 @@ startup_64:
|
|
|
|
|
|
|
|
#define NEXT_PAGE(name) \
|
|
|
|
.balign PAGE_SIZE; \
|
|
|
|
- phys_##name = . - .bootstrap.text; \
|
|
|
|
+ phys_##name = . - .text.head; \
|
|
|
|
ENTRY(name)
|
|
|
|
|
|
|
|
NEXT_PAGE(init_level4_pgt)
|
|
|
|
@@ -63,6 +63,12 @@ NEXT_PAGE(level3_kernel_pgt)
|
|
|
|
NEXT_PAGE(level3_user_pgt)
|
|
|
|
.fill 512,8,0
|
|
|
|
|
|
|
|
+NEXT_PAGE(level2_fixmap_pgt)
|
|
|
|
+ .fill 512,8,0
|
|
|
|
+
|
|
|
|
+NEXT_PAGE(level1_fixmap_pgt)
|
|
|
|
+ .fill 512,8,0
|
|
|
|
+
|
|
|
|
NEXT_PAGE(hypercall_page)
|
|
|
|
CFI_STARTPROC
|
|
|
|
.rept 0x1000 / 0x20
|
|
|
|
@@ -169,18 +175,18 @@ ENTRY(empty_zero_page)
|
|
|
|
.byte 0
|
|
|
|
#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
|
|
|
|
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad, __START_KERNEL_map)
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6")
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0")
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad __START_KERNEL_map)
|
|
|
|
#if CONFIG_XEN_COMPAT <= 0x030002
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, __START_KERNEL_map)
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad __START_KERNEL_map)
|
|
|
|
#else
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, 0)
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad 0)
|
|
|
|
#endif
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64)
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page)
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel")
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
|
|
|
|
- ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad startup_64)
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad hypercall_page)
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad _PAGE_PRESENT, _PAGE_PRESENT)
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel")
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
|
|
|
|
+ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/head64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/head64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -90,7 +90,7 @@ void __init x86_64_start_kernel(char * r
|
|
|
|
unsigned long machine_to_phys_nr_ents;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
- setup_xen_features();
|
|
|
|
+ xen_setup_features();
|
|
|
|
|
|
|
|
xen_start_info = (struct start_info *)real_mode_data;
|
|
|
|
if (!xen_feature(XENFEAT_auto_translated_physmap))
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/io_apic_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -165,7 +165,9 @@ static inline void io_apic_write(unsigne
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
-#ifndef CONFIG_XEN
|
|
|
|
+#ifdef CONFIG_XEN
|
|
|
|
+#define io_apic_modify io_apic_write
|
|
|
|
+#else
|
|
|
|
/*
|
|
|
|
* Re-write a value: to be used for read-modify-write
|
|
|
|
* cycles where the read already set up the index register.
|
|
|
|
@@ -175,8 +177,32 @@ static inline void io_apic_modify(unsign
|
|
|
|
struct io_apic __iomem *io_apic = io_apic_base(apic);
|
|
|
|
writel(value, &io_apic->data);
|
|
|
|
}
|
|
|
|
-#else
|
|
|
|
-#define io_apic_modify io_apic_write
|
|
|
|
+
|
|
|
|
+static int io_apic_level_ack_pending(unsigned int irq)
|
|
|
|
+{
|
|
|
|
+ struct irq_pin_list *entry;
|
|
|
|
+ unsigned long flags;
|
|
|
|
+ int pending = 0;
|
|
|
|
+
|
|
|
|
+ spin_lock_irqsave(&ioapic_lock, flags);
|
|
|
|
+ entry = irq_2_pin + irq;
|
|
|
|
+ for (;;) {
|
|
|
|
+ unsigned int reg;
|
|
|
|
+ int pin;
|
|
|
|
+
|
|
|
|
+ pin = entry->pin;
|
|
|
|
+ if (pin == -1)
|
|
|
|
+ break;
|
|
|
|
+ reg = io_apic_read(entry->apic, 0x10 + pin*2);
|
|
|
|
+ /* Is the remote IRR bit set? */
|
|
|
|
+ pending |= (reg >> 14) & 1;
|
|
|
|
+ if (!entry->next)
|
|
|
|
+ break;
|
|
|
|
+ entry = irq_2_pin + entry->next;
|
|
|
|
+ }
|
|
|
|
+ spin_unlock_irqrestore(&ioapic_lock, flags);
|
|
|
|
+ return pending;
|
|
|
|
+}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -406,14 +432,12 @@ static void clear_IO_APIC (void)
|
|
|
|
int skip_ioapic_setup;
|
|
|
|
int ioapic_force;
|
|
|
|
|
|
|
|
-/* dummy parsing: see setup.c */
|
|
|
|
-
|
|
|
|
-static int __init disable_ioapic_setup(char *str)
|
|
|
|
+static int __init parse_noapic(char *str)
|
|
|
|
{
|
|
|
|
- skip_ioapic_setup = 1;
|
|
|
|
+ disable_ioapic_setup();
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
-early_param("noapic", disable_ioapic_setup);
|
|
|
|
+early_param("noapic", parse_noapic);
|
|
|
|
|
|
|
|
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
|
|
|
|
static int __init disable_timer_pin_setup(char *arg)
|
|
|
|
@@ -765,12 +789,15 @@ static struct irq_chip ioapic_chip;
|
|
|
|
|
|
|
|
static void ioapic_register_intr(int irq, unsigned long trigger)
|
|
|
|
{
|
|
|
|
- if (trigger)
|
|
|
|
+ if (trigger) {
|
|
|
|
+ irq_desc[irq].status |= IRQ_LEVEL;
|
|
|
|
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
|
|
|
handle_fasteoi_irq, "fasteoi");
|
|
|
|
- else
|
|
|
|
+ } else {
|
|
|
|
+ irq_desc[irq].status &= ~IRQ_LEVEL;
|
|
|
|
set_irq_chip_and_handler_name(irq, &ioapic_chip,
|
|
|
|
handle_edge_irq, "edge");
|
|
|
|
+ }
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
#define ioapic_register_intr(irq, trigger) evtchn_register_pirq(irq)
|
|
|
|
@@ -1419,9 +1446,37 @@ static void ack_apic_level(unsigned int
|
|
|
|
ack_APIC_irq();
|
|
|
|
|
|
|
|
/* Now we can move and renable the irq */
|
|
|
|
- move_masked_irq(irq);
|
|
|
|
- if (unlikely(do_unmask_irq))
|
|
|
|
+ if (unlikely(do_unmask_irq)) {
|
|
|
|
+ /* Only migrate the irq if the ack has been received.
|
|
|
|
+ *
|
|
|
|
+ * On rare occasions the broadcast level triggered ack gets
|
|
|
|
+ * delayed going to ioapics, and if we reprogram the
|
|
|
|
+ * vector while Remote IRR is still set the irq will never
|
|
|
|
+ * fire again.
|
|
|
|
+ *
|
|
|
|
+ * To prevent this scenario we read the Remote IRR bit
|
|
|
|
+ * of the ioapic. This has two effects.
|
|
|
|
+ * - On any sane system the read of the ioapic will
|
|
|
|
+ * flush writes (and acks) going to the ioapic from
|
|
|
|
+ * this cpu.
|
|
|
|
+ * - We get to see if the ACK has actually been delivered.
|
|
|
|
+ *
|
|
|
|
+ * Based on failed experiments of reprogramming the
|
|
|
|
+ * ioapic entry from outside of irq context starting
|
|
|
|
+ * with masking the ioapic entry and then polling until
|
|
|
|
+ * Remote IRR was clear before reprogramming the
|
|
|
|
+ * ioapic I don't trust the Remote IRR bit to be
|
|
|
|
+ * completey accurate.
|
|
|
|
+ *
|
|
|
|
+ * However there appears to be no other way to plug
|
|
|
|
+ * this race, so if the Remote IRR bit is not
|
|
|
|
+ * accurate and is causing problems then it is a hardware bug
|
|
|
|
+ * and you can go talk to the chipset vendor about it.
|
|
|
|
+ */
|
|
|
|
+ if (!io_apic_level_ack_pending(irq))
|
|
|
|
+ move_masked_irq(irq);
|
|
|
|
unmask_IO_APIC_irq(irq);
|
|
|
|
+ }
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct irq_chip ioapic_chip __read_mostly = {
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/ldt_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -114,6 +114,8 @@ int init_new_context(struct task_struct
|
|
|
|
memset(&mm->context, 0, sizeof(mm->context));
|
|
|
|
init_MUTEX(&mm->context.sem);
|
|
|
|
old_mm = current->mm;
|
|
|
|
+ if (old_mm)
|
|
|
|
+ mm->context.vdso = old_mm->context.vdso;
|
|
|
|
if (old_mm && old_mm->context.size > 0) {
|
|
|
|
down(&old_mm->context.sem);
|
|
|
|
retval = copy_ldt(&mm->context, &old_mm->context);
|
|
|
|
@@ -146,7 +148,7 @@ void destroy_context(struct mm_struct *m
|
|
|
|
kfree(mm->context.ldt);
|
|
|
|
mm->context.size = 0;
|
|
|
|
}
|
|
|
|
- if (!mm->context.pinned) {
|
|
|
|
+ if (!PagePinned(virt_to_page(mm->pgd))) {
|
|
|
|
spin_lock(&mm_unpinned_lock);
|
|
|
|
list_del(&mm->context.unpinned);
|
|
|
|
spin_unlock(&mm_unpinned_lock);
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/mpparse_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -32,7 +32,6 @@
|
|
|
|
|
|
|
|
/* Have we found an MP table */
|
|
|
|
int smp_found_config;
|
|
|
|
-unsigned int __initdata maxcpus = NR_CPUS;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Various Linux-internal data structures created from the
|
|
|
|
@@ -657,6 +656,20 @@ static int mp_find_ioapic(int gsi)
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static u8 uniq_ioapic_id(u8 id)
|
|
|
|
+{
|
|
|
|
+ int i;
|
|
|
|
+ DECLARE_BITMAP(used, 256);
|
|
|
|
+ bitmap_zero(used, 256);
|
|
|
|
+ for (i = 0; i < nr_ioapics; i++) {
|
|
|
|
+ struct mpc_config_ioapic *ia = &mp_ioapics[i];
|
|
|
|
+ __set_bit(ia->mpc_apicid, used);
|
|
|
|
+ }
|
|
|
|
+ if (!test_bit(id, used))
|
|
|
|
+ return id;
|
|
|
|
+ return find_first_zero_bit(used, 256);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
|
|
|
|
{
|
|
|
|
int idx = 0;
|
|
|
|
@@ -664,7 +677,7 @@ void __init mp_register_ioapic(u8 id, u3
|
|
|
|
if (bad_ioapic(address))
|
|
|
|
return;
|
|
|
|
|
|
|
|
- idx = nr_ioapics++;
|
|
|
|
+ idx = nr_ioapics;
|
|
|
|
|
|
|
|
mp_ioapics[idx].mpc_type = MP_IOAPIC;
|
|
|
|
mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
|
|
|
|
@@ -673,7 +686,7 @@ void __init mp_register_ioapic(u8 id, u3
|
|
|
|
#ifndef CONFIG_XEN
|
|
|
|
set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
|
|
|
|
#endif
|
|
|
|
- mp_ioapics[idx].mpc_apicid = id;
|
|
|
|
+ mp_ioapics[idx].mpc_apicid = uniq_ioapic_id(id);
|
|
|
|
mp_ioapics[idx].mpc_apicver = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -690,6 +703,8 @@ void __init mp_register_ioapic(u8 id, u3
|
|
|
|
mp_ioapics[idx].mpc_apicaddr,
|
|
|
|
mp_ioapic_routing[idx].gsi_start,
|
|
|
|
mp_ioapic_routing[idx].gsi_end);
|
|
|
|
+
|
|
|
|
+ nr_ioapics++;
|
|
|
|
}
|
|
|
|
|
|
|
|
void __init
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:39.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/process_64-xen.c 2011-02-02 08:31:50.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -26,6 +26,7 @@
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
+#include <linux/fs.h>
|
|
|
|
#include <linux/elfcore.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
#include <linux/slab.h>
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -248,6 +249,7 @@ early_param("idle", idle_setup);
|
|
|
|
void __show_regs(struct pt_regs * regs)
|
|
|
|
{
|
|
|
|
unsigned long fs, gs, shadowgs;
|
|
|
|
+ unsigned long d0, d1, d2, d3, d6, d7;
|
|
|
|
unsigned int fsindex,gsindex;
|
|
|
|
unsigned int ds,cs,es;
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -287,6 +289,14 @@ void __show_regs(struct pt_regs * regs)
|
|
|
|
fs,fsindex,gs,gsindex,shadowgs);
|
|
|
|
printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
|
|
|
|
|
|
|
|
+ get_debugreg(d0, 0);
|
|
|
|
+ get_debugreg(d1, 1);
|
|
|
|
+ get_debugreg(d2, 2);
|
|
|
|
+ printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
|
|
|
|
+ get_debugreg(d3, 3);
|
|
|
|
+ get_debugreg(d6, 6);
|
|
|
|
+ get_debugreg(d7, 7);
|
|
|
|
+ printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
|
|
|
|
}
|
|
|
|
|
|
|
|
void show_regs(struct pt_regs *regs)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/setup_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -828,6 +828,8 @@ static void __cpuinit init_amd(struct cp
|
|
|
|
level = cpuid_eax(1);
|
|
|
|
if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
|
|
|
|
set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
|
|
|
|
+ if (c->x86 == 0x10)
|
|
|
|
+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
|
|
|
|
|
|
|
|
/* Enable workaround for FXSAVE leak */
|
|
|
|
if (c->x86 >= 6)
|
|
|
|
@@ -853,8 +855,14 @@ static void __cpuinit init_amd(struct cp
|
|
|
|
if (c->extended_cpuid_level >= 0x80000008)
|
|
|
|
amd_detect_cmp(c);
|
|
|
|
|
|
|
|
- /* Fix cpuid4 emulation for more */
|
|
|
|
- num_cache_leaves = 3;
|
|
|
|
+ if (c->extended_cpuid_level >= 0x80000006 &&
|
|
|
|
+ (cpuid_edx(0x80000006) & 0xf000))
|
|
|
|
+ num_cache_leaves = 4;
|
|
|
|
+ else
|
|
|
|
+ num_cache_leaves = 3;
|
|
|
|
+
|
|
|
|
+ if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x11)
|
|
|
|
+ set_bit(X86_FEATURE_K8, &c->x86_capability);
|
|
|
|
|
|
|
|
/* RDTSC can be speculated around */
|
|
|
|
clear_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
|
|
|
|
@@ -1099,6 +1107,8 @@ void __cpuinit identify_cpu(struct cpuin
|
|
|
|
c->x86_capability[2] = cpuid_edx(0x80860001);
|
|
|
|
}
|
|
|
|
|
|
|
|
+ init_scattered_cpuid_features(c);
|
|
|
|
+
|
|
|
|
#ifndef CONFIG_XEN
|
|
|
|
c->apicid = phys_pkg_id(0);
|
|
|
|
#endif
|
|
|
|
@@ -1186,7 +1196,7 @@ static int show_cpuinfo(struct seq_file
|
|
|
|
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
|
|
|
|
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
|
|
|
|
"pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
|
|
|
|
- "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
|
|
|
|
+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
|
|
|
|
|
|
|
|
/* AMD-defined */
|
|
|
|
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
@@ -1202,10 +1212,11 @@ static int show_cpuinfo(struct seq_file
|
|
|
|
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
|
|
|
|
/* Other (Linux-defined) */
|
|
|
|
- "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
|
|
|
|
- "constant_tsc", NULL, NULL,
|
|
|
|
- "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
+ "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
|
|
|
|
+ NULL, NULL, NULL, NULL,
|
|
|
|
+ "constant_tsc", "up", NULL, "arch_perfmon",
|
|
|
|
+ "pebs", "bts", NULL, "sync_rdtsc",
|
|
|
|
+ "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
|
|
|
|
/* Intel-defined (#2) */
|
|
|
|
@@ -1216,7 +1227,7 @@ static int show_cpuinfo(struct seq_file
|
|
|
|
|
|
|
|
/* VIA/Cyrix/Centaur-defined */
|
|
|
|
NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
|
|
|
|
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
+ "ace2", "ace2_en", "phe", "phe_en", "pmm", "pmm_en", NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
|
|
|
|
@@ -1227,6 +1238,12 @@ static int show_cpuinfo(struct seq_file
|
|
|
|
"osvw", "ibs", NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
+
|
|
|
|
+ /* Auxiliary (Linux-defined) */
|
|
|
|
+ "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
|
|
|
|
};
|
|
|
|
static char *x86_power_flags[] = {
|
|
|
|
"ts", /* temperature sensor */
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/setup64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/setup64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -125,11 +125,14 @@ void __init setup_per_cpu_areas(void)
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_XEN
|
|
|
|
-static void switch_pt(void)
|
|
|
|
+static void __init_refok switch_pt(int cpu)
|
|
|
|
{
|
|
|
|
+ if (cpu == 0)
|
|
|
|
+ xen_init_pt();
|
|
|
|
xen_pt_switch(__pa_symbol(init_level4_pgt));
|
|
|
|
xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt)));
|
|
|
|
}
|
|
|
|
+#define switch_pt() switch_pt(cpu)
|
|
|
|
|
|
|
|
static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
|
|
|
|
{
|
|
|
|
@@ -185,9 +188,6 @@ void pda_init(int cpu)
|
|
|
|
pda->mmu_state = 0;
|
|
|
|
|
|
|
|
if (cpu == 0) {
|
|
|
|
-#ifdef CONFIG_XEN
|
|
|
|
- xen_init_pt();
|
|
|
|
-#endif
|
|
|
|
/* others are initialized in smpboot.c */
|
|
|
|
pda->pcurrent = &init_task;
|
|
|
|
pda->irqstackptr = boot_cpu_stack;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/smp_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -362,7 +362,7 @@ __smp_call_function_single(int cpu, void
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
- * smp_call_function_single - Run a function on another CPU
|
|
|
|
+ * smp_call_function_single - Run a function on a specific CPU
|
|
|
|
* @func: The function to run. This must be fast and non-blocking.
|
|
|
|
* @info: An arbitrary pointer to pass to the function.
|
|
|
|
* @nonatomic: Currently unused.
|
|
|
|
@@ -379,17 +379,21 @@ int smp_call_function_single (int cpu, v
|
|
|
|
{
|
|
|
|
/* prevent preemption and reschedule on another processor */
|
|
|
|
int me = get_cpu();
|
|
|
|
+
|
|
|
|
+ /* Can deadlock when called with interrupts disabled */
|
|
|
|
+ WARN_ON(irqs_disabled());
|
|
|
|
+
|
|
|
|
if (cpu == me) {
|
|
|
|
+ local_irq_disable();
|
|
|
|
+ func(info);
|
|
|
|
+ local_irq_enable();
|
|
|
|
put_cpu();
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
- /* Can deadlock when called with interrupts disabled */
|
|
|
|
- WARN_ON(irqs_disabled());
|
|
|
|
-
|
|
|
|
- spin_lock_bh(&call_lock);
|
|
|
|
+ spin_lock(&call_lock);
|
|
|
|
__smp_call_function_single(cpu, func, info, nonatomic, wait);
|
|
|
|
- spin_unlock_bh(&call_lock);
|
|
|
|
+ spin_unlock(&call_lock);
|
|
|
|
put_cpu();
|
|
|
|
return 0;
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/traps_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -34,6 +34,10 @@
|
|
|
|
#include <linux/bug.h>
|
|
|
|
#include <linux/kdebug.h>
|
|
|
|
|
|
|
|
+#if defined(CONFIG_EDAC)
|
|
|
|
+#include <linux/edac.h>
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
#include <asm/system.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/atomic.h>
|
|
|
|
@@ -332,6 +336,7 @@ static int print_trace_stack(void *data,
|
|
|
|
|
|
|
|
static void print_trace_address(void *data, unsigned long addr)
|
|
|
|
{
|
|
|
|
+ touch_nmi_watchdog();
|
|
|
|
printk_address(addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -520,6 +525,7 @@ void __kprobes __die(const char * str, s
|
|
|
|
printk("\n");
|
|
|
|
notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
|
|
|
|
show_registers(regs);
|
|
|
|
+ add_taint(TAINT_DIE);
|
|
|
|
/* Executive summary in case the oops scrolled away */
|
|
|
|
printk(KERN_ALERT "RIP ");
|
|
|
|
printk_address(regs->rip);
|
|
|
|
@@ -533,7 +539,7 @@ void die(const char * str, struct pt_reg
|
|
|
|
unsigned long flags = oops_begin();
|
|
|
|
|
|
|
|
if (!user_mode(regs))
|
|
|
|
- report_bug(regs->rip);
|
|
|
|
+ report_bug(regs->rip, regs);
|
|
|
|
|
|
|
|
__die(str, regs, err);
|
|
|
|
oops_end(flags);
|
|
|
|
@@ -582,7 +588,8 @@ static void __kprobes do_trap(int trapnr
|
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
tsk->thread.trap_no = trapnr;
|
|
|
|
|
|
|
|
- if (exception_trace && unhandled_signal(tsk, signr))
|
|
|
|
+ if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
|
|
|
|
+ printk_ratelimit())
|
|
|
|
printk(KERN_INFO
|
|
|
|
"%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
|
|
|
|
tsk->comm, tsk->pid, str,
|
|
|
|
@@ -686,7 +693,8 @@ asmlinkage void __kprobes do_general_pro
|
|
|
|
tsk->thread.error_code = error_code;
|
|
|
|
tsk->thread.trap_no = 13;
|
|
|
|
|
|
|
|
- if (exception_trace && unhandled_signal(tsk, SIGSEGV))
|
|
|
|
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
|
|
|
|
+ printk_ratelimit())
|
|
|
|
printk(KERN_INFO
|
|
|
|
"%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
|
|
|
|
tsk->comm, tsk->pid,
|
|
|
|
@@ -721,6 +729,13 @@ mem_parity_error(unsigned char reason, s
|
|
|
|
reason);
|
|
|
|
printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
|
|
|
|
|
|
|
|
+#if defined(CONFIG_EDAC)
|
|
|
|
+ if(edac_handler_set()) {
|
|
|
|
+ edac_atomic_assert_error();
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
if (panic_on_unrecovered_nmi)
|
|
|
|
panic("NMI: Not continuing");
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/kernel/vsyscall_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -42,6 +42,7 @@
|
|
|
|
#include <asm/segment.h>
|
|
|
|
#include <asm/desc.h>
|
|
|
|
#include <asm/topology.h>
|
|
|
|
+#include <asm/vgtod.h>
|
|
|
|
|
|
|
|
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
|
|
|
|
#define __syscall_clobber "r11","rcx","memory"
|
|
|
|
@@ -57,26 +58,9 @@
|
|
|
|
* - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
|
|
|
|
* Try to keep this structure as small as possible to avoid cache line ping pongs
|
|
|
|
*/
|
|
|
|
-struct vsyscall_gtod_data_t {
|
|
|
|
- seqlock_t lock;
|
|
|
|
-
|
|
|
|
- /* open coded 'struct timespec' */
|
|
|
|
- time_t wall_time_sec;
|
|
|
|
- u32 wall_time_nsec;
|
|
|
|
-
|
|
|
|
- int sysctl_enabled;
|
|
|
|
- struct timezone sys_tz;
|
|
|
|
- struct { /* extract of a clocksource struct */
|
|
|
|
- cycle_t (*vread)(void);
|
|
|
|
- cycle_t cycle_last;
|
|
|
|
- cycle_t mask;
|
|
|
|
- u32 mult;
|
|
|
|
- u32 shift;
|
|
|
|
- } clock;
|
|
|
|
-};
|
|
|
|
int __vgetcpu_mode __section_vgetcpu_mode;
|
|
|
|
|
|
|
|
-struct vsyscall_gtod_data_t __vsyscall_gtod_data __section_vsyscall_gtod_data =
|
|
|
|
+struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
|
|
|
|
{
|
|
|
|
.lock = SEQLOCK_UNLOCKED,
|
|
|
|
.sysctl_enabled = 1,
|
|
|
|
@@ -96,6 +80,8 @@ void update_vsyscall(struct timespec *wa
|
|
|
|
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
|
|
|
|
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
|
|
|
|
vsyscall_gtod_data.sys_tz = sys_tz;
|
|
|
|
+ vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
|
|
|
|
+ vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
|
|
|
|
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/fault_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/fault_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -159,7 +159,9 @@ void dump_pagetable(unsigned long addres
|
|
|
|
pmd_t *pmd;
|
|
|
|
pte_t *pte;
|
|
|
|
|
|
|
|
- pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
|
|
|
|
+ pgd = (pgd_t *)read_cr3();
|
|
|
|
+
|
|
|
|
+ pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
|
|
|
|
pgd += pgd_index(address);
|
|
|
|
if (bad_address(pgd)) goto bad;
|
|
|
|
printk("PGD %lx ", pgd_val(*pgd));
|
|
|
|
@@ -219,16 +221,6 @@ static int is_errata93(struct pt_regs *r
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
-int unhandled_signal(struct task_struct *tsk, int sig)
|
|
|
|
-{
|
|
|
|
- if (is_init(tsk))
|
|
|
|
- return 1;
|
|
|
|
- if (tsk->ptrace & PT_PTRACED)
|
|
|
|
- return 0;
|
|
|
|
- return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
|
|
|
|
- (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
|
|
|
|
unsigned long error_code)
|
|
|
|
{
|
|
|
|
@@ -302,8 +294,8 @@ static int vmalloc_fault(unsigned long a
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
-int page_fault_trace = 0;
|
|
|
|
-int exception_trace = 1;
|
|
|
|
+static int page_fault_trace;
|
|
|
|
+int show_unhandled_signals = 1;
|
|
|
|
|
|
|
|
|
|
|
|
#define MEM_VERBOSE 1
|
|
|
|
@@ -372,7 +364,7 @@ asmlinkage void __kprobes do_page_fault(
|
|
|
|
struct vm_area_struct * vma;
|
|
|
|
unsigned long address;
|
|
|
|
const struct exception_table_entry *fixup;
|
|
|
|
- int write;
|
|
|
|
+ int write, fault;
|
|
|
|
unsigned long flags;
|
|
|
|
siginfo_t info;
|
|
|
|
|
|
|
|
@@ -384,7 +376,7 @@ asmlinkage void __kprobes do_page_fault(
|
|
|
|
prefetchw(&mm->mmap_sem);
|
|
|
|
|
|
|
|
/* get the address */
|
|
|
|
- address = current_vcpu_info()->arch.cr2;
|
|
|
|
+ address = read_cr2();
|
|
|
|
|
|
|
|
info.si_code = SEGV_MAPERR;
|
|
|
|
|
|
|
|
@@ -445,6 +437,13 @@ asmlinkage void __kprobes do_page_fault(
|
|
|
|
if (unlikely(in_atomic() || !mm))
|
|
|
|
goto bad_area_nosemaphore;
|
|
|
|
|
|
|
|
+ /*
|
|
|
|
+ * User-mode registers count as a user access even for any
|
|
|
|
+ * potential system fault or CPU buglet.
|
|
|
|
+ */
|
|
|
|
+ if (user_mode_vm(regs))
|
|
|
|
+ error_code |= PF_USER;
|
|
|
|
+
|
|
|
|
again:
|
|
|
|
/* When running in the kernel we expect faults to occur only to
|
|
|
|
* addresses in user space. All other faults represent errors in the
|
|
|
|
@@ -511,19 +510,18 @@ good_area:
|
|
|
|
* make sure we exit gracefully rather than endlessly redo
|
|
|
|
* the fault.
|
|
|
|
*/
|
|
|
|
- switch (handle_mm_fault(mm, vma, address, write)) {
|
|
|
|
- case VM_FAULT_MINOR:
|
|
|
|
- tsk->min_flt++;
|
|
|
|
- break;
|
|
|
|
- case VM_FAULT_MAJOR:
|
|
|
|
- tsk->maj_flt++;
|
|
|
|
- break;
|
|
|
|
- case VM_FAULT_SIGBUS:
|
|
|
|
- goto do_sigbus;
|
|
|
|
- default:
|
|
|
|
- goto out_of_memory;
|
|
|
|
+ fault = handle_mm_fault(mm, vma, address, write);
|
|
|
|
+ if (unlikely(fault & VM_FAULT_ERROR)) {
|
|
|
|
+ if (fault & VM_FAULT_OOM)
|
|
|
|
+ goto out_of_memory;
|
|
|
|
+ else if (fault & VM_FAULT_SIGBUS)
|
|
|
|
+ goto do_sigbus;
|
|
|
|
+ BUG();
|
|
|
|
}
|
|
|
|
-
|
|
|
|
+ if (fault & VM_FAULT_MAJOR)
|
|
|
|
+ tsk->maj_flt++;
|
|
|
|
+ else
|
|
|
|
+ tsk->min_flt++;
|
|
|
|
up_read(&mm->mmap_sem);
|
|
|
|
return;
|
|
|
|
|
|
|
|
@@ -556,7 +554,8 @@ bad_area_nosemaphore:
|
|
|
|
(address >> 32))
|
|
|
|
return;
|
|
|
|
|
|
|
|
- if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
|
|
|
|
+ if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
|
|
|
|
+ printk_ratelimit()) {
|
|
|
|
printk(
|
|
|
|
"%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
|
|
|
|
tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
|
|
|
|
@@ -630,7 +629,7 @@ out_of_memory:
|
|
|
|
}
|
|
|
|
printk("VM: killing process %s\n", tsk->comm);
|
|
|
|
if (error_code & 4)
|
|
|
|
- do_exit(SIGKILL);
|
|
|
|
+ do_group_exit(SIGKILL);
|
|
|
|
goto no_context;
|
|
|
|
|
|
|
|
do_sigbus:
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -66,6 +66,9 @@ int after_bootmem;
|
|
|
|
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
|
|
|
|
extern unsigned long start_pfn;
|
|
|
|
|
|
|
|
+extern pmd_t level2_fixmap_pgt[PTRS_PER_PMD];
|
|
|
|
+extern pte_t level1_fixmap_pgt[PTRS_PER_PTE];
|
|
|
|
+
|
|
|
|
/*
|
|
|
|
* Use this until direct mapping is established, i.e. before __va() is
|
|
|
|
* available in init_memory_mapping().
|
|
|
|
@@ -362,6 +365,10 @@ __set_fixmap (enum fixed_addresses idx,
|
|
|
|
set_pte_phys(address, phys, prot, 0);
|
|
|
|
set_pte_phys(address, phys, prot, 1);
|
|
|
|
break;
|
|
|
|
+ case FIX_EARLYCON_MEM_BASE:
|
|
|
|
+ xen_l1_entry_update(level1_fixmap_pgt + pte_index(address),
|
|
|
|
+ pfn_pte_ma(phys >> PAGE_SHIFT, prot));
|
|
|
|
+ break;
|
|
|
|
default:
|
|
|
|
set_pte_phys_ma(address, phys, prot);
|
|
|
|
break;
|
|
|
|
@@ -595,6 +602,19 @@ void __init xen_init_pt(void)
|
|
|
|
__user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] =
|
|
|
|
__pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
|
|
|
|
|
|
|
|
+ /* Do an early initialization of the fixmap area. */
|
|
|
|
+ addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
|
|
|
|
+ if (pud_present(level3_kernel_pgt[pud_index(addr)])) {
|
|
|
|
+ unsigned long adr = page[pud_index(addr)];
|
|
|
|
+
|
|
|
|
+ addr_to_page(adr, page);
|
|
|
|
+ copy_page(level2_fixmap_pgt, page);
|
|
|
|
+ }
|
|
|
|
+ level3_kernel_pgt[pud_index(addr)] =
|
|
|
|
+ __pud(__pa_symbol(level2_fixmap_pgt) | _PAGE_TABLE);
|
|
|
|
+ level2_fixmap_pgt[pmd_index(addr)] =
|
|
|
|
+ __pmd(__pa_symbol(level1_fixmap_pgt) | _PAGE_TABLE);
|
|
|
|
+
|
|
|
|
early_make_page_readonly(init_level4_pgt,
|
|
|
|
XENFEAT_writable_page_tables);
|
|
|
|
early_make_page_readonly(__user_pgd(init_level4_pgt),
|
|
|
|
@@ -603,6 +623,10 @@ void __init xen_init_pt(void)
|
|
|
|
XENFEAT_writable_page_tables);
|
|
|
|
early_make_page_readonly(level3_user_pgt,
|
|
|
|
XENFEAT_writable_page_tables);
|
|
|
|
+ early_make_page_readonly(level2_fixmap_pgt,
|
|
|
|
+ XENFEAT_writable_page_tables);
|
|
|
|
+ early_make_page_readonly(level1_fixmap_pgt,
|
|
|
|
+ XENFEAT_writable_page_tables);
|
|
|
|
|
|
|
|
if (!xen_feature(XENFEAT_writable_page_tables)) {
|
|
|
|
xen_pgd_pin(__pa_symbol(init_level4_pgt));
|
|
|
|
@@ -832,7 +856,7 @@ void __init paging_init(void)
|
|
|
|
sparse_init();
|
|
|
|
free_area_init_nodes(max_zone_pfns);
|
|
|
|
|
|
|
|
- init_mm.context.pinned = 1;
|
|
|
|
+ SetPagePinned(virt_to_page(init_mm.pgd));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
@@ -1155,41 +1179,6 @@ int kern_addr_valid(unsigned long addr)
|
|
|
|
return pfn_valid(pte_pfn(*pte));
|
|
|
|
}
|
|
|
|
|
|
|
|
-#ifdef CONFIG_SYSCTL
|
|
|
|
-#include <linux/sysctl.h>
|
|
|
|
-
|
|
|
|
-extern int exception_trace, page_fault_trace;
|
|
|
|
-
|
|
|
|
-static ctl_table debug_table2[] = {
|
|
|
|
- {
|
|
|
|
- .ctl_name = 99,
|
|
|
|
- .procname = "exception-trace",
|
|
|
|
- .data = &exception_trace,
|
|
|
|
- .maxlen = sizeof(int),
|
|
|
|
- .mode = 0644,
|
|
|
|
- .proc_handler = proc_dointvec
|
|
|
|
- },
|
|
|
|
- {}
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static ctl_table debug_root_table2[] = {
|
|
|
|
- {
|
|
|
|
- .ctl_name = CTL_DEBUG,
|
|
|
|
- .procname = "debug",
|
|
|
|
- .mode = 0555,
|
|
|
|
- .child = debug_table2
|
|
|
|
- },
|
|
|
|
- {}
|
|
|
|
-};
|
|
|
|
-
|
|
|
|
-static __init int x8664_sysctl_init(void)
|
|
|
|
-{
|
|
|
|
- register_sysctl_table(debug_root_table2);
|
|
|
|
- return 0;
|
|
|
|
-}
|
|
|
|
-__initcall(x8664_sysctl_init);
|
|
|
|
-#endif
|
|
|
|
-
|
|
|
|
/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
|
|
|
|
covers the 64bit vsyscall page now. 32bit has a real VMA now and does
|
|
|
|
not need special handling anymore. */
|
|
|
|
@@ -1228,9 +1217,18 @@ int in_gate_area_no_task(unsigned long a
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifndef CONFIG_XEN
|
|
|
|
-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
|
|
|
|
+void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
|
|
|
|
{
|
|
|
|
return __alloc_bootmem_core(pgdat->bdata, size,
|
|
|
|
SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
+
|
|
|
|
+const char *arch_vma_name(struct vm_area_struct *vma)
|
|
|
|
+{
|
|
|
|
+ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
|
|
|
|
+ return "[vdso]";
|
|
|
|
+ if (vma == &gate_vma)
|
|
|
|
+ return "[vsyscall]";
|
|
|
|
+ return NULL;
|
|
|
|
+}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/mm/pageattr_64-xen.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -171,7 +171,7 @@ void mm_pin(struct mm_struct *mm)
|
|
|
|
mm_walk(mm, PAGE_KERNEL_RO);
|
|
|
|
xen_pgd_pin(__pa(mm->pgd)); /* kernel */
|
|
|
|
xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
|
|
|
|
- mm->context.pinned = 1;
|
|
|
|
+ SetPagePinned(virt_to_page(mm->pgd));
|
|
|
|
spin_lock(&mm_unpinned_lock);
|
|
|
|
list_del(&mm->context.unpinned);
|
|
|
|
spin_unlock(&mm_unpinned_lock);
|
|
|
|
@@ -189,7 +189,7 @@ void mm_unpin(struct mm_struct *mm)
|
|
|
|
xen_pgd_unpin(__pa(mm->pgd));
|
|
|
|
xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
|
|
|
|
mm_walk(mm, PAGE_KERNEL);
|
|
|
|
- mm->context.pinned = 0;
|
|
|
|
+ ClearPagePinned(virt_to_page(mm->pgd));
|
|
|
|
spin_lock(&mm_unpinned_lock);
|
|
|
|
list_add(&mm->context.unpinned, &mm_unpinned);
|
|
|
|
spin_unlock(&mm_unpinned_lock);
|
|
|
|
@@ -217,7 +217,7 @@ void mm_pin_all(void)
|
|
|
|
|
|
|
|
void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
- if (!mm->context.pinned)
|
|
|
|
+ if (!PagePinned(virt_to_page(mm->pgd)))
|
|
|
|
mm_pin(mm);
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -243,8 +243,9 @@ void arch_exit_mmap(struct mm_struct *mm
|
|
|
|
|
|
|
|
task_unlock(tsk);
|
|
|
|
|
|
|
|
- if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
|
|
|
|
- !mm->context.has_foreign_mappings )
|
|
|
|
+ if (PagePinned(virt_to_page(mm->pgd))
|
|
|
|
+ && (atomic_read(&mm->mm_count) == 1)
|
|
|
|
+ && !mm->context.has_foreign_mappings)
|
|
|
|
mm_unpin(mm);
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -343,14 +344,13 @@ static void flush_kernel_map(void *arg)
|
|
|
|
struct page *pg;
|
|
|
|
|
|
|
|
/* When clflush is available always use it because it is
|
|
|
|
- much cheaper than WBINVD. Disable clflush for now because
|
|
|
|
- the high level code is not ready yet */
|
|
|
|
+ much cheaper than WBINVD. */
|
|
|
|
+ /* clflush is still broken. Disable for now. */
|
|
|
|
if (1 || !cpu_has_clflush)
|
|
|
|
asm volatile("wbinvd" ::: "memory");
|
|
|
|
else list_for_each_entry(pg, l, lru) {
|
|
|
|
void *adr = page_address(pg);
|
|
|
|
- if (cpu_has_clflush)
|
|
|
|
- cache_flush_page(adr);
|
|
|
|
+ cache_flush_page(adr);
|
|
|
|
}
|
|
|
|
__flush_tlb_all();
|
|
|
|
}
|
|
|
|
@@ -364,7 +364,8 @@ static LIST_HEAD(deferred_pages); /* pro
|
|
|
|
|
|
|
|
static inline void save_page(struct page *fpage)
|
|
|
|
{
|
|
|
|
- list_add(&fpage->lru, &deferred_pages);
|
|
|
|
+ if (!test_and_set_bit(PG_arch_1, &fpage->flags))
|
|
|
|
+ list_add(&fpage->lru, &deferred_pages);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -398,9 +399,12 @@ __change_page_attr(unsigned long address
|
|
|
|
pte_t *kpte;
|
|
|
|
struct page *kpte_page;
|
|
|
|
pgprot_t ref_prot2;
|
|
|
|
+
|
|
|
|
kpte = lookup_address(address);
|
|
|
|
if (!kpte) return 0;
|
|
|
|
kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
|
|
|
|
+ BUG_ON(PageLRU(kpte_page));
|
|
|
|
+ BUG_ON(PageCompound(kpte_page));
|
|
|
|
if (pgprot_val(prot) != pgprot_val(ref_prot)) {
|
|
|
|
if (!pte_huge(*kpte)) {
|
|
|
|
set_pte(kpte, pfn_pte(pfn, prot));
|
|
|
|
@@ -439,10 +443,9 @@ __change_page_attr(unsigned long address
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
- if (page_private(kpte_page) == 0) {
|
|
|
|
- save_page(kpte_page);
|
|
|
|
+ save_page(kpte_page);
|
|
|
|
+ if (page_private(kpte_page) == 0)
|
|
|
|
revert_page(address, ref_prot);
|
|
|
|
- }
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -514,6 +517,10 @@ void global_flush_tlb(void)
|
|
|
|
flush_map(&l);
|
|
|
|
|
|
|
|
list_for_each_entry_safe(pg, next, &l, lru) {
|
|
|
|
+ list_del(&pg->lru);
|
|
|
|
+ clear_bit(PG_arch_1, &pg->flags);
|
|
|
|
+ if (page_private(pg) != 0)
|
|
|
|
+ continue;
|
|
|
|
ClearPagePrivate(pg);
|
|
|
|
__free_page(pg);
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/oprofile/xenoprof.c 2008-01-28 12:24:19.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/oprofile/xenoprof.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -18,9 +18,9 @@
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/oprofile.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
+#include <linux/vmalloc.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/interface/xen.h>
|
|
|
|
#include <xen/interface/xenoprof.h>
|
|
|
|
#include <xen/xenoprof.h>
|
|
|
|
--- head-2011-03-17.orig/arch/x86/vdso/vdso32/note.S 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/vdso/vdso32/note.S 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -13,7 +13,7 @@ ELFNOTE_START(Linux, 0, "a")
|
|
|
|
.long LINUX_VERSION_CODE
|
|
|
|
ELFNOTE_END
|
|
|
|
|
|
|
|
-#ifdef CONFIG_XEN
|
|
|
|
+#if defined(CONFIG_X86_XEN) || defined(CONFIG_PARAVIRT_XEN)
|
|
|
|
/*
|
|
|
|
* Add a special note telling glibc's dynamic linker a fake hardware
|
|
|
|
* flavor that it will use to choose the search path for libraries in the
|
|
|
|
@@ -37,8 +37,12 @@ ELFNOTE_END
|
|
|
|
|
|
|
|
ELFNOTE_START(GNU, 2, "a")
|
|
|
|
.long 1 /* ncaps */
|
|
|
|
+#ifdef CONFIG_PARAVIRT_XEN
|
|
|
|
VDSO32_NOTE_MASK: /* Symbol used by arch/x86/xen/setup.c */
|
|
|
|
.long 0 /* mask */
|
|
|
|
+#else
|
|
|
|
+ .long 1 << VDSO_NOTE_NONEGSEG_BIT /* mask */
|
|
|
|
+#endif
|
|
|
|
.byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */
|
|
|
|
ELFNOTE_END
|
|
|
|
#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/Makefile 2011-01-31 14:53:38.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/Makefile 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -19,7 +19,7 @@ obj-$(CONFIG_PNP) += pnp/
|
|
|
|
obj-$(CONFIG_ARM_AMBA) += amba/
|
|
|
|
|
|
|
|
obj-$(CONFIG_VIRTIO) += virtio/
|
|
|
|
-obj-$(CONFIG_XEN) += xen/
|
|
|
|
+obj-$(CONFIG_PARAVIRT_XEN) += xen/
|
|
|
|
|
|
|
|
# regulators early, since some subsystems rely on them to initialize
|
|
|
|
obj-$(CONFIG_REGULATOR) += regulator/
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/block/Kconfig 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/block/Kconfig 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -466,9 +466,9 @@ config XILINX_SYSACE
|
|
|
|
help
|
|
|
|
Include support for the Xilinx SystemACE CompactFlash interface
|
|
|
|
|
|
|
|
-config XEN_BLKDEV_FRONTEND
|
|
|
|
+config PARAVIRT_XEN_BLKDEV_FRONTEND
|
|
|
|
tristate "Xen virtual block device support"
|
|
|
|
- depends on XEN
|
|
|
|
+ depends on PARAVIRT_XEN
|
|
|
|
default y
|
|
|
|
select XEN_XENBUS_FRONTEND
|
|
|
|
help
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/block/Makefile 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/block/Makefile 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -35,7 +35,7 @@ obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
|
|
|
|
obj-$(CONFIG_BLK_DEV_UB) += ub.o
|
|
|
|
obj-$(CONFIG_BLK_DEV_HD) += hd.o
|
|
|
|
|
|
|
|
-obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
|
|
|
|
+obj-$(CONFIG_PARAVIRT_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
|
|
|
|
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
|
|
|
|
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/block/xen-blkfront.c 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/block/xen-blkfront.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -1282,7 +1282,6 @@ static const struct xenbus_device_id blk
|
|
|
|
|
|
|
|
static struct xenbus_driver blkfront = {
|
|
|
|
.name = "vbd",
|
|
|
|
- .owner = THIS_MODULE,
|
|
|
|
.ids = blkfront_ids,
|
|
|
|
.probe = blkfront_probe,
|
|
|
|
.remove = blkfront_remove,
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/char/Kconfig 2011-01-31 14:42:03.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/char/Kconfig 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -669,7 +669,7 @@ config HVC_IUCV
|
|
|
|
|
|
|
|
config HVC_XEN
|
|
|
|
bool "Xen Hypervisor Console support"
|
|
|
|
- depends on XEN
|
|
|
|
+ depends on PARAVIRT_XEN
|
|
|
|
select HVC_DRIVER
|
|
|
|
select HVC_IRQ
|
|
|
|
default y
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/net/Kconfig 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/net/Kconfig 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -2960,9 +2960,9 @@ config TILE_NET
|
|
|
|
To compile this driver as a module, choose M here: the module
|
|
|
|
will be called tile_net.
|
|
|
|
|
|
|
|
-config XEN_NETDEV_FRONTEND
|
|
|
|
+config PARAVIRT_XEN_NETDEV_FRONTEND
|
|
|
|
tristate "Xen network device frontend driver"
|
|
|
|
- depends on XEN
|
|
|
|
+ depends on PARAVIRT_XEN
|
|
|
|
select XEN_XENBUS_FRONTEND
|
|
|
|
default y
|
|
|
|
help
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/net/Makefile 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/net/Makefile 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -170,7 +170,7 @@ obj-$(CONFIG_PPTP) += pppox.o pptp.o
|
|
|
|
obj-$(CONFIG_SLIP) += slip.o
|
|
|
|
obj-$(CONFIG_SLHC) += slhc.o
|
|
|
|
|
|
|
|
-obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
|
|
|
|
+obj-$(CONFIG_PARAVIRT_XEN_NETDEV_FRONTEND) += xen-netfront.o
|
|
|
|
|
|
|
|
obj-$(CONFIG_DUMMY) += dummy.o
|
|
|
|
obj-$(CONFIG_IFB) += ifb.o
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/net/xen-netfront.c 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/net/xen-netfront.c 2011-02-17 10:10:21.000000000 +0100
|
|
|
|
@@ -1878,7 +1878,6 @@ static int __devexit xennet_remove(struc
|
|
|
|
|
|
|
|
static struct xenbus_driver netfront_driver = {
|
|
|
|
.name = "vif",
|
|
|
|
- .owner = THIS_MODULE,
|
|
|
|
.ids = netfront_ids,
|
|
|
|
.probe = netfront_probe,
|
|
|
|
.remove = __devexit_p(xennet_remove),
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/Kconfig 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/Kconfig 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -23,6 +23,9 @@ config XEN_PRIVILEGED_GUEST
|
|
|
|
config XEN_UNPRIVILEGED_GUEST
|
|
|
|
def_bool !XEN_PRIVILEGED_GUEST
|
|
|
|
select PM
|
|
|
|
+ select PM_SLEEP
|
|
|
|
+ select PM_SLEEP_SMP if SMP
|
|
|
|
+ select SUSPEND
|
|
|
|
|
|
|
|
config XEN_PRIVCMD
|
|
|
|
def_bool y
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/Makefile 2011-02-24 13:56:24.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/Makefile 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -1,10 +1,14 @@
|
|
|
|
-obj-y += core/
|
|
|
|
-obj-y += console/
|
|
|
|
-obj-y += evtchn/
|
|
|
|
-obj-y += xenbus/
|
|
|
|
-obj-y += char/
|
|
|
|
+obj-$(CONFIG_PARAVIRT_XEN) += grant-table.o
|
|
|
|
|
|
|
|
-obj-y += util.o
|
|
|
|
+obj-$(CONFIG_XEN) += core/
|
|
|
|
+obj-$(CONFIG_XEN) += console/
|
|
|
|
+obj-$(CONFIG_XEN) += evtchn/
|
|
|
|
+obj-y += xenbus/
|
|
|
|
+obj-$(CONFIG_XEN) += char/
|
|
|
|
+
|
|
|
|
+xen-backend-$(CONFIG_XEN_BACKEND) := util.o
|
|
|
|
+
|
|
|
|
+obj-$(CONFIG_XEN) += $(xen-backend-y) $(xen-backend-m)
|
|
|
|
obj-$(CONFIG_XEN_BALLOON) += balloon/
|
|
|
|
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += blkback/
|
|
|
|
obj-$(CONFIG_XEN_BLKDEV_TAP) += blktap/
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/balloon/balloon.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/balloon/balloon.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -324,13 +324,9 @@ static int increase_reservation(unsigned
|
|
|
|
|
|
|
|
#ifndef MODULE
|
|
|
|
setup_per_zone_pages_min();
|
|
|
|
-# if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_ACPI_HOTPLUG_MEMORY) \
|
|
|
|
- || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
|
|
|
|
- /* build_all_zonelists() is __meminit */
|
|
|
|
if (need_zonelists_rebuild)
|
|
|
|
build_all_zonelists();
|
|
|
|
else
|
|
|
|
-# endif
|
|
|
|
vm_total_pages = nr_free_pagecache_pages();
|
|
|
|
#endif
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blkback/blkback.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blkback/blkback.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -156,7 +156,7 @@ static void unplug_queue(blkif_t *blkif)
|
|
|
|
|
|
|
|
static void plug_queue(blkif_t *blkif, struct block_device *bdev)
|
|
|
|
{
|
|
|
|
- request_queue_t *q = bdev_get_queue(bdev);
|
|
|
|
+ struct request_queue *q = bdev_get_queue(bdev);
|
|
|
|
|
|
|
|
if (q == blkif->plug)
|
|
|
|
return;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blkback/common.h 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blkback/common.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -41,7 +41,6 @@
|
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
#include <xen/blkif.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
#include "blkback-pagemap.h"
|
|
|
|
|
|
|
|
@@ -82,7 +81,7 @@ typedef struct blkif_st {
|
|
|
|
wait_queue_head_t wq;
|
|
|
|
struct task_struct *xenblkd;
|
|
|
|
unsigned int waiting_reqs;
|
|
|
|
- request_queue_t *plug;
|
|
|
|
+ struct request_queue *plug;
|
|
|
|
|
|
|
|
/* statistics */
|
|
|
|
unsigned long st_print;
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/blkback/interface.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blkback/interface.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -178,5 +178,5 @@ void blkif_free(blkif_t *blkif)
|
2010-07-07 11:12:45 +00:00
|
|
|
void __init blkif_interface_init(void)
|
|
|
|
{
|
|
|
|
blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
|
|
|
|
- 0, 0, NULL, NULL);
|
|
|
|
+ 0, 0, NULL);
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blkback/xenbus.c 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blkback/xenbus.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -27,8 +27,6 @@
|
|
|
|
pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \
|
|
|
|
__FUNCTION__, __LINE__, ##args)
|
|
|
|
|
|
|
|
-static DEFINE_RWLOCK(sysfs_read_lock);
|
|
|
|
-
|
|
|
|
static void connect(struct backend_info *);
|
|
|
|
static int connect_ring(struct backend_info *);
|
|
|
|
static void backend_changed(struct xenbus_watch *, const char **,
|
|
|
|
@@ -110,10 +108,8 @@ static void update_blkif_status(blkif_t
|
|
|
|
if (!get_device(_dev)) \
|
|
|
|
return ret; \
|
|
|
|
dev = to_xenbus_device(_dev); \
|
|
|
|
- read_lock(&sysfs_read_lock); \
|
|
|
|
if ((be = dev->dev.driver_data) != NULL) \
|
|
|
|
ret = sprintf(buf, format, ##args); \
|
|
|
|
- read_unlock(&sysfs_read_lock); \
|
|
|
|
put_device(_dev); \
|
|
|
|
return ret; \
|
|
|
|
} \
|
|
|
|
@@ -181,7 +177,6 @@ static int blkback_remove(struct xenbus_
|
|
|
|
|
|
|
|
DPRINTK("");
|
|
|
|
|
|
|
|
- write_lock(&sysfs_read_lock);
|
|
|
|
if (be->major || be->minor)
|
|
|
|
xenvbd_sysfs_delif(dev);
|
|
|
|
|
|
|
|
@@ -200,7 +195,6 @@ static int blkback_remove(struct xenbus_
|
|
|
|
|
|
|
|
kfree(be);
|
|
|
|
dev->dev.driver_data = NULL;
|
|
|
|
- write_unlock(&sysfs_read_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blkfront/blkfront.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blkfront/blkfront.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -574,14 +574,20 @@ int blkif_ioctl(struct inode *inode, str
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
- if (info->mi && info->gd) {
|
|
|
|
+ if (info->mi && info->gd && info->rq) {
|
|
|
|
switch (info->mi->major) {
|
|
|
|
case SCSI_DISK0_MAJOR:
|
|
|
|
case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
|
|
|
|
case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR:
|
|
|
|
case SCSI_CDROM_MAJOR:
|
|
|
|
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
|
|
|
|
return scsi_cmd_ioctl(filep, info->gd, command,
|
|
|
|
(void __user *)argument);
|
|
|
|
+#else
|
|
|
|
+ return scsi_cmd_ioctl(filep, info->rq,
|
|
|
|
+ info->gd, command,
|
|
|
|
+ (void __user *)argument);
|
|
|
|
+#endif
|
|
|
|
}
|
|
|
|
}
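
The extra info->rq test and the alternate branch above exist because scsi_cmd_ioctl() grew a struct request_queue argument in 2.6.23. A hedged sketch of a version-spanning wrapper (helper name hypothetical; info->rq and info->gd are the struct blkfront_info members from block.h):

    #include <linux/blkdev.h>
    #include <linux/version.h>

    #include "block.h"	/* driver-local header providing struct blkfront_info */

    static int forward_scsi_ioctl(struct file *filep, struct blkfront_info *info,
    			      unsigned int command, void __user *argument)
    {
    #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
    	return scsi_cmd_ioctl(filep, info->gd, command, argument);
    #else
    	return scsi_cmd_ioctl(filep, info->rq, info->gd, command, argument);
    #endif
    }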
|
|
|
|
|
|
|
|
@@ -698,7 +704,7 @@ static int blkif_queue_request(struct re
|
2010-07-07 11:12:45 +00:00
|
|
|
* do_blkif_request
|
|
|
|
* read a block; request is in a request queue
|
|
|
|
*/
|
|
|
|
-void do_blkif_request(request_queue_t *rq)
|
|
|
|
+void do_blkif_request(struct request_queue *rq)
|
|
|
|
{
|
|
|
|
struct blkfront_info *info = NULL;
|
|
|
|
struct request *req;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blkfront/block.h 2010-02-24 13:13:46.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blkfront/block.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -107,7 +107,7 @@ struct blkfront_info
|
|
|
|
struct scatterlist sg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
|
|
|
|
unsigned int irq;
|
|
|
|
struct xlbd_major_info *mi;
|
|
|
|
- request_queue_t *rq;
|
|
|
|
+ struct request_queue *rq;
|
|
|
|
struct work_struct work;
|
|
|
|
struct gnttab_free_callback callback;
|
|
|
|
struct blk_shadow shadow[BLK_RING_SIZE];
|
|
|
|
@@ -131,7 +131,7 @@ extern int blkif_ioctl(struct inode *ino
|
|
|
|
extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
|
|
|
|
extern int blkif_check(dev_t dev);
|
|
|
|
extern int blkif_revalidate(dev_t dev);
|
|
|
|
-extern void do_blkif_request (request_queue_t *rq);
|
|
|
|
+extern void do_blkif_request (struct request_queue *rq);
|
|
|
|
|
|
|
|
/* Virtual block-device subsystem. */
|
|
|
|
/* Note that xlvbd_add doesn't call add_disk for you: you're expected
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blkfront/vbd.c 2010-01-18 15:23:12.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blkfront/vbd.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -298,7 +298,7 @@ xlbd_release_minors(struct xlbd_major_in
|
|
|
|
static int
|
|
|
|
xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
|
|
|
|
{
|
|
|
|
- request_queue_t *rq;
|
|
|
|
+ struct request_queue *rq;
|
|
|
|
|
|
|
|
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
|
|
|
|
if (rq == NULL)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blktap/common.h 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blktap/common.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -40,7 +40,6 @@
|
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
#include <xen/blkif.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
|
|
|
|
#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
|
|
|
|
__FILE__ , __LINE__ , ## _a )
|
|
|
|
@@ -68,7 +67,7 @@ typedef struct blkif_st {
|
2010-07-07 11:12:45 +00:00
|
|
|
wait_queue_head_t wq;
|
|
|
|
struct task_struct *xenblkd;
|
|
|
|
unsigned int waiting_reqs;
|
|
|
|
- request_queue_t *plug;
|
|
|
|
+ struct request_queue *plug;
|
|
|
|
|
|
|
|
/* statistics */
|
|
|
|
unsigned long st_print;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blktap/interface.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blktap/interface.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -178,5 +178,5 @@ void tap_blkif_kmem_cache_free(blkif_t *
|
2010-07-07 11:12:45 +00:00
|
|
|
void __init tap_blkif_interface_init(void)
|
|
|
|
{
|
|
|
|
blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t),
|
|
|
|
- 0, 0, NULL, NULL);
|
|
|
|
+ 0, 0, NULL);
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blktap/xenbus.c 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blktap/xenbus.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -50,8 +50,6 @@ struct backend_info
|
|
|
|
int group_added;
|
|
|
|
};
|
|
|
|
|
|
|
|
-static DEFINE_RWLOCK(sysfs_read_lock);
|
|
|
|
-
|
|
|
|
static void connect(struct backend_info *);
|
|
|
|
static int connect_ring(struct backend_info *);
|
|
|
|
static int blktap_remove(struct xenbus_device *dev);
|
|
|
|
@@ -130,10 +128,8 @@ static int blktap_name(blkif_t *blkif, c
|
|
|
|
if (!get_device(_dev)) \
|
|
|
|
return ret; \
|
|
|
|
dev = to_xenbus_device(_dev); \
|
|
|
|
- read_lock(&sysfs_read_lock); \
|
|
|
|
if ((be = dev->dev.driver_data) != NULL) \
|
|
|
|
ret = sprintf(buf, format, ##args); \
|
|
|
|
- read_unlock(&sysfs_read_lock); \
|
|
|
|
put_device(_dev); \
|
|
|
|
return ret; \
|
|
|
|
} \
|
|
|
|
@@ -180,7 +176,6 @@ static int blktap_remove(struct xenbus_d
|
|
|
|
{
|
|
|
|
struct backend_info *be = dev->dev.driver_data;
|
|
|
|
|
|
|
|
- write_lock(&sysfs_read_lock);
|
|
|
|
if (be->group_added)
|
|
|
|
xentap_sysfs_delif(be->dev);
|
|
|
|
if (be->backend_watch.node) {
|
|
|
|
@@ -198,7 +193,6 @@ static int blktap_remove(struct xenbus_d
|
|
|
|
}
|
|
|
|
kfree(be);
|
|
|
|
dev->dev.driver_data = NULL;
|
|
|
|
- write_unlock(&sysfs_read_lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blktap2/blktap.h 2010-02-24 13:13:46.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blktap2/blktap.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -203,6 +203,7 @@ blktap_validate_params(struct blktap *ta
|
|
|
|
}
|
|
|
|
|
|
|
|
int blktap_control_destroy_device(struct blktap *);
|
|
|
|
+int blktap_control_finish_destroy(struct blktap *);
|
|
|
|
|
|
|
|
int blktap_ring_init(int *);
|
|
|
|
int blktap_ring_free(void);
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blktap2/control.c 2011-02-24 15:15:38.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blktap2/control.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -194,14 +194,20 @@ blktap_control_destroy_device(struct blk
|
|
|
|
|
|
|
|
clear_bit(BLKTAP_SHUTDOWN_REQUESTED, &tap->dev_inuse);
|
|
|
|
|
|
|
|
- if (tap->dev_inuse == (1UL << BLKTAP_CONTROL)) {
|
|
|
|
+ if (blktap_control_finish_destroy(tap))
|
|
|
|
err = 0;
|
|
|
|
- clear_bit(BLKTAP_CONTROL, &tap->dev_inuse);
|
|
|
|
- }
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
+int
|
|
|
|
+blktap_control_finish_destroy(struct blktap *tap)
|
|
|
|
+{
|
|
|
|
+ if (tap->dev_inuse == (1UL << BLKTAP_CONTROL))
|
|
|
|
+ clear_bit(BLKTAP_CONTROL, &tap->dev_inuse);
|
|
|
|
+ return !tap->dev_inuse;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
static int __init
|
|
|
|
blktap_control_init(void)
|
|
|
|
{
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blktap2/device.c 2010-11-25 09:36:37.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blktap2/device.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -833,7 +833,7 @@ static void
|
2010-07-07 11:12:45 +00:00
|
|
|
blktap_device_run_queue(struct blktap *tap)
|
|
|
|
{
|
|
|
|
int queued, err;
|
|
|
|
- request_queue_t *rq;
|
|
|
|
+ struct request_queue *rq;
|
|
|
|
struct request *req;
|
|
|
|
struct blktap_ring *ring;
|
|
|
|
struct blktap_device *dev;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -910,7 +910,7 @@ blktap_device_run_queue(struct blktap *t
|
2010-07-07 11:12:45 +00:00
|
|
|
* dev->lock held on entry
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
-blktap_device_do_request(request_queue_t *rq)
|
|
|
|
+blktap_device_do_request(struct request_queue *rq)
|
|
|
|
{
|
|
|
|
struct request *req;
|
|
|
|
struct blktap *tap;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -1186,6 +1186,5 @@ void
|
2010-07-07 11:12:45 +00:00
|
|
|
blktap_device_free(void)
|
|
|
|
{
|
|
|
|
if (blktap_device_major)
|
|
|
|
- if (unregister_blkdev(blktap_device_major, "tapdev"))
|
|
|
|
- BTERR("blktap device unregister failed\n");
|
|
|
|
+ unregister_blkdev(blktap_device_major, "tapdev");
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/blktap2/sysfs.c 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/blktap2/sysfs.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -334,6 +334,24 @@ blktap_sysfs_create(struct blktap *tap)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
+static void
|
|
|
|
+_blktap_sysfs_destroy(struct device *_dev)
|
|
|
|
+{
|
|
|
|
+ struct class_device *dev = _dev->???;
|
|
|
|
+ struct blktap *tap = dev->class_data;
|
|
|
|
+
|
|
|
|
+ class_device_remove_file(dev, &class_device_attr_name);
|
|
|
|
+ class_device_remove_file(dev, &class_device_attr_remove);
|
|
|
|
+ class_device_remove_file(dev, &class_device_attr_pause);
|
|
|
|
+ class_device_remove_file(dev, &class_device_attr_resume);
|
|
|
|
+ class_device_remove_file(dev, &class_device_attr_debug);
|
|
|
|
+ class_device_unregister(dev);
|
|
|
|
+
|
|
|
|
+ clear_bit(BLKTAP_SYSFS, &tap->dev_inuse);
|
|
|
|
+
|
|
|
|
+ blktap_control_finish_destroy(tap);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
int
|
|
|
|
blktap_sysfs_destroy(struct blktap *tap)
|
|
|
|
{
|
|
|
|
@@ -350,17 +368,7 @@ blktap_sysfs_destroy(struct blktap *tap)
|
|
|
|
!atomic_read(&tap->ring.sysfs_refcnt)))
|
|
|
|
return -EAGAIN;
|
|
|
|
|
|
|
|
- /* XXX: is it safe to remove the class from a sysfs attribute? */
|
|
|
|
- class_device_remove_file(dev, &class_device_attr_name);
|
|
|
|
- class_device_remove_file(dev, &class_device_attr_remove);
|
|
|
|
- class_device_remove_file(dev, &class_device_attr_pause);
|
|
|
|
- class_device_remove_file(dev, &class_device_attr_resume);
|
|
|
|
- class_device_remove_file(dev, &class_device_attr_debug);
|
|
|
|
- class_device_destroy(class, ring->devno);
|
|
|
|
-
|
|
|
|
- clear_bit(BLKTAP_SYSFS, &tap->dev_inuse);
|
|
|
|
-
|
|
|
|
- return 0;
|
|
|
|
+ return device_schedule_callback(dev->???, _blktap_sysfs_destroy);
|
|
|
|
}
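
blktap_sysfs_destroy() now only schedules the real teardown because a sysfs handler must not tear down the object it is running against. A minimal sketch of the device_schedule_callback() idiom, with hypothetical names and using struct device rather than the class_device interface the blktap2 code still carries (an assumption, not part of the patch):

    #include <linux/device.h>
    #include <linux/module.h>

    /* Runs later from process context, outside any sysfs handler. */
    static void example_teardown(struct device *dev)
    {
    	device_unregister(dev);
    }

    static ssize_t example_remove_store(struct device *dev,
    				    struct device_attribute *attr,
    				    const char *buf, size_t count)
    {
    	int err = device_schedule_callback(dev, example_teardown);

    	return err ? err : count;
    }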
|
|
|
|
|
|
|
|
static ssize_t
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/core/gnttab.c 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/core/gnttab.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -42,7 +42,6 @@
|
|
|
|
#include <asm/synch_bitops.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <xen/interface/memory.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <asm/gnttab_dma.h>
|
|
|
|
|
|
|
|
#ifdef HAVE_XEN_PLATFORM_COMPAT_H
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/core/reboot.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/core/reboot.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -3,6 +3,7 @@
|
2010-07-07 11:12:45 +00:00
|
|
|
#include <linux/unistd.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/reboot.h>
|
|
|
|
+#include <linux/sched.h>
|
|
|
|
#include <linux/sysrq.h>
|
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
#include <xen/xenbus.h>
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/netback/common.h 2011-02-17 10:09:57.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/netback/common.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -44,7 +44,6 @@
|
|
|
|
#include <asm/pgalloc.h>
|
|
|
|
#include <xen/interface/grant_table.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
|
|
|
|
#define DPRINTK(_f, _a...) \
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/scsiback/common.h 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/scsiback/common.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -55,7 +55,6 @@
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
#include <xen/interface/io/ring.h>
|
|
|
|
#include <xen/interface/grant_table.h>
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/scsiback/interface.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/scsiback/interface.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -166,7 +166,7 @@ void scsiback_free(struct vscsibk_info *
|
2010-07-07 11:12:45 +00:00
|
|
|
int __init scsiback_interface_init(void)
|
|
|
|
{
|
|
|
|
scsiback_cachep = kmem_cache_create("vscsiif_cache",
|
|
|
|
- sizeof(struct vscsibk_info), 0, 0, NULL, NULL);
|
|
|
|
+ sizeof(struct vscsibk_info), 0, 0, NULL);
|
|
|
|
if (!scsiback_cachep) {
|
|
|
|
printk(KERN_ERR "scsiback: can't init scsi cache\n");
|
|
|
|
return -ENOMEM;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/scsifront/scsifront.c 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/scsifront/scsifront.c 2011-02-08 10:03:55.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -147,7 +147,7 @@ static void scsifront_cdb_cmd_done(struc
|
|
|
|
add_id_to_freelist(info, id);
|
|
|
|
|
|
|
|
sc->result = ring_res->rslt;
|
|
|
|
- sc->resid = ring_res->residual_len;
|
|
|
|
+ scsi_set_resid(sc, ring_res->residual_len);
|
|
|
|
|
|
|
|
if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE)
|
|
|
|
sense_len = VSCSIIF_SENSE_BUFFERSIZE;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -351,8 +351,7 @@ big_to_sg:
|
|
|
|
static int scsifront_queuecommand(struct scsi_cmnd *sc,
|
|
|
|
void (*done)(struct scsi_cmnd *))
|
|
|
|
{
|
|
|
|
- struct vscsifrnt_info *info =
|
|
|
|
- (struct vscsifrnt_info *) sc->device->host->hostdata;
|
|
|
|
+ struct vscsifrnt_info *info = shost_priv(sc->device->host);
|
|
|
|
vscsiif_request_t *ring_req;
|
|
|
|
int ref_cnt;
|
|
|
|
uint16_t rqid;
|
|
|
|
@@ -428,8 +427,7 @@ static int scsifront_eh_abort_handler(st
|
|
|
|
static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
|
|
|
|
{
|
|
|
|
struct Scsi_Host *host = sc->device->host;
|
|
|
|
- struct vscsifrnt_info *info =
|
|
|
|
- (struct vscsifrnt_info *) sc->device->host->hostdata;
|
|
|
|
+ struct vscsifrnt_info *info = shost_priv(host);
|
|
|
|
|
|
|
|
vscsiif_request_t *ring_req;
|
|
|
|
uint16_t rqid;
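
Both scsifront changes lean on accessors available from 2.6.23: shost_priv() replaces the open-coded hostdata cast, and scsi_set_resid() replaces writing sc->resid directly. A small sketch with an illustrative private structure:

    #include <scsi/scsi_cmnd.h>
    #include <scsi/scsi_device.h>
    #include <scsi/scsi_host.h>

    struct example_priv {
    	unsigned int active;
    };

    static void example_complete(struct scsi_cmnd *sc, int result, int residual)
    {
    	struct example_priv *priv = shost_priv(sc->device->host);

    	priv->active--;
    	sc->result = result;
    	scsi_set_resid(sc, residual);
    }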
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/sfc_netback/accel_solarflare.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/sfc_netback/accel_solarflare.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -37,7 +37,6 @@
|
|
|
|
#include "ci/efhw/public.h"
|
|
|
|
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/sfc_netutil/accel_util.c 2010-09-23 15:39:04.000000000 +0200
|
|
|
|
+++ head-2011-03-17/drivers/xen/sfc_netutil/accel_util.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -28,7 +28,6 @@
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/hypercall.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
|
|
|
|
#include "accel_util.h"
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/tpmback/common.h 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/tpmback/common.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -10,7 +10,6 @@
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/interface/grant_table.h>
|
|
|
|
#include <xen/interface/io/tpmif.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/tpmback/interface.c 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/tpmback/interface.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -13,6 +13,7 @@
|
|
|
|
|
|
|
|
#include "common.h"
|
|
|
|
#include <linux/delay.h>
|
|
|
|
+#include <linux/err.h>
|
|
|
|
#include <xen/balloon.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -160,7 +161,7 @@ void tpmif_disconnect_complete(tpmif_t *
|
2010-07-07 11:12:45 +00:00
|
|
|
int __init tpmif_interface_init(void)
|
|
|
|
{
|
|
|
|
tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
|
|
|
|
- 0, 0, NULL, NULL);
|
|
|
|
+ 0, 0, NULL);
|
|
|
|
return tpmif_cachep ? 0 : -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/usbback/usbback.h 2011-01-31 17:29:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/usbback/usbback.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -57,7 +57,6 @@
|
|
|
|
#include <linux/kref.h>
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/interface/xen.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
#include <xen/interface/io/usbif.h>
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/usbfront/xenbus.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/usbfront/xenbus.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -393,7 +393,7 @@ static int __init usbfront_init(void)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
|
|
|
|
- sizeof(struct urb_priv), 0, 0, NULL, NULL);
|
|
|
|
+ sizeof(struct urb_priv), 0, 0, NULL);
|
|
|
|
if (!xenhcd_urbp_cachep) {
|
|
|
|
printk(KERN_ERR "usbfront failed to create kmem cache\n");
|
|
|
|
return -ENOMEM;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/util.c 2007-07-10 09:42:30.000000000 +0200
|
|
|
|
+++ head-2011-03-17/drivers/xen/util.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -1,8 +1,5 @@
|
|
|
|
-#include <linux/mm.h>
|
|
|
|
+#include <linux/err.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
-#include <linux/slab.h>
|
|
|
|
-#include <linux/vmalloc.h>
|
|
|
|
-#include <asm/uaccess.h>
|
|
|
|
#include <xen/driver_util.h>
|
|
|
|
|
|
|
|
struct class *get_xen_class(void)
|
|
|
|
@@ -21,45 +18,3 @@ struct class *get_xen_class(void)
|
|
|
|
return xen_class;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(get_xen_class);
|
|
|
|
-
|
|
|
|
-#ifdef CONFIG_X86
|
|
|
|
-static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
|
|
|
|
-{
|
|
|
|
- /* apply_to_page_range() does all the hard work. */
|
|
|
|
- return 0;
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-struct vm_struct *alloc_vm_area(unsigned long size)
|
|
|
|
-{
|
|
|
|
- struct vm_struct *area;
|
|
|
|
-
|
|
|
|
- area = get_vm_area(size, VM_IOREMAP);
|
|
|
|
- if (area == NULL)
|
|
|
|
- return NULL;
|
|
|
|
-
|
|
|
|
- /*
|
|
|
|
- * This ensures that page tables are constructed for this region
|
|
|
|
- * of kernel virtual address space and mapped into init_mm.
|
|
|
|
- */
|
|
|
|
- if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
|
|
|
|
- area->size, f, NULL)) {
|
|
|
|
- free_vm_area(area);
|
|
|
|
- return NULL;
|
|
|
|
- }
|
|
|
|
-
|
|
|
|
- /* Map page directories into every address space. */
|
|
|
|
- vmalloc_sync_all();
|
|
|
|
-
|
|
|
|
- return area;
|
|
|
|
-}
|
|
|
|
-EXPORT_SYMBOL_GPL(alloc_vm_area);
|
|
|
|
-
|
|
|
|
-void free_vm_area(struct vm_struct *area)
|
|
|
|
-{
|
|
|
|
- struct vm_struct *ret;
|
|
|
|
- ret = remove_vm_area(area->addr);
|
|
|
|
- BUG_ON(ret != area);
|
|
|
|
- kfree(area);
|
|
|
|
-}
|
|
|
|
-EXPORT_SYMBOL_GPL(free_vm_area);
|
|
|
|
-#endif /* CONFIG_X86 */
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_backend_client.c 2010-09-23 15:39:04.000000000 +0200
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenbus/xenbus_backend_client.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -32,9 +32,9 @@
|
|
|
|
|
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
+#include <linux/vmalloc.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
|
|
|
|
/* Based on Rusty Russell's skeleton driver's map_page */
|
|
|
|
struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref)
|
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_client.c 2011-01-31 15:14:12.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenbus/xenbus_client.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -30,19 +30,25 @@
|
2010-07-07 11:12:45 +00:00
|
|
|
* IN THE SOFTWARE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
#include <xen/gnttab.h>
|
|
|
|
+#else
|
|
|
|
+#include <linux/types.h>
|
|
|
|
+#include <linux/vmalloc.h>
|
|
|
|
+#include <asm/xen/hypervisor.h>
|
|
|
|
+#include <xen/interface/xen.h>
|
|
|
|
+#include <xen/interface/event_channel.h>
|
|
|
|
+#include <xen/events.h>
|
|
|
|
+#include <xen/grant_table.h>
|
|
|
|
+#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
#include <xen/xenbus.h>
|
|
|
|
-#include <xen/driver_util.h>
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
#ifdef HAVE_XEN_PLATFORM_COMPAT_H
|
|
|
|
#include <xen/platform-compat.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
-#define DPRINTK(fmt, args...) \
|
|
|
|
- pr_debug("xenbus_client (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
|
|
|
|
-
|
|
|
|
const char *xenbus_strstate(enum xenbus_state state)
|
|
|
|
{
|
|
|
|
static const char *const name[] = {
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -60,6 +66,20 @@ const char *xenbus_strstate(enum xenbus_
|
2010-07-07 11:12:45 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xenbus_strstate);
|
|
|
|
|
|
|
|
+/**
|
|
|
|
+ * xenbus_watch_path - register a watch
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @path: path to watch
|
|
|
|
+ * @watch: watch to register
|
|
|
|
+ * @callback: callback to register
|
|
|
|
+ *
|
|
|
|
+ * Register a @watch on the given path, using the given xenbus_watch structure
|
|
|
|
+ * for storage, and the given @callback function as the callback. Return 0 on
|
|
|
|
+ * success, or -errno on error. On success, the given @path will be saved as
|
|
|
|
+ * @watch->node, and remains the caller's to free. On error, @watch->node will
|
|
|
|
+ * be NULL, the device will switch to %XenbusStateClosing, and the error will
|
|
|
|
+ * be saved in the store.
|
|
|
|
+ */
|
|
|
|
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
|
|
|
|
struct xenbus_watch *watch,
|
|
|
|
void (*callback)(struct xenbus_watch *,
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -83,6 +103,7 @@ int xenbus_watch_path(struct xenbus_devi
|
2010-07-07 11:12:45 +00:00
|
|
|
EXPORT_SYMBOL_GPL(xenbus_watch_path);
|
|
|
|
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
int xenbus_watch_path2(struct xenbus_device *dev, const char *path,
|
|
|
|
const char *path2, struct xenbus_watch *watch,
|
|
|
|
void (*callback)(struct xenbus_watch *,
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -101,8 +122,60 @@ int xenbus_watch_path2(struct xenbus_dev
|
2010-07-07 11:12:45 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(xenbus_watch_path2);
|
|
|
|
+#else
|
|
|
|
+/**
|
|
|
|
+ * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @watch: watch to register
|
|
|
|
+ * @callback: callback to register
|
|
|
|
+ * @pathfmt: format of path to watch
|
|
|
|
+ *
|
|
|
|
+ * Register a watch on the given @path, using the given xenbus_watch
|
|
|
|
+ * structure for storage, and the given @callback function as the callback.
|
|
|
|
+ * Return 0 on success, or -errno on error. On success, the watched path
|
|
|
|
+ * (built from @pathfmt) will be saved as @watch->node, and becomes the caller's to
|
|
|
|
+ * kfree(). On error, watch->node will be NULL, so the caller has nothing to
|
|
|
|
+ * free, the device will switch to %XenbusStateClosing, and the error will be
|
|
|
|
+ * saved in the store.
|
|
|
|
+ */
|
|
|
|
+int xenbus_watch_pathfmt(struct xenbus_device *dev,
|
|
|
|
+ struct xenbus_watch *watch,
|
|
|
|
+ void (*callback)(struct xenbus_watch *,
|
|
|
|
+ const char **, unsigned int),
|
|
|
|
+ const char *pathfmt, ...)
|
|
|
|
+{
|
|
|
|
+ int err;
|
|
|
|
+ va_list ap;
|
|
|
|
+ char *path;
|
2011-04-19 20:09:59 +00:00
|
|
|
+
|
2010-07-07 11:12:45 +00:00
|
|
|
+ va_start(ap, pathfmt);
|
|
|
|
+ path = kvasprintf(GFP_KERNEL, pathfmt, ap);
|
|
|
|
+ va_end(ap);
|
2011-04-19 20:09:59 +00:00
|
|
|
+
|
2010-07-07 11:12:45 +00:00
|
|
|
+ if (!path) {
|
|
|
|
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
|
|
|
|
+ return -ENOMEM;
|
|
|
|
+ }
|
|
|
|
+ err = xenbus_watch_path(dev, path, watch, callback);
|
|
|
|
+
|
|
|
|
+ if (err)
|
|
|
|
+ kfree(path);
|
|
|
|
+ return err;
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
|
|
|
|
+#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
|
|
|
|
|
2010-07-07 11:12:45 +00:00
|
|
|
+/**
|
|
|
|
+ * xenbus_switch_state
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @xbt: transaction handle
|
|
|
|
+ * @state: new state
|
|
|
|
+ *
|
|
|
|
+ * Advertise in the store a change of the given driver to the given new_state.
|
|
|
|
+ * Return 0 on success, or -errno on error. On error, the device will switch
|
|
|
|
+ * to XenbusStateClosing, and the error will be saved in the store.
|
|
|
|
+ */
|
|
|
|
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
|
|
|
|
{
|
|
|
|
/* We check whether the state is currently set to the given value, and
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -161,8 +234,8 @@ static char *error_path(struct xenbus_de
|
2010-07-07 11:12:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
-void _dev_error(struct xenbus_device *dev, int err, const char *fmt,
|
|
|
|
- va_list ap)
|
|
|
|
+static void _dev_error(struct xenbus_device *dev, int err,
|
|
|
|
+ const char *fmt, va_list ap)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned int len;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -183,14 +256,16 @@ void _dev_error(struct xenbus_device *de
|
2010-07-07 11:12:45 +00:00
|
|
|
path_buffer = error_path(dev);
|
|
|
|
|
|
|
|
if (path_buffer == NULL) {
|
|
|
|
- printk("xenbus: failed to write error node for %s (%s)\n",
|
|
|
|
- dev->nodename, printf_buffer);
|
|
|
|
+ dev_err(&dev->dev,
|
|
|
|
+ "xenbus: failed to write error node for %s (%s)\n",
|
|
|
|
+ dev->nodename, printf_buffer);
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
|
|
|
|
- printk("xenbus: failed to write error node for %s (%s)\n",
|
|
|
|
- dev->nodename, printf_buffer);
|
|
|
|
+ dev_err(&dev->dev,
|
|
|
|
+ "xenbus: failed to write error node for %s (%s)\n",
|
|
|
|
+ dev->nodename, printf_buffer);
|
|
|
|
goto fail;
|
|
|
|
}
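
These printk() conversions, like the dev_dbg()/dev_warn() ones later in xenbus_probe.c, move to the device-aware logging helpers so the bus and device name are prefixed automatically. A one-line sketch of the style, assuming a xenbus_device pointer is at hand:

    #include <linux/device.h>
    #include <xen/xenbus.h>

    static void example_report(struct xenbus_device *dev, int err)
    {
    	dev_warn(&dev->dev, "request failed: %d\n", err);
    }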
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -200,6 +275,15 @@ fail:
|
2010-07-07 11:12:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
+/**
|
|
|
|
+ * xenbus_dev_error
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @err: error to report
|
|
|
|
+ * @fmt: error message format
|
|
|
|
+ *
|
|
|
|
+ * Report the given negative errno into the store, along with the given
|
|
|
|
+ * formatted message.
|
|
|
|
+ */
|
2011-04-19 20:09:59 +00:00
|
|
|
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
|
2010-07-07 11:12:45 +00:00
|
|
|
{
|
2011-04-19 20:09:59 +00:00
|
|
|
va_list ap;
|
|
|
|
@@ -211,6 +295,16 @@ void xenbus_dev_error(struct xenbus_devi
|
2010-07-07 11:12:45 +00:00
|
|
|
EXPORT_SYMBOL_GPL(xenbus_dev_error);
|
|
|
|
|
|
|
|
|
|
|
|
+/**
|
|
|
|
+ * xenbus_dev_fatal
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @err: error to report
|
|
|
|
+ * @fmt: error message format
|
|
|
|
+ *
|
|
|
|
+ * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
|
|
|
|
+ * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
|
|
|
|
+ * closedown of this driver and its peer.
|
|
|
|
+ */
|
2011-04-19 20:09:59 +00:00
|
|
|
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
|
2010-07-07 11:12:45 +00:00
|
|
|
{
|
2011-04-19 20:09:59 +00:00
|
|
|
va_list ap;
|
|
|
|
@@ -224,6 +318,15 @@ void xenbus_dev_fatal(struct xenbus_devi
|
2010-07-07 11:12:45 +00:00
|
|
|
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
|
|
|
|
|
|
|
|
|
|
|
|
+/**
|
|
|
|
+ * xenbus_grant_ring
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @ring_mfn: mfn of ring to grant
|
|
|
|
+ *
|
|
|
|
+ * Grant access to the given @ring_mfn to the peer of the given device. Return
|
|
|
|
+ * 0 on success, or -errno on error. On error, the device will switch to
|
|
|
|
+ * XenbusStateClosing, and the error will be saved in the store.
|
|
|
|
+ */
|
|
|
|
int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
|
|
|
|
{
|
|
|
|
int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -234,6 +337,12 @@ int xenbus_grant_ring(struct xenbus_devi
|
2010-07-07 11:12:45 +00:00
|
|
|
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
|
|
|
|
|
|
|
|
|
|
|
|
+/**
|
|
|
|
+ * Allocate an event channel for the given xenbus_device, assigning the newly
|
|
|
|
+ * created local port to *port. Return 0 on success, or -errno on error. On
|
|
|
|
+ * error, the device will switch to XenbusStateClosing, and the error will be
|
|
|
|
+ * saved in the store.
|
|
|
|
+ */
|
|
|
|
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
|
|
|
|
{
|
|
|
|
struct evtchn_alloc_unbound alloc_unbound;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -254,6 +363,38 @@ int xenbus_alloc_evtchn(struct xenbus_de
|
2010-07-07 11:12:45 +00:00
|
|
|
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
|
|
|
|
|
|
|
|
|
|
|
|
+#if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */
|
|
|
|
+/**
|
|
|
|
+ * Bind to an existing interdomain event channel in another domain. Returns 0
|
|
|
|
+ * on success and stores the local port in *port. On error, returns -errno,
|
|
|
|
+ * switches the device to XenbusStateClosing, and saves the error in XenStore.
|
|
|
|
+ */
|
|
|
|
+int xenbus_bind_evtchn(struct xenbus_device *dev, int remote_port, int *port)
|
|
|
|
+{
|
|
|
|
+ struct evtchn_bind_interdomain bind_interdomain;
|
|
|
|
+ int err;
|
|
|
|
+
|
|
|
|
+ bind_interdomain.remote_dom = dev->otherend_id;
|
|
|
|
+ bind_interdomain.remote_port = remote_port;
|
|
|
|
+
|
|
|
|
+ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
|
|
|
|
+ &bind_interdomain);
|
|
|
|
+ if (err)
|
|
|
|
+ xenbus_dev_fatal(dev, err,
|
|
|
|
+ "binding to event channel %d from domain %d",
|
|
|
|
+ remote_port, dev->otherend_id);
|
|
|
|
+ else
|
|
|
|
+ *port = bind_interdomain.local_port;
|
|
|
|
+
|
|
|
|
+ return err;
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(xenbus_bind_evtchn);
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * Free an existing event channel. Returns 0 on success or -errno on error.
|
|
|
|
+ */
|
|
|
|
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
|
|
|
|
{
|
|
|
|
struct evtchn_close close;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -270,6 +411,191 @@ int xenbus_free_evtchn(struct xenbus_dev
|
2010-07-07 11:12:45 +00:00
|
|
|
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
|
|
|
|
|
|
|
|
|
|
|
|
+#if 0 /* !defined(CONFIG_XEN) && !defined(MODULE) */
|
|
|
|
+/**
|
|
|
|
+ * xenbus_map_ring_valloc
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @gnt_ref: grant reference
|
|
|
|
+ * @vaddr: pointer to address to be filled out by mapping
|
|
|
|
+ *
|
|
|
|
+ * Based on Rusty Russell's skeleton driver's map_page.
|
|
|
|
+ * Map a page of memory into this domain from another domain's grant table.
|
|
|
|
+ * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
|
|
|
|
+ * page to that address, and sets *vaddr to that address.
|
|
|
|
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
|
|
|
|
+ * or -ENOMEM on error. If an error is returned, device will switch to
|
|
|
|
+ * XenbusStateClosing and the error message will be saved in XenStore.
|
|
|
|
+ */
|
|
|
|
+int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
|
|
|
|
+{
|
|
|
|
+ struct gnttab_map_grant_ref op = {
|
|
|
|
+ .flags = GNTMAP_host_map,
|
|
|
|
+ .ref = gnt_ref,
|
|
|
|
+ .dom = dev->otherend_id,
|
|
|
|
+ };
|
|
|
|
+ struct vm_struct *area;
|
|
|
|
+
|
|
|
|
+ *vaddr = NULL;
|
|
|
|
+
|
|
|
|
+ area = alloc_vm_area(PAGE_SIZE);
|
|
|
|
+ if (!area)
|
|
|
|
+ return -ENOMEM;
|
|
|
|
+
|
|
|
|
+ op.host_addr = (unsigned long)area->addr;
|
|
|
|
+
|
|
|
|
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
|
|
|
|
+ BUG();
|
|
|
|
+
|
|
|
|
+ if (op.status != GNTST_okay) {
|
|
|
|
+ free_vm_area(area);
|
|
|
|
+ xenbus_dev_fatal(dev, op.status,
|
|
|
|
+ "mapping in shared page %d from domain %d",
|
|
|
|
+ gnt_ref, dev->otherend_id);
|
|
|
|
+ return op.status;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* Stuff the handle in an unused field */
|
|
|
|
+ area->phys_addr = (unsigned long)op.handle;
|
|
|
|
+
|
|
|
|
+ *vaddr = area->addr;
|
|
|
|
+ return 0;
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * xenbus_map_ring
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @gnt_ref: grant reference
|
|
|
|
+ * @handle: pointer to grant handle to be filled
|
|
|
|
+ * @vaddr: address to be mapped to
|
|
|
|
+ *
|
|
|
|
+ * Map a page of memory into this domain from another domain's grant table.
|
|
|
|
+ * xenbus_map_ring does not allocate the virtual address space (you must do
|
|
|
|
+ * this yourself!). It only maps in the page to the specified address.
|
|
|
|
+ * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
|
|
|
|
+ * or -ENOMEM on error. If an error is returned, device will switch to
|
|
|
|
+ * XenbusStateClosing and the error message will be saved in XenStore.
|
|
|
|
+ */
|
|
|
|
+int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
|
|
|
|
+ grant_handle_t *handle, void *vaddr)
|
|
|
|
+{
|
|
|
|
+ struct gnttab_map_grant_ref op = {
|
|
|
|
+ .host_addr = (unsigned long)vaddr,
|
|
|
|
+ .flags = GNTMAP_host_map,
|
|
|
|
+ .ref = gnt_ref,
|
|
|
|
+ .dom = dev->otherend_id,
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
|
|
|
|
+ BUG();
|
|
|
|
+
|
|
|
|
+ if (op.status != GNTST_okay) {
|
|
|
|
+ xenbus_dev_fatal(dev, op.status,
|
|
|
|
+ "mapping in shared page %d from domain %d",
|
|
|
|
+ gnt_ref, dev->otherend_id);
|
|
|
|
+ } else
|
|
|
|
+ *handle = op.handle;
|
|
|
|
+
|
|
|
|
+ return op.status;
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(xenbus_map_ring);
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * xenbus_unmap_ring_vfree
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @vaddr: addr to unmap
|
|
|
|
+ *
|
|
|
|
+ * Based on Rusty Russell's skeleton driver's unmap_page.
|
|
|
|
+ * Unmap a page of memory in this domain that was imported from another domain.
|
|
|
|
+ * Use xenbus_unmap_ring_vfree if you mapped in your memory with
|
|
|
|
+ * xenbus_map_ring_valloc (it will free the virtual address space).
|
|
|
|
+ * Returns 0 on success and returns GNTST_* on error
|
|
|
|
+ * (see xen/include/interface/grant_table.h).
|
|
|
|
+ */
|
|
|
|
+int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
|
|
|
|
+{
|
|
|
|
+ struct vm_struct *area;
|
|
|
|
+ struct gnttab_unmap_grant_ref op = {
|
|
|
|
+ .host_addr = (unsigned long)vaddr,
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
|
|
|
|
+ * method so that we don't have to muck with vmalloc internals here.
|
|
|
|
+ * We could force the user to hang on to their struct vm_struct from
|
|
|
|
+ * xenbus_map_ring_valloc, but these 6 lines considerably simplify
|
|
|
|
+ * this API.
|
|
|
|
+ */
|
|
|
|
+ read_lock(&vmlist_lock);
|
|
|
|
+ for (area = vmlist; area != NULL; area = area->next) {
|
|
|
|
+ if (area->addr == vaddr)
|
|
|
|
+ break;
|
|
|
|
+ }
|
|
|
|
+ read_unlock(&vmlist_lock);
|
|
|
|
+
|
|
|
|
+ if (!area) {
|
|
|
|
+ xenbus_dev_error(dev, -ENOENT,
|
|
|
|
+ "can't find mapped virtual address %p", vaddr);
|
|
|
|
+ return GNTST_bad_virt_addr;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ op.handle = (grant_handle_t)area->phys_addr;
|
|
|
|
+
|
|
|
|
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
|
|
|
|
+ BUG();
|
|
|
|
+
|
|
|
|
+ if (op.status == GNTST_okay)
|
|
|
|
+ free_vm_area(area);
|
|
|
|
+ else
|
|
|
|
+ xenbus_dev_error(dev, op.status,
|
|
|
|
+ "unmapping page at handle %d error %d",
|
|
|
|
+ (int16_t)area->phys_addr, op.status);
|
|
|
|
+
|
|
|
|
+ return op.status;
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * xenbus_unmap_ring
|
|
|
|
+ * @dev: xenbus device
|
|
|
|
+ * @handle: grant handle
|
|
|
|
+ * @vaddr: addr to unmap
|
|
|
|
+ *
|
|
|
|
+ * Unmap a page of memory in this domain that was imported from another domain.
|
|
|
|
+ * Returns 0 on success and returns GNTST_* on error
|
|
|
|
+ * (see xen/include/interface/grant_table.h).
|
|
|
|
+ */
|
|
|
|
+int xenbus_unmap_ring(struct xenbus_device *dev,
|
|
|
|
+ grant_handle_t handle, void *vaddr)
|
|
|
|
+{
|
|
|
|
+ struct gnttab_unmap_grant_ref op = {
|
|
|
|
+ .host_addr = (unsigned long)vaddr,
|
|
|
|
+ .handle = handle,
|
|
|
|
+ };
|
|
|
|
+
|
|
|
|
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
|
|
|
|
+ BUG();
|
|
|
|
+
|
|
|
|
+ if (op.status != GNTST_okay)
|
|
|
|
+ xenbus_dev_error(dev, op.status,
|
|
|
|
+ "unmapping page at handle %d error %d",
|
|
|
|
+ handle, op.status);
|
|
|
|
+
|
|
|
|
+ return op.status;
|
|
|
|
+}
|
|
|
|
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
|
|
|
|
+#endif
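
The helpers above pair up: a ring mapped with xenbus_map_ring_valloc() is torn down with xenbus_unmap_ring_vfree(), which also releases the vm area. A hedged usage sketch (function names hypothetical), assuming the #if 0 block were enabled and the declarations exported through <xen/xenbus.h>:

    #include <xen/xenbus.h>

    static void *example_map_shared_ring(struct xenbus_device *dev, int gnt_ref)
    {
    	void *ring;

    	/* On failure the error has already been reported via xenbus_dev_fatal(). */
    	if (xenbus_map_ring_valloc(dev, gnt_ref, &ring))
    		return NULL;
    	return ring;
    }

    static void example_unmap_shared_ring(struct xenbus_device *dev, void *ring)
    {
    	/* A non-zero status has already been reported via xenbus_dev_error(). */
    	xenbus_unmap_ring_vfree(dev, ring);
    }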
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * xenbus_read_driver_state
|
|
|
|
+ * @path: path for driver
|
|
|
|
+ *
|
|
|
|
+ * Return the state of the driver rooted at the given store path, or
|
|
|
|
+ * XenbusStateUnknown if no state can be read.
|
|
|
|
+ */
|
|
|
|
enum xenbus_state xenbus_read_driver_state(const char *path)
|
|
|
|
{
|
|
|
|
enum xenbus_state result;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:32:16.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenbus/xenbus_comms.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -34,12 +34,15 @@
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/err.h>
|
|
|
|
-#include <linux/ptrace.h>
|
|
|
|
-#include <linux/workqueue.h>
|
|
|
|
-#include <xen/evtchn.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
-
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
+#include <xen/evtchn.h>
|
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
+#else
|
|
|
|
+#include <asm/xen/hypervisor.h>
|
|
|
|
+#include <xen/events.h>
|
|
|
|
+#include <xen/page.h>
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
#include "xenbus_comms.h"
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -49,7 +52,6 @@
|
|
|
|
|
|
|
|
static int xenbus_irq;
|
|
|
|
|
|
|
|
-extern void xenbus_probe(struct work_struct *);
|
|
|
|
static DECLARE_WORK(probe_work, xenbus_probe);
|
|
|
|
|
|
|
|
static DECLARE_WAIT_QUEUE_HEAD(xb_waitq);
|
|
|
|
@@ -112,6 +114,13 @@ static const void *get_input_chunk(XENST
|
2010-07-07 11:12:45 +00:00
|
|
|
return buf + MASK_XENSTORE_IDX(cons);
|
|
|
|
}
|
|
|
|
|
|
|
|
+/**
|
|
|
|
+ * xb_write - low level write
|
|
|
|
+ * @data: buffer to send
|
|
|
|
+ * @len: length of buffer
|
|
|
|
+ *
|
|
|
|
+ * Returns 0 on success, error otherwise.
|
|
|
|
+ */
|
|
|
|
int xb_write(const void *data, unsigned len)
|
|
|
|
{
|
|
|
|
struct xenstore_domain_interface *intf = xen_store_interface;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -220,7 +229,9 @@ int xb_read(void *data, unsigned len)
|
2010-07-07 11:12:45 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
-/* Set up interrupt handler off store event channel. */
|
|
|
|
+/**
|
|
|
|
+ * xb_init_comms - Set up interrupt handler off store event channel.
|
|
|
|
+ */
|
|
|
|
int xb_init_comms(void)
|
|
|
|
{
|
|
|
|
struct xenstore_domain_interface *intf = xen_store_interface;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -240,7 +251,11 @@ int xb_init_comms(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
if (xenbus_irq)
|
|
|
|
unbind_from_irqhandler(xenbus_irq, &xb_waitq);
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
err = bind_caller_port_to_irqhandler(
|
|
|
|
+#else
|
|
|
|
+ err = bind_evtchn_to_irqhandler(
|
|
|
|
+#endif
|
|
|
|
xen_store_evtchn, wake_waiting,
|
|
|
|
0, "xenbus", &xb_waitq);
|
|
|
|
if (err <= 0) {
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -43,21 +43,26 @@
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/notifier.h>
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
-#include <linux/module.h>
|
|
|
|
-#include <xen/gnttab.h>
|
|
|
|
+#include <linux/io.h>
|
|
|
|
|
|
|
|
-#include <asm/io.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
-#include <asm/maddr.h>
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
#include <xen/xenbus.h>
|
|
|
|
#include <xen/xen_proc.h>
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
#include <xen/features.h>
|
|
|
|
+#include <xen/gnttab.h>
|
|
|
|
#ifdef MODULE
|
|
|
|
#include <xen/hvm.h>
|
|
|
|
#endif
|
|
|
|
+#else
|
|
|
|
+#include <asm/xen/hypervisor.h>
|
|
|
|
+#include <xen/xenbus.h>
|
|
|
|
+#include <xen/events.h>
|
|
|
|
+#include <xen/page.h>
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
#include "xenbus_comms.h"
|
|
|
|
#include "xenbus_probe.h"
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -170,7 +175,7 @@ static int read_backend_details(struct x
|
2010-07-07 11:12:45 +00:00
|
|
|
return read_otherend_details(xendev, "backend-id", "backend");
|
|
|
|
}
|
|
|
|
|
|
|
|
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
|
|
|
|
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) && (defined(CONFIG_XEN) || defined(MODULE))
|
|
|
|
static int xenbus_uevent_frontend(struct device *dev, char **envp,
|
|
|
|
int num_envp, char *buffer, int buffer_size)
|
|
|
|
{
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -209,12 +214,16 @@ static struct xen_bus_type xenbus_fronte
|
2010-07-07 11:12:45 +00:00
|
|
|
.probe = xenbus_dev_probe,
|
|
|
|
.remove = xenbus_dev_remove,
|
|
|
|
.shutdown = xenbus_dev_shutdown,
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
.uevent = xenbus_uevent_frontend,
|
|
|
|
#endif
|
|
|
|
+#endif
|
|
|
|
},
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
.dev = {
|
|
|
|
.bus_id = "xen",
|
|
|
|
},
|
|
|
|
+#endif
|
|
|
|
};
|
|
|
|
|
|
|
|
static void otherend_changed(struct xenbus_watch *watch,
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -230,14 +239,15 @@ static void otherend_changed(struct xenb
|
2010-07-07 11:12:45 +00:00
|
|
|
if (!dev->otherend ||
|
|
|
|
strncmp(dev->otherend, vec[XS_WATCH_PATH],
|
|
|
|
strlen(dev->otherend))) {
|
|
|
|
- DPRINTK("Ignoring watch at %s", vec[XS_WATCH_PATH]);
|
|
|
|
+ dev_dbg(&dev->dev, "Ignoring watch at %s", vec[XS_WATCH_PATH]);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
state = xenbus_read_driver_state(dev->otherend);
|
|
|
|
|
|
|
|
- DPRINTK("state is %d (%s), %s, %s", state, xenbus_strstate(state),
|
|
|
|
- dev->otherend_watch.node, vec[XS_WATCH_PATH]);
|
|
|
|
+ dev_dbg(&dev->dev, "state is %d (%s), %s, %s",
|
|
|
|
+ state, xenbus_strstate(state), dev->otherend_watch.node,
|
|
|
|
+ vec[XS_WATCH_PATH]);
|
|
|
|
|
|
|
|
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
|
|
|
|
/*
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -274,8 +284,13 @@ static int talk_to_otherend(struct xenbu
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
static int watch_otherend(struct xenbus_device *dev)
|
|
|
|
{
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
return xenbus_watch_path2(dev, dev->otherend, "state",
|
|
|
|
&dev->otherend_watch, otherend_changed);
|
|
|
|
+#else
|
|
|
|
+ return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
|
|
|
|
+ "%s/%s", dev->otherend, "state");
|
|
|
|
+#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -301,9 +316,9 @@ int xenbus_dev_probe(struct device *_dev
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
err = talk_to_otherend(dev);
|
|
|
|
if (err) {
|
|
|
|
- printk(KERN_WARNING
|
|
|
|
- "xenbus_probe: talk_to_otherend on %s failed.\n",
|
|
|
|
- dev->nodename);
|
|
|
|
+ dev_warn(&dev->dev,
|
|
|
|
+ "xenbus_probe: talk_to_otherend on %s failed.\n",
|
|
|
|
+ dev->nodename);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -313,9 +328,9 @@ int xenbus_dev_probe(struct device *_dev
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
err = watch_otherend(dev);
|
|
|
|
if (err) {
|
|
|
|
- printk(KERN_WARNING
|
|
|
|
- "xenbus_probe: watch_otherend on %s failed.\n",
|
|
|
|
- dev->nodename);
|
|
|
|
+ dev_warn(&dev->dev,
|
|
|
|
+ "xenbus_probe: watch_otherend on %s failed.\n",
|
|
|
|
+ dev->nodename);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -360,8 +375,8 @@ static void xenbus_dev_shutdown(struct d
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
get_device(&dev->dev);
|
|
|
|
if (dev->state != XenbusStateConnected) {
|
|
|
|
- printk("%s: %s: %s != Connected, skipping\n", __FUNCTION__,
|
|
|
|
- dev->nodename, xenbus_strstate(dev->state));
|
|
|
|
+ dev_info(&dev->dev, "%s: %s: %s != Connected, skipping\n", __FUNCTION__,
|
|
|
|
+ dev->nodename, xenbus_strstate(dev->state));
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
xenbus_switch_state(dev, XenbusStateClosing);
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -371,7 +386,8 @@ static void xenbus_dev_shutdown(struct d
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
timeout = wait_for_completion_timeout(&dev->down, timeout);
|
|
|
|
if (!timeout)
|
|
|
|
- printk("%s: %s timeout closing device\n", __FUNCTION__, dev->nodename);
|
|
|
|
+ dev_info(&dev->dev, "%s: %s timeout closing device\n",
|
|
|
|
+ __FUNCTION__, dev->nodename);
|
|
|
|
out:
|
|
|
|
put_device(&dev->dev);
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -559,7 +575,9 @@ int xenbus_probe_node(struct xen_bus_typ
|
2010-07-07 11:12:45 +00:00
|
|
|
xendev->devicetype = tmpstring;
|
|
|
|
init_completion(&xendev->down);
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
xendev->dev.parent = &bus->dev;
|
|
|
|
+#endif
|
|
|
|
xendev->dev.bus = &bus->bus;
|
|
|
|
xendev->dev.release = xenbus_dev_release;
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -574,15 +592,16 @@ int xenbus_probe_node(struct xen_bus_typ
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
err = device_create_file(&xendev->dev, &dev_attr_nodename);
|
|
|
|
if (err)
|
|
|
|
- goto unregister;
|
|
|
|
+ goto fail_unregister;
|
|
|
|
+
|
|
|
|
err = device_create_file(&xendev->dev, &dev_attr_devtype);
|
|
|
|
if (err)
|
|
|
|
- goto unregister;
|
|
|
|
+ goto fail_remove_file;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
-unregister:
|
|
|
|
+fail_remove_file:
|
|
|
|
device_remove_file(&xendev->dev, &dev_attr_nodename);
|
|
|
|
- device_remove_file(&xendev->dev, &dev_attr_devtype);
|
|
|
|
+fail_unregister:
|
|
|
|
device_unregister(&xendev->dev);
|
|
|
|
fail:
|
|
|
|
kfree(xendev);
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -598,7 +617,8 @@ static int xenbus_probe_frontend(const c
|
2010-07-07 11:12:45 +00:00
|
|
|
if (!strcmp(type, "console"))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
- nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_frontend.root, type, name);
|
|
|
|
+ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
|
|
|
|
+ xenbus_frontend.root, type, name);
|
|
|
|
if (!nodename)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -676,7 +696,7 @@ static int strsep_len(const char *str, c
|
2010-07-07 11:12:45 +00:00
|
|
|
return (len == 0) ? i : -ERANGE;
|
|
|
|
}
|
|
|
|
|
|
|
|
-void dev_changed(const char *node, struct xen_bus_type *bus)
|
|
|
|
+void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
|
|
|
|
{
|
|
|
|
int exists, rootlen;
|
|
|
|
struct xenbus_device *dev;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -684,7 +704,7 @@ void dev_changed(const char *node, struc
|
2010-07-07 11:12:45 +00:00
|
|
|
const char *p, *root;
|
|
|
|
|
|
|
|
if (bus->error || char_count(node, '/') < 2)
|
|
|
|
- return;
|
|
|
|
+ return;
|
|
|
|
|
|
|
|
exists = xenbus_exists(XBT_NIL, node, "");
|
|
|
|
if (!exists) {
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -718,7 +738,7 @@ static void frontend_changed(struct xenb
|
2010-07-07 11:12:45 +00:00
|
|
|
{
|
|
|
|
DPRINTK("");
|
|
|
|
|
|
|
|
- dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
|
|
|
|
+ xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We watch for devices appearing and vanishing. */
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -943,6 +963,7 @@ static int xsd_port_read(char *page, cha
|
2010-07-07 11:12:45 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
+#if defined(CONFIG_XEN_XENBUS_DEV) || defined(MODULE)
|
2010-07-07 11:12:45 +00:00
|
|
|
static int xb_free_port(evtchn_port_t port)
|
|
|
|
{
|
|
|
|
struct evtchn_close close;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -958,7 +979,7 @@ int xenbus_conn(domid_t remote_dom, unsi
|
|
|
|
BUG_ON(atomic_read(&xenbus_xsd_state) != XENBUS_XSD_FOREIGN_INIT);
|
|
|
|
BUG_ON(!is_initial_xendomain());
|
|
|
|
|
|
|
|
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_XEN_PRIVILEGED_GUEST)
|
|
|
|
+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
|
|
|
|
remove_xen_proc_entry("xsd_kva");
|
|
|
|
remove_xen_proc_entry("xsd_port");
|
|
|
|
#endif
|
|
|
|
@@ -998,11 +1019,18 @@ fail0:
|
2010-07-07 11:12:45 +00:00
|
|
|
xen_store_evtchn = -1;
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
-static int xenbus_probe_init(void)
|
|
|
|
+#ifndef MODULE
|
|
|
|
+static int __init xenbus_probe_init(void)
|
|
|
|
+#else
|
|
|
|
+static int __devinit xenbus_probe_init(void)
|
|
|
|
+#endif
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
unsigned long page = 0;
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
DPRINTK("");
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -1021,6 +1049,7 @@ static int xenbus_probe_init(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
* Domain0 doesn't have a store_evtchn or store_mfn yet.
|
|
|
|
*/
|
|
|
|
if (is_initial_xendomain()) {
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
struct evtchn_alloc_unbound alloc_unbound;
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
/* Allocate Xenstore page */
|
|
|
|
@@ -1059,10 +1088,13 @@ static int xenbus_probe_init(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
if (xsd_port_intf)
|
|
|
|
xsd_port_intf->read_proc = xsd_port_read;
|
|
|
|
#endif
|
|
|
|
+#else
|
|
|
|
+ /* dom0 not yet supported */
|
|
|
|
+#endif
|
|
|
|
xen_store_interface = mfn_to_virt(xen_store_mfn);
|
|
|
|
} else {
|
|
|
|
atomic_set(&xenbus_xsd_state, XENBUS_XSD_FOREIGN_READY);
|
|
|
|
-#ifdef CONFIG_XEN
|
|
|
|
+#ifndef MODULE
|
|
|
|
xen_store_evtchn = xen_start_info->store_evtchn;
|
|
|
|
xen_store_mfn = xen_start_info->store_mfn;
|
|
|
|
xen_store_interface = mfn_to_virt(xen_store_mfn);
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -1078,7 +1110,9 @@ static int xenbus_probe_init(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
xenbus_dev_init();
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
/* Initialize the interface to xenstore. */
|
|
|
|
err = xs_init();
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -1088,6 +1122,7 @@ static int xenbus_probe_init(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
/* Register ourselves with the kernel device subsystem */
|
|
|
|
if (!xenbus_frontend.error) {
|
|
|
|
xenbus_frontend.error = device_register(&xenbus_frontend.dev);
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -1098,6 +1133,7 @@ static int xenbus_probe_init(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
xenbus_frontend.error);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
+#endif
|
|
|
|
xenbus_backend_device_register();
|
|
|
|
|
|
|
|
if (!is_initial_xendomain())
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -1112,16 +1148,22 @@ static int xenbus_probe_init(void)
|
|
|
|
* registered.
|
|
|
|
*/
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
2011-04-19 20:09:59 +00:00
|
|
|
if (page != 0)
|
2010-07-07 11:12:45 +00:00
|
|
|
free_page(page);
|
|
|
|
+#endif
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
-#ifdef CONFIG_XEN
|
|
|
|
+#ifndef MODULE
|
|
|
|
postcore_initcall(xenbus_probe_init);
|
|
|
|
+#ifdef CONFIG_XEN
|
|
|
|
MODULE_LICENSE("Dual BSD/GPL");
|
|
|
|
#else
|
|
|
|
-int xenbus_init(void)
|
|
|
|
+MODULE_LICENSE("GPL");
|
|
|
|
+#endif
|
|
|
|
+#else
|
|
|
|
+int __devinit xenbus_init(void)
|
|
|
|
{
|
|
|
|
return xenbus_probe_init();
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -62,7 +62,9 @@ struct xen_bus_type
|
|
|
|
int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
|
|
|
|
int (*probe)(const char *type, const char *dir);
|
|
|
|
struct bus_type bus;
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
struct device dev;
|
|
|
|
+#endif
|
|
|
|
};
|
|
|
|
|
|
|
|
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
|
|
|
|
@@ -77,6 +79,6 @@ extern int xenbus_probe_node(struct xen_
|
|
|
|
const char *nodename);
|
|
|
|
extern int xenbus_probe_devices(struct xen_bus_type *bus);
|
|
|
|
|
|
|
|
-extern void dev_changed(const char *node, struct xen_bus_type *bus);
|
|
|
|
+extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
|
|
|
|
|
|
|
|
#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenbus/xenbus_probe_backend.c 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -236,7 +236,7 @@ static void backend_changed(struct xenbu
|
|
|
|
{
|
|
|
|
DPRINTK("");
|
|
|
|
|
|
|
|
- dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
|
|
|
|
+ xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct xenbus_watch be_watch = {
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenbus/xenbus_xs.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -662,7 +662,9 @@ void unregister_xenbus_watch(struct xenb
|
2010-07-07 11:12:45 +00:00
|
|
|
char token[sizeof(watch) * 2 + 1];
|
|
|
|
int err;
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
BUG_ON(watch->flags & XBWF_new_thread);
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
sprintf(token, "%lX", (long)watch);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -681,6 +683,11 @@ void unregister_xenbus_watch(struct xenb
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
up_read(&xs_state.watch_mutex);
|
|
|
|
|
|
|
|
+ /* Make sure there are no callbacks running currently (unless
|
|
|
|
+	   it's us) */
|
|
|
|
+ if (current->pid != xenwatch_pid)
|
|
|
|
+ mutex_lock(&xenwatch_mutex);
|
|
|
|
+
|
|
|
|
/* Cancel pending watch events. */
|
|
|
|
spin_lock(&watch_events_lock);
|
|
|
|
list_for_each_entry_safe(msg, tmp, &watch_events, list) {
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -692,11 +699,8 @@ void unregister_xenbus_watch(struct xenb
|
2010-07-07 11:12:45 +00:00
|
|
|
}
|
|
|
|
spin_unlock(&watch_events_lock);
|
|
|
|
|
|
|
|
- /* Flush any currently-executing callback, unless we are it. :-) */
|
|
|
|
- if (current->pid != xenwatch_pid) {
|
|
|
|
- mutex_lock(&xenwatch_mutex);
|
|
|
|
+ if (current->pid != xenwatch_pid)
|
|
|
|
mutex_unlock(&xenwatch_mutex);
|
|
|
|
- }
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -734,6 +738,7 @@ void xs_suspend_cancel(void)
|
2010-07-07 11:12:45 +00:00
|
|
|
mutex_unlock(&xs_state.transaction_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
static int xenwatch_handle_callback(void *data)
|
|
|
|
{
|
|
|
|
struct xs_stored_msg *msg = data;
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -751,6 +756,7 @@ static int xenwatch_handle_callback(void
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
static int xenwatch_thread(void *unused)
|
|
|
|
{
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -780,6 +786,7 @@ static int xenwatch_thread(void *unused)
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
msg = list_entry(ent, struct xs_stored_msg, list);
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(MODULE)
|
|
|
|
/*
|
|
|
|
* Unlock the mutex before running an XBWF_new_thread
|
|
|
|
* handler. kthread_run can block which can deadlock
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -796,6 +803,15 @@ static int xenwatch_thread(void *unused)
|
2010-07-07 11:12:45 +00:00
|
|
|
xenwatch_handle_callback(msg);
|
|
|
|
mutex_unlock(&xenwatch_mutex);
|
|
|
|
}
|
|
|
|
+#else
|
|
|
|
+ msg->u.watch.handle->callback(
|
|
|
|
+ msg->u.watch.handle,
|
|
|
|
+ (const char **)msg->u.watch.vec,
|
|
|
|
+ msg->u.watch.vec_size);
|
|
|
|
+ mutex_unlock(&xenwatch_mutex);
|
|
|
|
+ kfree(msg->u.watch.vec);
|
|
|
|
+ kfree(msg);
|
|
|
|
+#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/drivers/xen/xenoprof/xenoprofile.c 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -26,7 +26,6 @@
|
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <xen/evtchn.h>
|
|
|
|
#include <xen/xenoprof.h>
|
|
|
|
-#include <xen/driver_util.h>
|
|
|
|
#include <xen/interface/xen.h>
|
|
|
|
#include <xen/interface/xenoprof.h>
|
|
|
|
#include "../../../drivers/oprofile/cpu_buffer.h"
|
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap_32.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -53,6 +53,8 @@ extern unsigned long __FIXADDR_TOP;
|
|
|
|
enum fixed_addresses {
|
|
|
|
FIX_HOLE,
|
|
|
|
FIX_VDSO,
|
|
|
|
+ FIX_DBGP_BASE,
|
|
|
|
+ FIX_EARLYCON_MEM_BASE,
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
|
|
|
|
#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/highmem.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -75,8 +75,7 @@ struct page *kmap_atomic_to_page(void *p
|
|
|
|
|
|
|
|
#define kmap_atomic_pte(page, type) \
|
|
|
|
kmap_atomic_prot(page, type, \
|
|
|
|
- test_bit(PG_pinned, &(page)->flags) \
|
|
|
|
- ? PAGE_KERNEL_RO : kmap_prot)
|
|
|
|
+ PagePinned(page) ? PAGE_KERNEL_RO : kmap_prot)
|
|
|
|
|
|
|
|
#define flush_cache_kmaps() do { } while (0)
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/maddr_32.h 2008-04-02 12:34:02.000000000 +0200
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/maddr_32.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -155,6 +155,7 @@ static inline paddr_t pte_machine_to_phy
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_PAE
|
|
|
|
#define __pte_ma(x) ((pte_t) { (x), (maddr_t)(x) >> 32 } )
|
|
|
|
+extern unsigned long long __supported_pte_mask;
|
|
|
|
static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
|
|
|
|
{
|
|
|
|
pte_t pte;
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context_32.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -16,7 +16,7 @@ void mm_pin_all(void);
|
|
|
|
static inline void xen_activate_mm(struct mm_struct *prev,
|
|
|
|
struct mm_struct *next)
|
|
|
|
{
|
|
|
|
- if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
|
|
|
|
+ if (!PagePinned(virt_to_page(next->pgd)))
|
|
|
|
mm_pin(next);
|
|
|
|
}
|
|
|
|
|
|
|
|
@@ -51,6 +51,8 @@ static inline void __prepare_arch_switch
|
|
|
|
: : "r" (0) );
|
|
|
|
}
|
|
|
|
|
|
|
|
+void leave_mm(unsigned long cpu);
|
|
|
|
+
|
|
|
|
static inline void switch_mm(struct mm_struct *prev,
|
|
|
|
struct mm_struct *next,
|
|
|
|
struct task_struct *tsk)
|
|
|
|
@@ -60,7 +62,7 @@ static inline void switch_mm(struct mm_s
|
|
|
|
|
|
|
|
if (likely(prev != next)) {
|
|
|
|
BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
|
|
|
|
- !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
|
|
|
|
+ !PagePinned(virt_to_page(next->pgd)));
|
|
|
|
|
|
|
|
/* stop flush ipis for the previous mm */
|
|
|
|
cpu_clear(cpu, prev->cpu_vm_mask);
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc_32.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -5,7 +5,7 @@
|
|
|
|
#include <linux/mm.h> /* for struct page */
|
|
|
|
#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
|
|
|
|
|
|
|
|
-#define paravirt_alloc_pt(pfn) do { } while (0)
|
|
|
|
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
|
|
|
|
#define paravirt_alloc_pd(pfn) do { } while (0)
|
|
|
|
#define paravirt_alloc_pd(pfn) do { } while (0)
|
|
|
|
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
|
|
|
|
@@ -14,15 +14,15 @@
|
|
|
|
|
|
|
|
#define pmd_populate_kernel(mm, pmd, pte) \
|
|
|
|
do { \
|
|
|
|
- paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
|
|
|
|
+ paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \
|
|
|
|
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
#define pmd_populate(mm, pmd, pte) \
|
|
|
|
do { \
|
|
|
|
unsigned long pfn = page_to_pfn(pte); \
|
|
|
|
- paravirt_alloc_pt(pfn); \
|
|
|
|
- if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
|
|
|
|
+ paravirt_alloc_pt(mm, pfn); \
|
|
|
|
+ if (PagePinned(virt_to_page((mm)->pgd))) { \
|
|
|
|
if (!PageHighMem(pte)) \
|
|
|
|
BUG_ON(HYPERVISOR_update_va_mapping( \
|
|
|
|
(unsigned long)__va(pfn << PAGE_SHIFT), \
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:07.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_32.h 2011-02-07 15:38:23.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -26,9 +26,6 @@
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
|
|
|
|
-/* Is this pagetable pinned? */
|
|
|
|
-#define PG_pinned PG_arch_1
|
|
|
|
-
|
|
|
|
struct vm_area_struct;
|
|
|
|
|
|
|
|
/*
|
|
|
|
@@ -82,7 +79,7 @@ void paging_init(void);
|
|
|
|
* area for the same reason. ;)
|
|
|
|
*/
|
|
|
|
#define VMALLOC_OFFSET (8*1024*1024)
|
|
|
|
-#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
|
|
|
|
+#define VMALLOC_START (((unsigned long) high_memory + \
|
|
|
|
2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
|
|
|
|
#ifdef CONFIG_HIGHMEM
|
|
|
|
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
|
|
|
|
@@ -231,8 +228,6 @@ extern unsigned long pg0[];
|
|
|
|
* The following only work if pte_present() is true.
|
|
|
|
* Undefined behaviour if not..
|
|
|
|
*/
|
|
|
|
-static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
|
|
|
|
-static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
|
|
|
|
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
|
|
|
|
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
|
|
|
|
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
|
|
|
|
@@ -243,13 +238,9 @@ static inline int pte_huge(pte_t pte) {
|
|
|
|
*/
|
|
|
|
static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
|
|
|
|
|
|
|
|
-static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
|
|
|
|
-static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
|
|
|
|
static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
|
|
|
|
static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
|
|
|
|
static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
|
|
|
|
-static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
|
|
|
|
-static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
|
|
|
|
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
|
|
|
|
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
|
|
|
|
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
|
|
|
|
@@ -295,22 +286,20 @@ static inline pte_t xen_local_ptep_get_a
|
|
|
|
#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
|
|
|
|
({ \
|
|
|
|
int __changed = !pte_same(*(ptep), entry); \
|
|
|
|
- if (__changed && (dirty)) \
|
|
|
|
- ptep_establish(vma, address, ptep, entry); \
|
|
|
|
+ if (__changed && (dirty)) { \
|
|
|
|
+ if ( likely((vma)->vm_mm == current->mm) ) { \
|
|
|
|
+ BUG_ON(HYPERVISOR_update_va_mapping(address, \
|
|
|
|
+ entry, \
|
|
|
|
+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
|
|
|
|
+ UVMF_INVLPG|UVMF_MULTI)); \
|
|
|
|
+ } else { \
|
|
|
|
+ xen_l1_entry_update(ptep, entry); \
|
|
|
|
+ flush_tlb_page(vma, address); \
|
|
|
|
+ } \
|
|
|
|
+ } \
|
|
|
|
__changed; \
|
|
|
|
})
|
|
|
|
|
|
|
|
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
|
|
|
|
-#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
|
|
|
|
- int __ret = 0; \
|
|
|
|
- if (pte_dirty(*(ptep))) \
|
|
|
|
- __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
|
|
|
|
- &(ptep)->pte_low); \
|
|
|
|
- if (__ret) \
|
|
|
|
- pte_update((vma)->vm_mm, addr, ptep); \
|
|
|
|
- __ret; \
|
|
|
|
-})
|
|
|
|
-
|
|
|
|
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
|
|
|
|
#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
|
|
|
|
int __ret = 0; \
|
|
|
|
@@ -322,44 +311,13 @@ static inline pte_t xen_local_ptep_get_a
|
|
|
|
__ret; \
|
|
|
|
})
|
|
|
|
|
|
|
|
-/*
|
|
|
|
- * Rules for using ptep_establish: the pte MUST be a user pte, and
|
|
|
|
- * must be a present->present transition.
|
|
|
|
- */
|
|
|
|
-#define __HAVE_ARCH_PTEP_ESTABLISH
|
|
|
|
-#define ptep_establish(vma, address, ptep, pteval) \
|
|
|
|
-do { \
|
|
|
|
- if ( likely((vma)->vm_mm == current->mm) ) { \
|
|
|
|
- BUG_ON(HYPERVISOR_update_va_mapping(address, \
|
|
|
|
- pteval, \
|
|
|
|
- (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
|
|
|
|
- UVMF_INVLPG|UVMF_MULTI)); \
|
|
|
|
- } else { \
|
|
|
|
- xen_l1_entry_update(ptep, pteval); \
|
|
|
|
- flush_tlb_page(vma, address); \
|
|
|
|
- } \
|
|
|
|
-} while (0)
|
|
|
|
-
|
|
|
|
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
|
|
|
|
-#define ptep_clear_flush_dirty(vma, address, ptep) \
|
|
|
|
-({ \
|
|
|
|
- pte_t __pte = *(ptep); \
|
|
|
|
- int __dirty = pte_dirty(__pte); \
|
|
|
|
- __pte = pte_mkclean(__pte); \
|
|
|
|
- if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
|
|
|
|
- (void)ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
|
|
|
|
- else if (__dirty) \
|
|
|
|
- (ptep)->pte_low = __pte.pte_low; \
|
|
|
|
- __dirty; \
|
|
|
|
-})
|
|
|
|
-
|
|
|
|
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
|
|
|
|
#define ptep_clear_flush_young(vma, address, ptep) \
|
|
|
|
({ \
|
|
|
|
pte_t __pte = *(ptep); \
|
|
|
|
int __young = pte_young(__pte); \
|
|
|
|
__pte = pte_mkold(__pte); \
|
|
|
|
- if (test_bit(PG_pinned, &virt_to_page((vma)->vm_mm->pgd)->flags)) \
|
|
|
|
+ if (PagePinned(virt_to_page((vma)->vm_mm->pgd))) \
|
|
|
|
(void)ptep_set_access_flags(vma, address, ptep, __pte, __young); \
|
|
|
|
else if (__young) \
|
|
|
|
(ptep)->pte_low = __pte.pte_low; \
|
|
|
|
@@ -383,7 +341,7 @@ static inline pte_t ptep_get_and_clear(s
|
|
|
|
#define ptep_get_and_clear_full(mm, addr, ptep, full) \
|
|
|
|
((full) ? ({ \
|
|
|
|
pte_t __res = *(ptep); \
|
|
|
|
- if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
|
|
|
|
+ if (PagePinned(virt_to_page((mm)->pgd))) \
|
|
|
|
xen_l1_entry_update(ptep, __pte(0)); \
|
|
|
|
else \
|
|
|
|
*(ptep) = __pte(0); \
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable-3level.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -23,26 +23,11 @@
|
|
|
|
#define pud_present(pud) 1
|
|
|
|
|
|
|
|
/*
|
|
|
|
- * Is the pte executable?
|
|
|
|
- */
|
|
|
|
-static inline int pte_x(pte_t pte)
|
|
|
|
-{
|
|
|
|
- return !(__pte_val(pte) & _PAGE_NX);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-/*
|
|
|
|
- * All present user-pages with !NX bit are user-executable:
|
|
|
|
- */
|
|
|
|
-static inline int pte_exec(pte_t pte)
|
|
|
|
-{
|
|
|
|
- return pte_user(pte) && pte_x(pte);
|
|
|
|
-}
|
|
|
|
-/*
|
|
|
|
* All present pages with !NX bit are kernel-executable:
|
|
|
|
*/
|
|
|
|
static inline int pte_exec_kernel(pte_t pte)
|
|
|
|
{
|
|
|
|
- return pte_x(pte);
|
|
|
|
+ return !(__pte_val(pte) & _PAGE_NX);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Rules for using set_pte: the pte being assigned *must* be
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor_32.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
|
|
|
|
#define X86_VENDOR_UMC 3
|
|
|
|
#define X86_VENDOR_NEXGEN 4
|
|
|
|
#define X86_VENDOR_CENTAUR 5
|
|
|
|
-#define X86_VENDOR_RISE 6
|
|
|
|
#define X86_VENDOR_TRANSMETA 7
|
|
|
|
#define X86_VENDOR_NSC 8
|
|
|
|
#define X86_VENDOR_NUM 9
|
|
|
|
@@ -122,6 +121,7 @@ void __init cpu_detect(struct cpuinfo_x8
|
|
|
|
extern void identify_boot_cpu(void);
|
|
|
|
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
|
|
|
|
extern void print_cpu_info(struct cpuinfo_x86 *);
|
|
|
|
+extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
|
|
|
|
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
|
|
|
|
extern unsigned short num_cache_leaves;
|
|
|
|
|
|
|
|
@@ -171,17 +171,6 @@ static inline void clear_in_cr4 (unsigne
|
|
|
|
write_cr4(cr4);
|
|
|
|
}
|
|
|
|
|
|
|
|
-/*
|
|
|
|
- * NSC/Cyrix CPU indexed register access macros
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
|
|
|
|
-
|
|
|
|
-#define setCx86(reg, data) do { \
|
|
|
|
- outb((reg), 0x22); \
|
|
|
|
- outb((data), 0x23); \
|
|
|
|
-} while (0)
|
|
|
|
-
|
|
|
|
/* Stop speculative execution */
|
|
|
|
static inline void sync_core(void)
|
|
|
|
{
|
|
|
|
@@ -230,6 +219,10 @@ extern int bootloader_type;
|
|
|
|
|
|
|
|
#define HAVE_ARCH_PICK_MMAP_LAYOUT
|
|
|
|
|
|
|
|
+extern void hard_disable_TSC(void);
|
|
|
|
+extern void disable_TSC(void);
|
|
|
|
+extern void hard_enable_TSC(void);
|
|
|
|
+
|
|
|
|
/*
|
|
|
|
* Size of io_bitmap.
|
|
|
|
*/
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/system_32.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -205,11 +205,6 @@ static inline unsigned long get_limit(un
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
|
|
-/*
|
|
|
|
- * Actually only lfence would be needed for mb() because all stores done
|
|
|
|
- * by the kernel should be already ordered. But keep a full barrier for now.
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
|
|
|
|
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
|
|
|
|
|
|
|
|
@@ -301,15 +296,6 @@ void enable_hlt(void);
|
|
|
|
extern int es7000_plat;
|
|
|
|
void cpu_idle_wait(void);
|
|
|
|
|
|
|
|
-/*
|
|
|
|
- * On SMP systems, when the scheduler does migration-cost autodetection,
|
|
|
|
- * it needs a way to flush as much of the CPU's caches as possible:
|
|
|
|
- */
|
|
|
|
-static inline void sched_cacheflush(void)
|
|
|
|
-{
|
|
|
|
- wbinvd();
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
extern unsigned long arch_align_stack(unsigned long sp);
|
|
|
|
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush_32.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -91,7 +91,11 @@ struct tlb_state
|
|
|
|
DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
|
|
|
|
#endif /* SMP */
|
|
|
|
|
|
|
|
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
|
|
|
|
+static inline void flush_tlb_kernel_range(unsigned long start,
|
|
|
|
+ unsigned long end)
|
|
|
|
+{
|
|
|
|
+ flush_tlb_all();
|
|
|
|
+}
|
|
|
|
|
|
|
|
static inline void flush_tlb_pgtables(struct mm_struct *mm,
|
|
|
|
unsigned long start, unsigned long end)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/fixmap_64.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -23,9 +23,9 @@
|
|
|
|
* compile time, but to set the physical address only
|
|
|
|
* in the boot process.
|
|
|
|
*
|
|
|
|
- * these 'compile-time allocated' memory buffers are
|
|
|
|
- * fixed-size 4k pages. (or larger if used with an increment
|
|
|
|
- * highger than 1) use fixmap_set(idx,phys) to associate
|
|
|
|
+ * These 'compile-time allocated' memory buffers are
|
|
|
|
+ * fixed-size 4k pages (or larger if used with an increment
|
|
|
|
+ * higher than 1). Use set_fixmap(idx,phys) to associate
|
|
|
|
* physical memory with fixmap indices.
|
|
|
|
*
|
|
|
|
* TLB entries of such buffers will not be flushed across
|
|
|
|
@@ -36,6 +36,8 @@ enum fixed_addresses {
|
|
|
|
VSYSCALL_LAST_PAGE,
|
|
|
|
VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
|
|
|
|
VSYSCALL_HPET,
|
|
|
|
+ FIX_DBGP_BASE,
|
|
|
|
+ FIX_EARLYCON_MEM_BASE,
|
|
|
|
FIX_HPET_BASE,
|
|
|
|
#ifdef CONFIG_X86_LOCAL_APIC
|
|
|
|
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
|
|
|
|
@@ -105,7 +107,7 @@ static __always_inline unsigned long fix
|
|
|
|
if (idx >= __end_of_fixed_addresses)
|
|
|
|
__this_fixmap_does_not_exist();
|
|
|
|
|
|
|
|
- return __fix_to_virt(idx);
|
|
|
|
+ return __fix_to_virt(idx);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/mmu_context_64.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -76,7 +76,7 @@ static inline void switch_mm(struct mm_s
|
|
|
|
|
|
|
|
if (likely(prev != next)) {
|
|
|
|
BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
|
|
|
|
- !next->context.pinned);
|
|
|
|
+ !PagePinned(virt_to_page(next->pgd)));
|
|
|
|
|
|
|
|
/* stop flush ipis for the previous mm */
|
|
|
|
cpu_clear(cpu, prev->cpu_vm_mask);
|
|
|
|
@@ -131,7 +131,7 @@ static inline void switch_mm(struct mm_s
|
|
|
|
|
|
|
|
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
|
|
|
|
{
|
|
|
|
- if (!next->context.pinned)
|
|
|
|
+ if (!PagePinned(virt_to_page(next->pgd)))
|
|
|
|
mm_pin(next);
|
|
|
|
switch_mm(prev, next, NULL);
|
|
|
|
}
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgalloc_64.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -21,7 +21,7 @@ static inline void pmd_populate_kernel(s
|
|
|
|
|
|
|
|
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
|
|
|
|
{
|
|
|
|
- if (unlikely((mm)->context.pinned)) {
|
|
|
|
+ if (unlikely(PagePinned(virt_to_page((mm)->pgd)))) {
|
|
|
|
BUG_ON(HYPERVISOR_update_va_mapping(
|
|
|
|
(unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
|
|
|
|
pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
|
|
|
|
@@ -33,7 +33,7 @@ static inline void pmd_populate(struct m
|
|
|
|
|
|
|
|
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
|
|
|
|
{
|
|
|
|
- if (unlikely((mm)->context.pinned)) {
|
|
|
|
+ if (unlikely(PagePinned(virt_to_page((mm)->pgd)))) {
|
|
|
|
BUG_ON(HYPERVISOR_update_va_mapping(
|
|
|
|
(unsigned long)pmd,
|
|
|
|
pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
|
|
|
|
@@ -50,7 +50,7 @@ static inline void pud_populate(struct m
|
|
|
|
*/
|
|
|
|
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
|
|
|
|
{
|
|
|
|
- if (unlikely((mm)->context.pinned)) {
|
|
|
|
+ if (unlikely(PagePinned(virt_to_page((mm)->pgd)))) {
|
|
|
|
BUG_ON(HYPERVISOR_update_va_mapping(
|
|
|
|
(unsigned long)pud,
|
|
|
|
pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:08.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/pgtable_64.h 2011-02-07 15:38:18.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -304,7 +304,7 @@ static inline pte_t ptep_get_and_clear_f
|
|
|
|
{
|
|
|
|
if (full) {
|
|
|
|
pte_t pte = *ptep;
|
|
|
|
- if (mm->context.pinned)
|
|
|
|
+ if (PagePinned(virt_to_page(mm->pgd)))
|
|
|
|
xen_l1_entry_update(ptep, __pte(0));
|
|
|
|
else
|
|
|
|
*ptep = __pte(0);
|
|
|
|
@@ -333,21 +333,15 @@ static inline pte_t ptep_get_and_clear_f
|
|
|
|
* Undefined behaviour if not..
|
|
|
|
*/
|
|
|
|
#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
|
|
|
|
-static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
|
|
|
|
-static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
|
|
|
|
-static inline int pte_exec(pte_t pte) { return !(__pte_val(pte) & _PAGE_NX); }
|
|
|
|
static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
|
|
|
|
static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
|
|
|
|
static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
|
|
|
|
static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
|
|
|
|
static inline int pte_huge(pte_t pte) { return __pte_val(pte) & _PAGE_PSE; }
|
|
|
|
|
|
|
|
-static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
|
|
|
|
-static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
|
|
|
|
static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
|
|
|
|
static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
|
|
|
|
static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
|
|
|
|
-static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
|
|
|
|
static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) &= ~_PAGE_NX; return pte; }
|
|
|
|
static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
|
|
|
|
static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
|
|
|
|
@@ -355,13 +349,6 @@ static inline pte_t pte_mkwrite(pte_t pt
|
|
|
|
static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
|
|
|
|
static inline pte_t pte_clrhuge(pte_t pte) { __pte_val(pte) &= ~_PAGE_PSE; return pte; }
|
|
|
|
|
|
|
|
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
|
|
|
|
-{
|
|
|
|
- if (!pte_dirty(*ptep))
|
|
|
|
- return 0;
|
|
|
|
- return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
|
|
|
|
{
|
|
|
|
if (!pte_young(*ptep))
|
|
|
|
@@ -500,26 +487,13 @@ static inline pte_t pte_modify(pte_t pte
|
|
|
|
__changed; \
|
|
|
|
})
|
|
|
|
|
|
|
|
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
|
|
|
|
-#define ptep_clear_flush_dirty(vma, address, ptep) \
|
|
|
|
-({ \
|
|
|
|
- pte_t __pte = *(ptep); \
|
|
|
|
- int __dirty = pte_dirty(__pte); \
|
|
|
|
- __pte = pte_mkclean(__pte); \
|
|
|
|
- if ((vma)->vm_mm->context.pinned) \
|
|
|
|
- (void)ptep_set_access_flags(vma, address, ptep, __pte, __dirty); \
|
|
|
|
- else if (__dirty) \
|
|
|
|
- set_pte(ptep, __pte); \
|
|
|
|
- __dirty; \
|
|
|
|
-})
|
|
|
|
-
|
|
|
|
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
|
|
|
|
#define ptep_clear_flush_young(vma, address, ptep) \
|
|
|
|
({ \
|
|
|
|
pte_t __pte = *(ptep); \
|
|
|
|
int __young = pte_young(__pte); \
|
|
|
|
__pte = pte_mkold(__pte); \
|
|
|
|
- if ((vma)->vm_mm->context.pinned) \
|
|
|
|
+ if (PagePinned(virt_to_page((vma)->vm_mm->pgd))) \
|
|
|
|
(void)ptep_set_access_flags(vma, address, ptep, __pte, __young); \
|
|
|
|
else if (__young) \
|
|
|
|
set_pte(ptep, __pte); \
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -566,6 +540,8 @@ int xen_change_pte_range(struct mm_struc
|
2010-07-07 11:12:45 +00:00
|
|
|
#define arch_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable) \
|
|
|
|
xen_change_pte_range(mm, pmd, addr, end, newprot, dirty_accountable)
|
|
|
|
|
|
|
|
+pte_t *lookup_address(unsigned long addr);
|
|
|
|
+
|
|
|
|
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
|
|
|
|
direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -583,7 +559,6 @@ int xen_change_pte_range(struct mm_struc
|
2010-07-07 11:12:45 +00:00
|
|
|
(((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
|
|
|
|
|
|
|
|
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
|
|
|
|
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
|
|
|
|
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
|
|
|
|
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
|
|
|
|
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/processor_64.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -83,7 +83,6 @@ struct cpuinfo_x86 {
|
|
|
|
#define X86_VENDOR_UMC 3
|
|
|
|
#define X86_VENDOR_NEXGEN 4
|
|
|
|
#define X86_VENDOR_CENTAUR 5
|
|
|
|
-#define X86_VENDOR_RISE 6
|
|
|
|
#define X86_VENDOR_TRANSMETA 7
|
|
|
|
#define X86_VENDOR_NUM 8
|
|
|
|
#define X86_VENDOR_UNKNOWN 0xff
|
|
|
|
@@ -100,6 +99,7 @@ extern char ignore_irq13;
|
|
|
|
|
|
|
|
extern void identify_cpu(struct cpuinfo_x86 *);
|
|
|
|
extern void print_cpu_info(struct cpuinfo_x86 *);
|
|
|
|
+extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
|
|
|
|
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
|
|
|
|
extern unsigned short num_cache_leaves;
|
|
|
|
|
|
|
|
@@ -377,12 +377,10 @@ static inline void sync_core(void)
|
|
|
|
asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
|
|
|
|
}
|
|
|
|
|
|
|
|
-#define cpu_has_fpu 1
|
|
|
|
-
|
|
|
|
#define ARCH_HAS_PREFETCH
|
|
|
|
static inline void prefetch(void *x)
|
|
|
|
{
|
|
|
|
- asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
|
|
|
|
+ asm volatile("prefetcht0 (%0)" :: "r" (x));
|
|
|
|
}
|
|
|
|
|
|
|
|
#define ARCH_HAS_PREFETCHW 1
|
|
|
|
@@ -400,17 +398,6 @@ static inline void prefetchw(void *x)
|
|
|
|
|
|
|
|
#define cpu_relax() rep_nop()
|
|
|
|
|
|
|
|
-/*
|
|
|
|
- * NSC/Cyrix CPU indexed register access macros
|
|
|
|
- */
|
|
|
|
-
|
|
|
|
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
|
|
|
|
-
|
|
|
|
-#define setCx86(reg, data) do { \
|
|
|
|
- outb((reg), 0x22); \
|
|
|
|
- outb((data), 0x23); \
|
|
|
|
-} while (0)
|
|
|
|
-
|
|
|
|
static inline void serialize_cpu(void)
|
|
|
|
{
|
|
|
|
__asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/system_64.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -79,12 +79,16 @@ static inline unsigned long read_cr0(voi
|
|
|
|
unsigned long cr0;
|
|
|
|
asm volatile("movq %%cr0,%0" : "=r" (cr0));
|
|
|
|
return cr0;
|
|
|
|
-}
|
|
|
|
+}
|
|
|
|
|
|
|
|
static inline void write_cr0(unsigned long val)
|
|
|
|
{
|
|
|
|
asm volatile("movq %0,%%cr0" :: "r" (val));
|
|
|
|
-}
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+#define read_cr2() current_vcpu_info()->arch.cr2
|
|
|
|
+
|
|
|
|
+#define write_cr2(val) ((void)(current_vcpu_info()->arch.cr2 = (val)))
|
|
|
|
|
|
|
|
#define read_cr3() ({ \
|
|
|
|
unsigned long __dummy; \
|
|
|
|
@@ -103,27 +107,28 @@ static inline unsigned long read_cr4(voi
|
|
|
|
unsigned long cr4;
|
|
|
|
asm("movq %%cr4,%0" : "=r" (cr4));
|
|
|
|
return cr4;
|
|
|
|
-}
|
|
|
|
+}
|
|
|
|
|
|
|
|
static inline void write_cr4(unsigned long val)
|
|
|
|
{
|
|
|
|
asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
|
|
|
|
-}
|
|
|
|
-
|
|
|
|
-#define stts() (HYPERVISOR_fpu_taskswitch(1))
|
|
|
|
+}
|
|
|
|
|
|
|
|
-#define wbinvd() \
|
|
|
|
- __asm__ __volatile__ ("wbinvd": : :"memory");
|
|
|
|
+static inline unsigned long read_cr8(void)
|
|
|
|
+{
|
|
|
|
+ return 0;
|
|
|
|
+}
|
|
|
|
|
|
|
|
-/*
|
|
|
|
- * On SMP systems, when the scheduler does migration-cost autodetection,
|
|
|
|
- * it needs a way to flush as much of the CPU's caches as possible.
|
|
|
|
- */
|
|
|
|
-static inline void sched_cacheflush(void)
|
|
|
|
+static inline void write_cr8(unsigned long val)
|
|
|
|
{
|
|
|
|
- wbinvd();
|
|
|
|
+ BUG_ON(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
+#define stts() (HYPERVISOR_fpu_taskswitch(1))
|
|
|
|
+
|
|
|
|
+#define wbinvd() \
|
|
|
|
+ __asm__ __volatile__ ("wbinvd": : :"memory")
|
|
|
|
+
|
|
|
|
#endif /* __KERNEL__ */
|
|
|
|
|
|
|
|
#define nop() __asm__ __volatile__ ("nop")
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:32:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/mach-xen/asm/tlbflush_64.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -89,7 +89,11 @@ static inline void flush_tlb_range(struc
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
|
|
|
|
+static inline void flush_tlb_kernel_range(unsigned long start,
|
|
|
|
+ unsigned long end)
|
|
|
|
+{
|
|
|
|
+ flush_tlb_all();
|
|
|
|
+}
|
|
|
|
|
|
|
|
static inline void flush_tlb_pgtables(struct mm_struct *mm,
|
|
|
|
unsigned long start, unsigned long end)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/asm/thread_info.h 2011-01-31 17:02:29.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/asm/thread_info.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -151,7 +151,8 @@ struct thread_info {
|
2010-07-07 11:12:45 +00:00
|
|
|
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
|
|
|
|
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW|_TIF_DEBUG)
|
|
|
|
#else
|
|
|
|
-#define _TIF_WORK_CTXSW _TIF_DEBUG
|
|
|
|
+#define _TIF_WORK_CTXSW_NEXT (_TIF_NOTSC | _TIF_DEBUG)
|
|
|
|
+#define _TIF_WORK_CTXSW_PREV (_TIF_NOTSC)
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define PREEMPT_ACTIVE 0x10000000
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/arch/x86/include/asm/xen/interface.h 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/arch/x86/include/asm/xen/interface.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -10,17 +10,20 @@
|
|
|
|
#define _ASM_X86_XEN_INTERFACE_H
|
|
|
|
|
|
|
|
#ifdef __XEN__
|
|
|
|
-#define __DEFINE_GUEST_HANDLE(name, type) \
|
|
|
|
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
|
|
|
|
typedef struct { type *p; } __guest_handle_ ## name
|
|
|
|
#else
|
|
|
|
-#define __DEFINE_GUEST_HANDLE(name, type) \
|
|
|
|
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
|
|
|
|
typedef type * __guest_handle_ ## name
|
|
|
|
#endif
|
|
|
|
|
|
|
|
+#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
|
|
|
|
+ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
|
|
|
|
+ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
|
|
|
|
#define DEFINE_GUEST_HANDLE_STRUCT(name) \
|
|
|
|
- __DEFINE_GUEST_HANDLE(name, struct name)
|
|
|
|
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
|
|
|
|
-#define GUEST_HANDLE(name) __guest_handle_ ## name
|
|
|
|
+ __DEFINE_XEN_GUEST_HANDLE(name, struct name)
|
|
|
|
+#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
|
|
|
|
+#define XEN_GUEST_HANDLE(name) __guest_handle_ ## name
|
|
|
|
|
|
|
|
#ifdef __XEN__
|
|
|
|
#if defined(__i386__)
|
|
|
|
@@ -47,14 +50,8 @@
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
-/* Guest handles for primitive C types. */
|
|
|
|
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
|
|
|
|
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
|
|
|
|
-__DEFINE_GUEST_HANDLE(ulong, unsigned long);
|
|
|
|
-DEFINE_GUEST_HANDLE(char);
|
|
|
|
-DEFINE_GUEST_HANDLE(int);
|
|
|
|
-DEFINE_GUEST_HANDLE(long);
|
|
|
|
-DEFINE_GUEST_HANDLE(void);
|
|
|
|
+typedef unsigned long xen_pfn_t;
|
|
|
|
+typedef unsigned long xen_ulong_t;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#ifndef HYPERVISOR_VIRT_START
|
|
|
|
@@ -66,7 +63,7 @@ DEFINE_GUEST_HANDLE(void);
|
2011-04-19 20:09:59 +00:00
|
|
|
#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
/* Maximum number of virtual CPUs in multi-processor guests. */
|
|
|
|
-#define MAX_VIRT_CPUS 32
|
|
|
|
+#define XEN_LEGACY_MAX_VCPUS 32
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SEGMENT DESCRIPTOR TABLES
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/linux/elfnote.h 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/linux/elfnote.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -52,7 +52,7 @@
|
|
|
|
4484:.balign 4 ; \
|
|
|
|
.popsection ;
|
|
|
|
|
|
|
|
-#define ELFNOTE(name, type, desc) \
|
|
|
|
+#define ELFNOTE(name, type, desc...) \
|
|
|
|
ELFNOTE_START(name, type, "") \
|
|
|
|
desc ; \
|
|
|
|
ELFNOTE_END
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/linux/page-flags.h 2011-01-31 17:01:49.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/linux/page-flags.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -125,8 +125,15 @@ enum pageflags {
|
2010-07-07 11:12:45 +00:00
|
|
|
PG_fscache = PG_private_2, /* page backed by cache */
|
|
|
|
|
|
|
|
/* XEN */
|
|
|
|
+#ifdef CONFIG_XEN
|
|
|
|
+ PG_pinned = PG_locked, /* Cannot alias with PG_owner_priv_1 since
|
|
|
|
+ * bad_page() checks should include this bit.
|
|
|
|
+ * Also cannot use PG_arch_1 since that now
|
|
|
|
+ * has a different purpose on x86. */
|
|
|
|
+#else
|
|
|
|
PG_pinned = PG_owner_priv_1,
|
|
|
|
PG_savepinned = PG_dirty,
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
/* SLOB */
|
|
|
|
PG_slob_free = PG_private,
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -491,10 +498,8 @@ static inline int PageTransCompound(stru
|
|
|
|
#define __PG_COMPOUND_LOCK 0
|
2010-07-07 11:12:45 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
-#if !defined(CONFIG_XEN)
|
|
|
|
+#ifndef CONFIG_XEN
|
|
|
|
# define __PG_XEN 0
|
|
|
|
-#elif defined(CONFIG_X86)
|
|
|
|
-# define __PG_XEN ((1 << PG_pinned) | (1 << PG_foreign))
|
|
|
|
#else
|
|
|
|
# define __PG_XEN (1 << PG_foreign)
|
|
|
|
#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/driver_util.h 2007-06-12 13:14:19.000000000 +0200
|
|
|
|
+++ head-2011-03-17/include/xen/driver_util.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -1,14 +1,8 @@
|
|
|
|
+#ifndef __XEN_DRIVER_UTIL_H__
|
|
|
|
+#define __XEN_DRIVER_UTIL_H__
|
|
|
|
|
|
|
|
-#ifndef __ASM_XEN_DRIVER_UTIL_H__
|
|
|
|
-#define __ASM_XEN_DRIVER_UTIL_H__
|
|
|
|
-
|
|
|
|
-#include <linux/vmalloc.h>
|
2010-07-07 11:12:45 +00:00
|
|
|
#include <linux/device.h>
|
|
|
|
|
|
|
|
-/* Allocate/destroy a 'vmalloc' VM area. */
|
|
|
|
-extern struct vm_struct *alloc_vm_area(unsigned long size);
|
|
|
|
-extern void free_vm_area(struct vm_struct *area);
|
|
|
|
-
|
|
|
|
extern struct class *get_xen_class(void);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
-#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
|
|
|
|
+#endif /* __XEN_DRIVER_UTIL_H__ */
|
|
|
|
--- head-2011-03-17.orig/include/xen/features.h 2011-03-17 13:45:28.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/features.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -10,6 +10,7 @@
|
|
|
|
#define __XEN_FEATURES_H__
|
|
|
|
|
|
|
|
#include <xen/interface/features.h>
|
|
|
|
+#include <xen/interface/version.h>
|
|
|
|
|
|
|
|
void xen_setup_features(void);
|
|
|
|
|
|
|
|
@@ -20,4 +21,4 @@ static inline int xen_feature(int flag)
|
|
|
|
return xen_features[flag];
|
|
|
|
}
|
|
|
|
|
|
|
|
-#endif /* __ASM_XEN_FEATURES_H__ */
|
|
|
|
+#endif /* __XEN_FEATURES_H__ */
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/interface/arch-x86/xen.h 2011-03-17 13:50:24.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/interface/arch-x86/xen.h 2011-03-17 14:11:48.000000000 +0100
|
|
|
|
@@ -50,6 +50,9 @@
|
2010-07-07 11:12:45 +00:00
|
|
|
#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
#define set_xen_guest_handle(hnd, val) set_xen_guest_handle_raw(hnd, val)
|
2010-07-07 11:12:45 +00:00
|
|
|
|
|
|
|
+/* Allow co-existing Linux 2.6.23+ Xen interface definitions. */
|
|
|
|
+#define DEFINE_GUEST_HANDLE_STRUCT(name) struct name
|
|
|
|
+
|
|
|
|
#if defined(__i386__)
|
|
|
|
#include "xen-x86_32.h"
|
|
|
|
#elif defined(__x86_64__)
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/interface/event_channel.h 2011-01-31 15:14:12.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/interface/event_channel.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -250,6 +250,7 @@ struct evtchn_op {
|
|
|
|
struct evtchn_unmask unmask;
|
|
|
|
} u;
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(evtchn_op);
|
|
|
|
typedef struct evtchn_op evtchn_op_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(evtchn_op_t);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/interface/io/netif.h 2011-01-31 15:14:12.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/interface/io/netif.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -183,8 +183,22 @@ typedef struct netif_rx_response netif_r
|
|
|
|
* Generate netif ring structures and types.
|
|
|
|
*/
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H)
|
|
|
|
DEFINE_RING_TYPES(netif_tx, struct netif_tx_request, struct netif_tx_response);
|
|
|
|
DEFINE_RING_TYPES(netif_rx, struct netif_rx_request, struct netif_rx_response);
|
|
|
|
+#else
|
|
|
|
+#define xen_netif_tx_request netif_tx_request
|
|
|
|
+#define xen_netif_rx_request netif_rx_request
|
|
|
|
+#define xen_netif_tx_response netif_tx_response
|
|
|
|
+#define xen_netif_rx_response netif_rx_response
|
|
|
|
+DEFINE_RING_TYPES(xen_netif_tx,
|
|
|
|
+ struct xen_netif_tx_request,
|
|
|
|
+ struct xen_netif_tx_response);
|
|
|
|
+DEFINE_RING_TYPES(xen_netif_rx,
|
|
|
|
+ struct xen_netif_rx_request,
|
|
|
|
+ struct xen_netif_rx_response);
|
|
|
|
+#define xen_netif_extra_info netif_extra_info
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
#define NETIF_RSP_DROPPED -2
|
|
|
|
#define NETIF_RSP_ERROR -1
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/interface/memory.h 2011-01-31 15:14:12.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/interface/memory.h 2011-01-31 17:49:31.000000000 +0100
|
|
|
|
@@ -88,6 +88,7 @@ struct xen_memory_reservation {
|
2010-07-07 11:12:45 +00:00
|
|
|
*/
|
|
|
|
domid_t domid;
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(xen_memory_reservation);
|
|
|
|
typedef struct xen_memory_reservation xen_memory_reservation_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -181,6 +182,7 @@ struct xen_machphys_mfn_list {
|
2010-07-07 11:12:45 +00:00
|
|
|
*/
|
|
|
|
unsigned int nr_extents;
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
|
|
|
|
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -222,6 +224,7 @@ struct xen_add_to_physmap {
|
2010-07-07 11:12:45 +00:00
|
|
|
/* GPFN where the source mapping page should appear. */
|
|
|
|
xen_pfn_t gpfn;
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap);
|
|
|
|
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/interface/sched.h 2011-01-31 15:14:12.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/interface/sched.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -67,6 +67,7 @@
|
|
|
|
struct sched_shutdown {
|
|
|
|
unsigned int reason; /* SHUTDOWN_* */
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(sched_shutdown);
|
|
|
|
typedef struct sched_shutdown sched_shutdown_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
|
|
|
|
|
|
|
|
@@ -81,6 +82,7 @@ struct sched_poll {
|
|
|
|
unsigned int nr_ports;
|
|
|
|
uint64_t timeout;
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(sched_poll);
|
|
|
|
typedef struct sched_poll sched_poll_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/interface/version.h 2011-01-31 15:14:12.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/interface/version.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -36,6 +36,9 @@
|
|
|
|
/* arg == xen_extraversion_t. */
|
|
|
|
#define XENVER_extraversion 1
|
|
|
|
typedef char xen_extraversion_t[16];
|
|
|
|
+struct xen_extraversion {
|
|
|
|
+ xen_extraversion_t extraversion;
|
|
|
|
+};
|
|
|
|
#define XEN_EXTRAVERSION_LEN (sizeof(xen_extraversion_t))
|
|
|
|
|
|
|
|
/* arg == xen_compile_info_t. */
|
|
|
|
@@ -50,10 +53,16 @@ typedef struct xen_compile_info xen_comp
|
|
|
|
|
|
|
|
#define XENVER_capabilities 3
|
|
|
|
typedef char xen_capabilities_info_t[1024];
|
|
|
|
+struct xen_capabilities_info {
|
|
|
|
+ xen_capabilities_info_t info;
|
|
|
|
+};
|
|
|
|
#define XEN_CAPABILITIES_INFO_LEN (sizeof(xen_capabilities_info_t))
|
|
|
|
|
|
|
|
#define XENVER_changeset 4
|
|
|
|
typedef char xen_changeset_info_t[64];
|
|
|
|
+struct xen_changeset_info {
|
|
|
|
+ xen_changeset_info_t info;
|
|
|
|
+};
|
|
|
|
#define XEN_CHANGESET_INFO_LEN (sizeof(xen_changeset_info_t))
|
|
|
|
|
|
|
|
#define XENVER_platform_parameters 5
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/interface/xen.h 2011-03-17 13:50:24.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/interface/xen.h 2011-03-17 14:12:10.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -32,7 +32,9 @@
|
|
|
|
#include <asm/pvclock-abi.h>
|
|
|
|
#endif
|
|
|
|
|
|
|
|
-#if defined(__i386__) || defined(__x86_64__)
|
|
|
|
+#if defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H)
|
|
|
|
+#include <asm/xen/interface.h>
|
|
|
|
+#elif defined(__i386__) || defined(__x86_64__)
|
|
|
|
#include "arch-x86/xen.h"
|
|
|
|
#elif defined(__ia64__)
|
|
|
|
#include "arch-ia64.h"
|
|
|
|
@@ -112,7 +114,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* New sched_op hypercall introduced in 0x00030101. */
|
|
|
|
-#if __XEN_INTERFACE_VERSION__ < 0x00030101
|
|
|
|
+#if __XEN_INTERFACE_VERSION__ < 0x00030101 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H))
|
|
|
|
#undef __HYPERVISOR_sched_op
|
|
|
|
#define __HYPERVISOR_sched_op __HYPERVISOR_sched_op_compat
|
|
|
|
#else
|
|
|
|
@@ -128,7 +130,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* New platform_op hypercall introduced in 0x00030204. */
|
|
|
|
-#if __XEN_INTERFACE_VERSION__ < 0x00030204
|
|
|
|
+#if __XEN_INTERFACE_VERSION__ < 0x00030204 || (defined(CONFIG_PARAVIRT_XEN) && !defined(HAVE_XEN_PLATFORM_COMPAT_H))
|
|
|
|
#define __HYPERVISOR_dom0_op __HYPERVISOR_platform_op
|
|
|
|
#endif
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -307,6 +309,7 @@ struct mmuext_op {
|
2010-07-07 11:12:45 +00:00
|
|
|
xen_pfn_t src_mfn;
|
|
|
|
} arg2;
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
|
|
|
|
typedef struct mmuext_op mmuext_op_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
|
|
|
|
#endif
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -399,6 +402,7 @@ struct mmu_update {
|
2010-07-07 11:12:45 +00:00
|
|
|
uint64_t ptr; /* Machine address of PTE. */
|
|
|
|
uint64_t val; /* New contents of PTE. */
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
|
|
|
|
typedef struct mmu_update mmu_update_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -407,9 +411,15 @@ DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
|
2010-07-07 11:12:45 +00:00
|
|
|
* NB. The fields are natural register size for this architecture.
|
|
|
|
*/
|
|
|
|
struct multicall_entry {
|
|
|
|
- unsigned long op, result;
|
|
|
|
+ unsigned long op;
|
|
|
|
+#if !defined(CONFIG_PARAVIRT_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H)
|
|
|
|
+ unsigned long result;
|
|
|
|
+#else
|
|
|
|
+ long result;
|
|
|
|
+#endif
|
|
|
|
unsigned long args[6];
|
|
|
|
};
|
|
|
|
+DEFINE_GUEST_HANDLE_STRUCT(multicall_entry);
|
|
|
|
typedef struct multicall_entry multicall_entry_t;
|
|
|
|
DEFINE_XEN_GUEST_HANDLE(multicall_entry_t);
|
|
|
|
|
2011-04-19 20:09:59 +00:00
|
|
|
--- head-2011-03-17.orig/include/xen/xenbus.h 2011-01-31 17:32:22.000000000 +0100
|
|
|
|
+++ head-2011-03-17/include/xen/xenbus.h 2011-01-31 17:49:31.000000000 +0100
|
2010-07-07 11:12:45 +00:00
|
|
|
@@ -57,16 +57,20 @@ struct xenbus_watch
|
|
|
|
void (*callback)(struct xenbus_watch *,
|
|
|
|
const char **vec, unsigned int len);
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H)
|
|
|
|
/* See XBWF_ definitions below. */
|
|
|
|
unsigned long flags;
|
|
|
|
+#endif
|
|
|
|
};
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H)
|
|
|
|
/*
|
|
|
|
* Execute callback in its own kthread. Useful if the callback is long
|
|
|
|
* running or heavily serialised, to avoid taking out the main xenwatch thread
|
|
|
|
* for a long period of time (or even unwittingly causing a deadlock).
|
|
|
|
*/
|
|
|
|
#define XBWF_new_thread 1
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
/* A xenbus device. */
|
|
|
|
struct xenbus_device {
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -184,6 +188,9 @@ void xs_suspend_cancel(void);
|
|
|
|
/* Used by xenbus_dev to borrow kernel's store connection. */
|
|
|
|
void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg);
|
|
|
|
|
|
|
|
+struct work_struct;
|
|
|
|
+void xenbus_probe(struct work_struct *);
|
|
|
|
+
|
|
|
|
/* Prepare for domain suspend: then resume or cancel the suspend. */
|
|
|
|
void xenbus_suspend(void);
|
|
|
|
void xenbus_resume(void);
|
|
|
|
@@ -214,6 +221,7 @@ int xenbus_watch_path(struct xenbus_devi
|
2010-07-07 11:12:45 +00:00
|
|
|
const char **, unsigned int));
|
|
|
|
|
|
|
|
|
|
|
|
+#if defined(CONFIG_XEN) || defined(HAVE_XEN_PLATFORM_COMPAT_H)
|
|
|
|
/**
|
|
|
|
* Register a watch on the given path/path2, using the given xenbus_watch
|
|
|
|
* structure for storage, and the given callback function as the callback.
|
2011-04-19 20:09:59 +00:00
|
|
|
@@ -227,7 +235,13 @@ int xenbus_watch_path2(struct xenbus_dev
|
2010-07-07 11:12:45 +00:00
|
|
|
const char *path2, struct xenbus_watch *watch,
|
|
|
|
void (*callback)(struct xenbus_watch *,
|
|
|
|
const char **, unsigned int));
|
|
|
|
-
|
|
|
|
+#else
|
|
|
|
+int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
|
|
|
|
+ void (*callback)(struct xenbus_watch *,
|
|
|
|
+ const char **, unsigned int),
|
|
|
|
+ const char *pathfmt, ...)
|
|
|
|
+ __attribute__ ((format (printf, 4, 5)));
|
|
|
|
+#endif
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Advertise in the store a change of the given driver to the given new_state.
|