From: jbeulich@novell.com
Subject: use base kernel suspend/resume infrastructure
Patch-mainline: n/a

... rather than calling just a few functions explicitly.

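The subsystem hooks below all follow the same registration pattern against the
old <linux/sysdev.h> interface used throughout this patch. As a minimal sketch
(the "example" names are hypothetical and not part of this patch):

#include <linux/sysdev.h>
#include <linux/init.h>

/* Re-establish subsystem state; on this path interrupts are still disabled. */
static int example_resume(struct sys_device *dev)
{
	return 0;
}

static struct sysdev_class example_sysclass = {
	.name	= "example",
	.resume	= example_resume,
};

static struct sys_device device_example = {
	.id	= 0,
	.cls	= &example_sysclass,
};

static int __init example_register(void)
{
	int err = sysdev_class_register(&example_sysclass);

	if (!err)
		err = sysdev_register(&device_example);
	return err;
}
core_initcall(example_register);

With evtchn, gnttab and spinlock registered this way, machine_reboot.c can drive
them generically through dpm_suspend_start()/dpm_suspend_noirq(), sysdev_suspend(),
sysdev_resume() and dpm_resume_noirq()/dpm_resume_end() instead of calling each
subsystem's resume function explicitly.
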
--- head-2011-03-11.orig/arch/x86/kernel/time-xen.c	2010-09-16 16:49:59.000000000 +0200
+++ head-2011-03-11/arch/x86/kernel/time-xen.c	2010-11-23 15:07:01.000000000 +0100
@@ -69,6 +69,10 @@ DEFINE_PER_CPU(struct vcpu_runstate_info
 /* Must be signed, as it's compared with s64 quantities which can be -ve. */
 #define NS_PER_TICK (1000000000LL/HZ)
 
+static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
+	.period_ns = NS_PER_TICK
+};
+
 /*
  * GCC 4.3 can turn loops over an induction variable into division. We do
  * not support arbitrary 64-bit division, and so must break the induction.
@@ -533,6 +537,17 @@ void mark_tsc_unstable(char *reason)
 }
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
+static void init_missing_ticks_accounting(unsigned int cpu)
+{
+	struct vcpu_runstate_info *runstate = setup_runstate_area(cpu);
+
+	per_cpu(processed_blocked_time, cpu) =
+		runstate->time[RUNSTATE_blocked];
+	per_cpu(processed_stolen_time, cpu) =
+		runstate->time[RUNSTATE_runnable] +
+		runstate->time[RUNSTATE_offline];
+}
+
 static cycle_t cs_last;
 
 static cycle_t xen_clocksource_read(struct clocksource *cs)
@@ -569,11 +584,32 @@ static cycle_t xen_clocksource_read(stru
 #endif
 }
 
+/* No locking required. Interrupts are disabled on all CPUs. */
 static void xen_clocksource_resume(struct clocksource *cs)
 {
-	extern void time_resume(void);
+	unsigned int cpu;
+
+	init_cpu_khz();
+
+	for_each_online_cpu(cpu) {
+		switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
+					   &xen_set_periodic_tick)) {
+		case 0:
+#if CONFIG_XEN_COMPAT <= 0x030004
+		case -ENOSYS:
+#endif
+			break;
+		default:
+			BUG();
+		}
+		get_time_values_from_xen(cpu);
+		per_cpu(processed_system_time, cpu) =
+			per_cpu(shadow_time, 0).system_timestamp;
+		init_missing_ticks_accounting(cpu);
+	}
+
+	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
 
-	time_resume();
 	cs_last = local_clock();
 }
 
@@ -605,17 +641,6 @@ struct vcpu_runstate_info *setup_runstat
 	return rs;
 }
 
-static void init_missing_ticks_accounting(unsigned int cpu)
-{
-	struct vcpu_runstate_info *runstate = setup_runstate_area(cpu);
-
-	per_cpu(processed_blocked_time, cpu) =
-		runstate->time[RUNSTATE_blocked];
-	per_cpu(processed_stolen_time, cpu) =
-		runstate->time[RUNSTATE_runnable] +
-		runstate->time[RUNSTATE_offline];
-}
-
 void xen_read_persistent_clock(struct timespec *ts)
 {
 	const shared_info_t *s = HYPERVISOR_shared_info;
@@ -661,10 +686,6 @@ static void __init setup_cpu0_timer_irq(
 	BUG_ON(per_cpu(timer_irq, 0) < 0);
 }
 
-static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
-	.period_ns = NS_PER_TICK
-};
-
 static void __init _late_time_init(void)
 {
 	update_wallclock();
@@ -807,35 +828,6 @@ void xen_halt(void)
 	VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
 }
 
-/* No locking required. Interrupts are disabled on all CPUs. */
-void time_resume(void)
-{
-	unsigned int cpu;
-
-	init_cpu_khz();
-
-	for_each_online_cpu(cpu) {
-		switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
-					   &xen_set_periodic_tick)) {
-		case 0:
-#if CONFIG_XEN_COMPAT <= 0x030004
-		case -ENOSYS:
-#endif
-			break;
-		default:
-			BUG();
-		}
-		get_time_values_from_xen(cpu);
-		per_cpu(processed_system_time, cpu) =
-			per_cpu(shadow_time, 0).system_timestamp;
-		init_missing_ticks_accounting(cpu);
-	}
-
-	processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
-
-	update_wallclock();
-}
-
 #ifdef CONFIG_SMP
 static char timer_name[NR_CPUS][15];
 
--- head-2011-03-11.orig/drivers/xen/core/evtchn.c	2011-02-10 16:24:57.000000000 +0100
+++ head-2011-03-11/drivers/xen/core/evtchn.c	2011-02-10 16:18:00.000000000 +0100
@@ -1097,6 +1097,8 @@ int xen_test_irq_pending(int irq)
 }
 
 #ifdef CONFIG_PM_SLEEP
+#include <linux/sysdev.h>
+
 static void restore_cpu_virqs(unsigned int cpu)
 {
 	struct evtchn_bind_virq bind_virq;
@@ -1155,9 +1157,20 @@ static void restore_cpu_ipis(unsigned in
 	}
 }
 
-void irq_resume(void)
+static int evtchn_resume(struct sys_device *dev)
 {
 	unsigned int cpu, irq, evtchn;
+	struct evtchn_status status;
+
+	/* Avoid doing anything in the 'suspend cancelled' case. */
+	status.dom = DOMID_SELF;
+	status.port = evtchn_from_irq(percpu_read(virq_to_irq[VIRQ_TIMER]));
+	if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status))
+		BUG();
+	if (status.status == EVTCHNSTAT_virq
+	    && status.vcpu == smp_processor_id()
+	    && status.u.virq == VIRQ_TIMER)
+		return 0;
 
 	init_evtchn_cpu_bindings();
 
@@ -1198,7 +1211,32 @@ void irq_resume(void)
 		restore_cpu_ipis(cpu);
 	}
 
+	return 0;
+}
+
+static struct sysdev_class evtchn_sysclass = {
+	.name		= "evtchn",
+	.resume		= evtchn_resume,
+};
+
+static struct sys_device device_evtchn = {
+	.id		= 0,
+	.cls		= &evtchn_sysclass,
+};
+
+static int __init evtchn_register(void)
+{
+	int err;
+
+	if (is_initial_xendomain())
+		return 0;
+
+	err = sysdev_class_register(&evtchn_sysclass);
+	if (!err)
+		err = sysdev_register(&device_evtchn);
+	return err;
 }
+core_initcall(evtchn_register);
 #endif
 
 int __init arch_early_irq_init(void)
--- head-2011-03-11.orig/drivers/xen/core/gnttab.c	2011-01-14 15:00:13.000000000 +0100
+++ head-2011-03-11/drivers/xen/core/gnttab.c	2011-01-14 15:13:58.000000000 +0100
@@ -707,23 +707,40 @@ EXPORT_SYMBOL(gnttab_post_map_adjust);
 
 #endif /* __HAVE_ARCH_PTE_SPECIAL */
 
-int gnttab_resume(void)
+struct sys_device;
+static int gnttab_resume(struct sys_device *dev)
 {
 	if (max_nr_grant_frames() < nr_grant_frames)
 		return -ENOSYS;
 	return gnttab_map(0, nr_grant_frames - 1);
 }
+#define gnttab_resume() gnttab_resume(NULL)
 
 #ifdef CONFIG_PM_SLEEP
-int gnttab_suspend(void)
-{
+#include <linux/sysdev.h>
+
 #ifdef CONFIG_X86
+static int gnttab_suspend(struct sys_device *dev, pm_message_t state)
+{
 	apply_to_page_range(&init_mm, (unsigned long)shared,
 			    PAGE_SIZE * nr_grant_frames,
 			    unmap_pte_fn, NULL);
-#endif
 	return 0;
 }
+#else
+#define gnttab_suspend NULL
+#endif
+
+static struct sysdev_class gnttab_sysclass = {
+	.name		= "gnttab",
+	.resume		= gnttab_resume,
+	.suspend	= gnttab_suspend,
+};
+
+static struct sys_device device_gnttab = {
+	.id		= 0,
+	.cls		= &gnttab_sysclass,
+};
 #endif
 
 #else /* !CONFIG_XEN */
@@ -808,6 +825,17 @@ gnttab_init(void)
 	if (!is_running_on_xen())
 		return -ENODEV;
 
+#if defined(CONFIG_XEN) && defined(CONFIG_PM_SLEEP)
+	if (!is_initial_xendomain()) {
+		int err = sysdev_class_register(&gnttab_sysclass);
+
+		if (!err)
+			err = sysdev_register(&device_gnttab);
+		if (err)
+			return err;
+	}
+#endif
+
 	nr_grant_frames = 1;
 	boot_max_nr_grant_frames = __max_nr_grant_frames();
 
--- head-2011-03-11.orig/drivers/xen/core/machine_reboot.c	2011-02-01 15:03:10.000000000 +0100
+++ head-2011-03-11/drivers/xen/core/machine_reboot.c	2011-01-13 16:21:42.000000000 +0100
@@ -17,6 +17,7 @@
 #include <xen/xencons.h>
 #include <xen/cpu_hotplug.h>
 #include <xen/interface/vcpu.h>
+#include "../../base/base.h"
 
 #if defined(__i386__) || defined(__x86_64__)
 #include <asm/pci_x86.h>
@@ -140,50 +141,28 @@ struct suspend {
 static int take_machine_down(void *_suspend)
 {
 	struct suspend *suspend = _suspend;
-	int suspend_cancelled, err;
-	extern void time_resume(void);
+	int suspend_cancelled;
 
-	if (suspend->fast_suspend) {
-		BUG_ON(!irqs_disabled());
-	} else {
-		BUG_ON(irqs_disabled());
-
-		for (;;) {
-			err = smp_suspend();
-			if (err)
-				return err;
-
-			xenbus_suspend();
-			preempt_disable();
-
-			if (num_online_cpus() == 1)
-				break;
-
-			preempt_enable();
-			xenbus_suspend_cancel();
-		}
-
-		local_irq_disable();
-	}
+	BUG_ON(!irqs_disabled());
 
 	mm_pin_all();
-	gnttab_suspend();
-	pre_suspend();
-
-	/*
-	 * This hypercall returns 1 if suspend was cancelled or the domain was
-	 * merely checkpointed, and 0 if it is resuming in a new domain.
-	 */
-	suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+	suspend_cancelled = sysdev_suspend(PMSG_SUSPEND);
+	if (!suspend_cancelled) {
+		pre_suspend();
 
+		/*
+		 * This hypercall returns 1 if suspend was cancelled or the domain was
+		 * merely checkpointed, and 0 if it is resuming in a new domain.
		 */
+		suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
+	} else
+		BUG_ON(suspend_cancelled > 0);
 	suspend->resume_notifier(suspend_cancelled);
-	post_suspend(suspend_cancelled);
-	gnttab_resume();
+	if (suspend_cancelled >= 0) {
+		post_suspend(suspend_cancelled);
+		sysdev_resume();
+	}
 	if (!suspend_cancelled) {
-		extern void spinlock_resume(void);
-
-		spinlock_resume();
-		irq_resume();
 #ifdef __x86_64__
 		/*
 		 * Older versions of Xen do not save/restore the user %cr3.
@@ -195,10 +174,6 @@ static int take_machine_down(void *_susp
 			current->active_mm->pgd)));
 #endif
 	}
-	time_resume();
-
-	if (!suspend->fast_suspend)
-		local_irq_enable();
 
 	return suspend_cancelled;
 }
@@ -206,8 +181,14 @@ static int take_machine_down(void *_susp
 int __xen_suspend(int fast_suspend, void (*resume_notifier)(int))
 {
 	int err, suspend_cancelled;
+	const char *what;
 	struct suspend suspend;
 
+#define _check(fn, args...) ({ \
+	what = #fn; \
+	err = (fn)(args); \
+})
+
 	BUG_ON(smp_processor_id() != 0);
 	BUG_ON(in_interrupt());
 
@@ -225,30 +206,77 @@ int __xen_suspend(int fast_suspend, void
 	suspend.fast_suspend = fast_suspend;
 	suspend.resume_notifier = resume_notifier;
 
+	if (_check(dpm_suspend_start, PMSG_SUSPEND)) {
+		dpm_resume_end(PMSG_RESUME);
+		pr_err("%s() failed: %d\n", what, err);
+		return err;
+	}
+
 	if (fast_suspend) {
 		xenbus_suspend();
+
+		if (_check(dpm_suspend_noirq, PMSG_SUSPEND)) {
+			xenbus_suspend_cancel();
+			dpm_resume_end(PMSG_RESUME);
+			pr_err("%s() failed: %d\n", what, err);
+			return err;
+		}
+
 		err = stop_machine(take_machine_down, &suspend,
 				   &cpumask_of_cpu(0));
 		if (err < 0)
 			xenbus_suspend_cancel();
 	} else {
+		BUG_ON(irqs_disabled());
+
+		for (;;) {
+			xenbus_suspend();
+
+			if (!_check(dpm_suspend_noirq, PMSG_SUSPEND)
+			    && _check(smp_suspend))
+				dpm_resume_noirq(PMSG_RESUME);
+			if (err) {
+				xenbus_suspend_cancel();
+				dpm_resume_end(PMSG_RESUME);
+				pr_err("%s() failed: %d\n", what, err);
+				return err;
+			}
+
+			preempt_disable();
+
+			if (num_online_cpus() == 1)
+				break;
+
+			preempt_enable();
+
+			dpm_resume_noirq(PMSG_RESUME);
+
+			xenbus_suspend_cancel();
+		}
+
+		local_irq_disable();
 		err = take_machine_down(&suspend);
+		local_irq_enable();
 	}
 
-	if (err < 0)
-		return err;
+	dpm_resume_noirq(PMSG_RESUME);
 
-	suspend_cancelled = err;
-	if (!suspend_cancelled) {
-		xencons_resume();
-		xenbus_resume();
-	} else {
-		xenbus_suspend_cancel();
+	if (err >= 0) {
+		suspend_cancelled = err;
+		if (!suspend_cancelled) {
+			xencons_resume();
+			xenbus_resume();
+		} else {
+			xenbus_suspend_cancel();
+			err = 0;
+		}
+
+		if (!fast_suspend)
+			smp_resume();
 	}
 
-	if (!fast_suspend)
-		smp_resume();
+	dpm_resume_end(PMSG_RESUME);
 
-	return 0;
+	return err;
 }
 #endif
--- head-2011-03-11.orig/drivers/xen/core/spinlock.c	2011-03-15 16:17:10.000000000 +0100
+++ head-2011-03-11/drivers/xen/core/spinlock.c	2011-03-15 16:18:17.000000000 +0100
@@ -60,7 +60,9 @@ void __cpuinit xen_spinlock_cleanup(unsi
 }
 
 #ifdef CONFIG_PM_SLEEP
-void __cpuinit spinlock_resume(void)
+#include <linux/sysdev.h>
+
+static int __cpuinit spinlock_resume(struct sys_device *dev)
 {
 	unsigned int cpu;
 
@@ -68,7 +70,33 @@ void __cpuinit spinlock_resume(void)
 		per_cpu(poll_evtchn, cpu) = 0;
 		xen_spinlock_init(cpu);
 	}
+
+	return 0;
+}
+
+static struct sysdev_class __cpuinitdata spinlock_sysclass = {
+	.name		= "spinlock",
+	.resume		= spinlock_resume
+};
+
+static struct sys_device __cpuinitdata device_spinlock = {
+	.id		= 0,
+	.cls		= &spinlock_sysclass
+};
+
+static int __init spinlock_register(void)
+{
+	int rc;
+
+	if (is_initial_xendomain())
+		return 0;
+
+	rc = sysdev_class_register(&spinlock_sysclass);
+	if (!rc)
+		rc = sysdev_register(&device_spinlock);
+	return rc;
 }
+core_initcall(spinlock_register);
 #endif
 
 static unsigned int spin_adjust(struct spinning *spinning,
--- head-2011-03-11.orig/include/xen/evtchn.h	2011-02-01 15:09:47.000000000 +0100
+++ head-2011-03-11/include/xen/evtchn.h	2010-11-23 15:07:01.000000000 +0100
@@ -109,7 +109,9 @@ int bind_ipi_to_irqhandler(
  */
 void unbind_from_irqhandler(unsigned int irq, void *dev_id);
 
+#ifndef CONFIG_XEN
 void irq_resume(void);
+#endif
 
 /* Entry point for notifications into Linux subsystems. */
 asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
--- head-2011-03-11.orig/include/xen/gnttab.h	2011-01-31 17:56:27.000000000 +0100
+++ head-2011-03-11/include/xen/gnttab.h	2010-11-23 15:07:01.000000000 +0100
@@ -111,8 +111,9 @@ static inline void __gnttab_dma_unmap_pa
 
 void gnttab_reset_grant_page(struct page *page);
 
-int gnttab_suspend(void);
+#ifndef CONFIG_XEN
 int gnttab_resume(void);
+#endif
 
 void *arch_gnttab_alloc_shared(unsigned long *frames);
 