Subject: Add fallback when XENMEM_exchange fails to replace contiguous region
From: jbeulich@novell.com
Patch-mainline: obsolete
References: 181869

When the XENMEM_exchange call in xen_destroy_contiguous_region() fails,
fall back to releasing the region page by page: each machine frame of the
special region is handed back to Xen via XENMEM_decrease_reservation, a
freshly allocated (arbitrary) page takes its place in the kernel mapping
and the P2M table, and that now frame-less page is added to the balloon
through the new free_empty_pages() helper. This avoids losing precious
special memory in places where any memory can be used.
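
For orientation, the per-page fallback boils down to the sequence sketched
here. This is a condensed, illustrative outline only: it uses the plain
hypercall wrappers instead of the single batched multicall the real hunk
issues, and it omits the scrubbing/unmapping of the replacement page, the
M2P fix-up and all error handling. The identifiers (exchange, in_frame,
vstart, frame, order) are the locals of xen_destroy_contiguous_region();
the hypervisor.c hunk below is the authoritative code.

	for (i = 0; i < (1U << order); i++) {
		/* Any page will do as a replacement for the special one. */
		struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);
		unsigned long pfn = page_to_pfn(page);

		/* Take the replacement page's machine frame away from it... */
		frame = pfn_to_mfn(pfn);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);

		/* ...and use that frame to back the region's virtual range. */
		HYPERVISOR_update_va_mapping(vstart,
					     pfn_pte_ma(frame, PAGE_KERNEL),
					     UVMF_INVLPG|UVMF_ALL);
		set_phys_to_machine(__pa(vstart) >> PAGE_SHIFT, frame);

		/* Hand the region's original, special frame back to Xen. */
		HYPERVISOR_memory_op(XENMEM_decrease_reservation, &exchange.in);

		/* The replacement page has no frame behind it any more, so
		   give it to the balloon via the new free_empty_pages(). */
		free_empty_pages(&page, 1);

		in_frame++;
		vstart += PAGE_SIZE;
	}
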
--- head-2010-04-15.orig/arch/x86/mm/hypervisor.c	2010-03-24 15:25:06.000000000 +0100
+++ head-2010-04-15/arch/x86/mm/hypervisor.c	2009-06-09 15:52:17.000000000 +0200
@@ -43,6 +43,7 @@
 #include <xen/interface/memory.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
+#include <linux/highmem.h>
 #include <asm/tlbflush.h>
 #include <linux/highmem.h>
 
@@ -719,6 +720,83 @@ void xen_destroy_contiguous_region(unsig
 		BUG();
 
 	balloon_unlock(flags);
+
+	if (unlikely(!success)) {
+		/* Try hard to get the special memory back to Xen. */
+		exchange.in.extent_order = 0;
+		set_xen_guest_handle(exchange.in.extent_start, &in_frame);
+
+		for (i = 0; i < (1U<<order); i++) {
+			struct page *page = alloc_page(__GFP_HIGHMEM|__GFP_COLD);
+			unsigned long pfn;
+			mmu_update_t mmu;
+			unsigned int j = 0;
+
+			if (!page) {
+				printk(KERN_WARNING "Xen and kernel out of memory "
+				       "while trying to release an order %u "
+				       "contiguous region\n", order);
+				break;
+			}
+			pfn = page_to_pfn(page);
+
+			balloon_lock(flags);
+
+			if (!PageHighMem(page)) {
+				void *v = __va(pfn << PAGE_SHIFT);
+
+				scrub_pages(v, 1);
+				MULTI_update_va_mapping(cr_mcl + j, (unsigned long)v,
+							__pte_ma(0), UVMF_INVLPG|UVMF_ALL);
+				++j;
+			}
+#ifdef CONFIG_XEN_SCRUB_PAGES
+			else {
+				scrub_pages(kmap(page), 1);
+				kunmap(page);
+				kmap_flush_unused();
+			}
+#endif
+
+			frame = pfn_to_mfn(pfn);
+			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+
+			MULTI_update_va_mapping(cr_mcl + j, vstart,
+						pfn_pte_ma(frame, PAGE_KERNEL),
+						UVMF_INVLPG|UVMF_ALL);
+			++j;
+
+			pfn = __pa(vstart) >> PAGE_SHIFT;
+			set_phys_to_machine(pfn, frame);
+			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+				mmu.ptr = ((uint64_t)frame << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+				mmu.val = pfn;
+				cr_mcl[j].op = __HYPERVISOR_mmu_update;
+				cr_mcl[j].args[0] = (unsigned long)&mmu;
+				cr_mcl[j].args[1] = 1;
+				cr_mcl[j].args[2] = 0;
+				cr_mcl[j].args[3] = DOMID_SELF;
+				++j;
+			}
+
+			cr_mcl[j].op = __HYPERVISOR_memory_op;
+			cr_mcl[j].args[0] = XENMEM_decrease_reservation;
+			cr_mcl[j].args[1] = (unsigned long)&exchange.in;
+
+			if (HYPERVISOR_multicall(cr_mcl, j + 1))
+				BUG();
+			BUG_ON(cr_mcl[j].result != 1);
+			while (j--)
+				BUG_ON(cr_mcl[j].result != 0);
+
+			balloon_unlock(flags);
+
+			free_empty_pages(&page, 1);
+
+			in_frame++;
+			vstart += PAGE_SIZE;
+		}
+	}
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
 
--- head-2010-04-15.orig/drivers/xen/balloon/balloon.c	2010-04-15 11:44:37.000000000 +0200
+++ head-2010-04-15/drivers/xen/balloon/balloon.c	2010-04-15 11:46:09.000000000 +0200
@@ -776,7 +776,11 @@ struct page **alloc_empty_pages_and_page
 }
 EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
 
-void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+#endif /* CONFIG_XEN_BACKEND */
+
+#ifdef CONFIG_XEN
+static void _free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages,
+					  bool free_vec)
 {
 	unsigned long flags;
 	int i;
@@ -787,17 +791,33 @@ void free_empty_pages_and_pagevec(struct
 	balloon_lock(flags);
 	for (i = 0; i < nr_pages; i++) {
 		BUG_ON(page_count(pagevec[i]) != 1);
-		balloon_append(pagevec[i], 0);
+		balloon_append(pagevec[i], !free_vec);
+	}
+	if (!free_vec) {
+		bs.current_pages -= nr_pages;
+		totalram_pages = bs.current_pages - totalram_bias;
 	}
 	balloon_unlock(flags);
 
-	kfree(pagevec);
+	if (free_vec)
+		kfree(pagevec);
 
 	schedule_work(&balloon_worker);
 }
-EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
 
-#endif /* CONFIG_XEN_BACKEND */
+void free_empty_pages(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, false);
+}
+#endif
+
+#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
+void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
+{
+	_free_empty_pages_and_pagevec(pagevec, nr_pages, true);
+}
+EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
+#endif
 
 void balloon_release_driver_page(struct page *page)
 {
--- head-2010-04-15.orig/include/xen/balloon.h	2010-03-24 15:12:36.000000000 +0100
+++ head-2010-04-15/include/xen/balloon.h	2009-06-09 15:52:17.000000000 +0200
@@ -47,6 +47,10 @@ void balloon_update_driver_allowance(lon
 struct page **alloc_empty_pages_and_pagevec(int nr_pages);
 void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages);
 
+/* Free an empty page range (not allocated through
+   alloc_empty_pages_and_pagevec), adding to the balloon. */
+void free_empty_pages(struct page **pagevec, int nr_pages);
+
 void balloon_release_driver_page(struct page *page);
 
 /*
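
Note on the two balloon helpers after this change (illustrative commentary,
not part of the patch): both are now thin wrappers around
_free_empty_pages_and_pagevec() and differ only in whether the page-pointer
vector itself is freed and in how the ballooned pages are accounted for.
Hypothetical callers, assuming the usual CONFIG_XEN/CONFIG_XEN_BACKEND setup:

	/* Backend-style caller: the pages and the vector both came from
	   alloc_empty_pages_and_pagevec(), so both are released and the
	   balloon's accounting is left alone, exactly as before. */
	struct page **pv = alloc_empty_pages_and_pagevec(16);

	if (pv)
		free_empty_pages_and_pagevec(pv, 16);

	/* New-style caller, as in the hypervisor.c hunk above: 'page' was
	   obtained with alloc_page(), so only the page itself goes to the
	   balloon, and bs.current_pages/totalram_pages shrink with it. */
	free_empty_pages(&page, 1);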