qubes-linux-kernel/patches.xen/xen3-x86-mark_rodata_rw.patch

From: Nick Piggin <npiggin@novell.com>
Subject: Add mark_rodata_rw() to un-protect read-only kernel code pages
Patch-mainline: probably never
References: bnc#439348

CONFIG_RODATA presents a problem for antivirus vendors who do not have a
clean user-space interface for getting virus scanning triggered, and
currently resort to patching the kernel code instead (presumably the
system call table). With CONFIG_RODATA enabled, the kernel rejects such
write accesses.

Add a new mark_rodata_rw() function to un-protect the read-only kernel code
pages for now, and export mark_rodata_ro() and mark_rodata_rw() to modules.

This is not meant as a permanent workaround, and will be removed again in the
next release!

Acked-by: Andreas Gruenbacher <agruen@suse.de>

Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-patches.py
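
[Illustration only, not part of the patch: a minimal sketch of how a module might use the
two exported symbols. The module name is made up, and because the patch adds no header
declaration for mark_rodata_rw(), a caller would have to declare the prototypes itself.]

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* Both symbols are EXPORT_SYMBOL_GPL and have no header declaration. */
extern void mark_rodata_ro(void);
extern void mark_rodata_rw(void);

static int __init rodata_rw_demo_init(void)
{
	mark_rodata_rw();	/* kernel read-only data is writable from here on */
	/* ... write to the normally read-only region here ... */
	mark_rodata_ro();	/* restore the read-only protection */
	return 0;
}

static void __exit rodata_rw_demo_exit(void)
{
}

module_init(rodata_rw_demo_init);
module_exit(rodata_rw_demo_exit);
MODULE_LICENSE("GPL");	/* required to use GPL-only exports */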
--- head-2011-03-17.orig/arch/x86/mm/init_32-xen.c 2011-02-01 15:41:35.000000000 +0100
+++ head-2011-03-17/arch/x86/mm/init_32-xen.c 2011-02-02 15:07:16.000000000 +0100
@@ -1035,5 +1035,19 @@ void mark_rodata_ro(void)
 #endif
 	mark_nxdata_nx();
 }
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
+
+void mark_rodata_rw(void)
+{
+	unsigned long start = PFN_ALIGN(_text);
+	unsigned long size = PFN_ALIGN(_etext) - start;
+
+	start += size;
+	size = (unsigned long)__end_rodata - start;
+	set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+	       size >> 10);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
 #endif
--- head-2011-03-17.orig/arch/x86/mm/init_64-xen.c 2011-03-17 14:22:21.000000000 +0100
+++ head-2011-03-17/arch/x86/mm/init_64-xen.c 2011-02-02 15:07:16.000000000 +0100
@@ -1042,6 +1042,7 @@ void set_kernel_text_ro(void)
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 }
 
+static int initmem_freed __read_mostly = 0;
 void mark_rodata_ro(void)
 {
 	unsigned long start = PFN_ALIGN(_text);
@@ -1074,15 +1075,33 @@ void mark_rodata_ro(void)
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 
-	free_init_pages("unused kernel memory",
-			(unsigned long) page_address(virt_to_page(text_end)),
-			(unsigned long)
+	if (!initmem_freed) {
+		initmem_freed = 1;
+		free_init_pages("unused kernel memory",
+				(unsigned long)
+				page_address(virt_to_page(text_end)),
+				(unsigned long)
 				page_address(virt_to_page(rodata_start)));
-	free_init_pages("unused kernel memory",
-			(unsigned long) page_address(virt_to_page(rodata_end)),
-			(unsigned long) page_address(virt_to_page(data_start)));
+		free_init_pages("unused kernel memory",
+				(unsigned long)
+				page_address(virt_to_page(rodata_end)),
+				(unsigned long)
+				page_address(virt_to_page(data_start)));
+	}
 }
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
+void mark_rodata_rw(void)
+{
+	unsigned long rodata_start =
+		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+	unsigned long end = (unsigned long) &__end_rodata;
+
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+	       (end - rodata_start) >> 10);
+	set_memory_rw_force(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
 
 #endif
 
 int kern_addr_valid(unsigned long addr)
--- head-2011-03-17.orig/arch/x86/mm/pageattr-xen.c 2011-03-17 14:22:21.000000000 +0100
+++ head-2011-03-17/arch/x86/mm/pageattr-xen.c 2011-03-17 14:33:38.000000000 +0100
@@ -244,6 +244,8 @@ static void cpa_flush_array(unsigned lon
 	}
 }
 
+static int static_protections_allow_rodata __read_mostly;
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
@@ -277,8 +279,10 @@ static inline pgprot_t static_protection
	 * catches all aliases.
	 */
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) {
+		if (!static_protections_allow_rodata)
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) && !defined(CONFIG_XEN)
 	/*
@@ -1216,6 +1220,21 @@ int set_memory_rw(unsigned long addr, in
 }
 EXPORT_SYMBOL_GPL(set_memory_rw);
 
+/* hack: bypass kernel rodata section static_protections check. */
+int set_memory_rw_force(unsigned long addr, int numpages)
+{
+	static DEFINE_MUTEX(lock);
+	int ret;
+
+	mutex_lock(&lock);
+	static_protections_allow_rodata = 1;
+	ret = change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
+	static_protections_allow_rodata = 0;
+	mutex_unlock(&lock);
+
+	return ret;
+}
+
 int set_memory_np(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
@@ -1349,6 +1368,13 @@ int set_pages_rw(struct page *page, int
 	return set_memory_rw(addr, numpages);
 }
 
+int set_pages_rw_force(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	return set_memory_rw_force(addr, numpages);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static int __set_pages_p(struct page *page, int numpages)