From: Nick Piggin <npiggin@novell.com>
Subject: Add mark_rodata_rw() to un-protect read-only kernel code pages
References: bnc#439348
Patch-mainline: probably never

CONFIG_RODATA presents a problem for antivirus vendors who do not have a
clean user-space interface for getting virus scanning triggered, and
currently resort to patching the kernel code instead (presumably the
system call table). With CONFIG_RODATA enabled, the kernel rejects such
write accesses.

Add a new mark_rodata_rw() function to un-protect the read-only kernel code
pages for now, and export mark_rodata_ro() and mark_rodata_rw() to modules.
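
For illustration only (not part of this patch), a module using the exported
helpers might look roughly like the sketch below. The module and function
names are made up for the example, the actual write into the read-only
section is left as a hypothetical placeholder, and CONFIG_DEBUG_RODATA is
assumed to be enabled so that both helpers are declared:

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <asm/cacheflush.h>	/* mark_rodata_ro(), mark_rodata_rw() */

	static int __init rodata_unprotect_demo_init(void)
	{
		/* temporarily drop the write protection on the rodata pages */
		mark_rodata_rw();

		/* ... hypothetical: patch a table living in the rodata area ... */

		/* restore the read-only protection */
		mark_rodata_ro();
		return 0;
	}

	static void __exit rodata_unprotect_demo_exit(void)
	{
	}

	module_init(rodata_unprotect_demo_init);
	module_exit(rodata_unprotect_demo_exit);
	MODULE_LICENSE("GPL");	/* required: the helpers are EXPORT_SYMBOL_GPL */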

This is not meant as a permanent workaround, and will be removed again in the
next release!

Acked-by: Andreas Gruenbacher <agruen@suse.de>

---
 arch/x86/include/asm/cacheflush.h |    3 +++
 arch/x86/mm/init_32.c             |   14 ++++++++++++++
 arch/x86/mm/init_64.c             |   31 +++++++++++++++++++++++++------
 arch/x86/mm/pageattr.c            |   31 +++++++++++++++++++++++++++++--
 4 files changed, 71 insertions(+), 8 deletions(-)

--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -101,6 +101,7 @@ int set_memory_x(unsigned long addr, int
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_rw_force(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 int set_memory_4k(unsigned long addr, int numpages);
 
@@ -138,12 +139,14 @@ int set_pages_x(struct page *page, int n
 int set_pages_nx(struct page *page, int numpages);
 int set_pages_ro(struct page *page, int numpages);
 int set_pages_rw(struct page *page, int numpages);
+int set_pages_rw_force(struct page *page, int numpages);
 
 
 void clflush_cache_range(void *addr, unsigned int size);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
+void mark_rodata_rw(void);
 extern const int rodata_test_data;
 extern int kernel_set_to_readonly;
 void set_kernel_text_rw(void);
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -967,5 +967,19 @@ void mark_rodata_ro(void)
 #endif
 	mark_nxdata_nx();
 }
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
+
+void mark_rodata_rw(void)
+{
+	unsigned long start = PFN_ALIGN(_text);
+	unsigned long size = PFN_ALIGN(_etext) - start;
+
+	start += size;
+	size = (unsigned long)__end_rodata - start;
+	set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		size >> 10);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
 #endif
 
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -782,6 +782,7 @@ void set_kernel_text_ro(void)
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 }
 
+static int initmem_freed __read_mostly = 0;
 void mark_rodata_ro(void)
 {
 	unsigned long start = PFN_ALIGN(_text);
@@ -814,15 +815,33 @@ void mark_rodata_ro(void)
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 
-	free_init_pages("unused kernel memory",
-			(unsigned long) page_address(virt_to_page(text_end)),
-			(unsigned long)
+	if (!initmem_freed) {
+		initmem_freed = 1;
+		free_init_pages("unused kernel memory",
+				(unsigned long)
+				page_address(virt_to_page(text_end)),
+				(unsigned long)
 				page_address(virt_to_page(rodata_start)));
-	free_init_pages("unused kernel memory",
-			(unsigned long) page_address(virt_to_page(rodata_end)),
-			(unsigned long) page_address(virt_to_page(data_start)));
+		free_init_pages("unused kernel memory",
+				(unsigned long)
+				page_address(virt_to_page(rodata_end)),
+				(unsigned long)
+				page_address(virt_to_page(data_start)));
+	}
 }
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
 
+void mark_rodata_rw(void)
+{
+	unsigned long rodata_start =
+		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
+
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		(end - rodata_start) >> 10);
+	set_memory_rw_force(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
 #endif
 
 int kern_addr_valid(unsigned long addr)
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -246,6 +246,8 @@ static void cpa_flush_array(unsigned lon
 	}
 }
 
+static int static_protections_allow_rodata __read_mostly;
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
@@ -279,8 +281,11 @@ static inline pgprot_t static_protection
 	 * catches all aliases.
 	 */
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) {
+		if (!static_protections_allow_rodata)
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
+
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -1155,6 +1160,21 @@ int set_memory_rw(unsigned long addr, in
 }
 EXPORT_SYMBOL_GPL(set_memory_rw);
 
+/* hack: bypass kernel rodata section static_protections check. */
+int set_memory_rw_force(unsigned long addr, int numpages)
+{
+	static DEFINE_MUTEX(lock);
+	int ret;
+
+	mutex_lock(&lock);
+	static_protections_allow_rodata = 1;
+	ret = change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
+	static_protections_allow_rodata = 0;
+	mutex_unlock(&lock);
+
+	return ret;
+}
+
 int set_memory_np(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
@@ -1288,6 +1308,13 @@ int set_pages_rw(struct page *page, int
 	return set_memory_rw(addr, numpages);
 }
 
+int set_pages_rw_force(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	return set_memory_rw_force(addr, numpages);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __set_pages_p(struct page *page, int numpages)