From: Nick Piggin
Subject: Add mark_rodata_rw() to un-protect read-only kernel code pages
Patch-mainline: probably never
References: bnc#439348

CONFIG_RODATA presents a problem for antivirus vendors who do not have a
clean user-space interface for getting virus scanning triggered, and
currently resort to patching the kernel code instead (presumably the
system call table).  With CONFIG_RODATA enabled, the kernel rejects such
write accesses.

Add a new mark_rodata_rw() function to un-protect the read-only kernel
code pages for now, and export mark_rodata_ro() and mark_rodata_rw() to
modules.

This is not meant as a permanent workaround, and will be removed again in
the next release!

Acked-by: Andreas Gruenbacher

Automatically created from "patches.suse/x86-mark_rodata_rw.patch" by xen-port-patches.py
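As a rough illustration of how the exports are meant to be consumed (this
sketch is not part of the patch), a module would bracket its modification
of the otherwise read-only region with the two calls.  The
install_scan_hook()/remove_scan_hook() helpers below are hypothetical
placeholders for whatever the vendor module actually rewrites, and the
extern declarations stand in for header declarations not shown in this
excerpt:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

extern void mark_rodata_rw(void);	/* added and exported by this patch */
extern void mark_rodata_ro(void);	/* exported by this patch */

static void install_scan_hook(void)	/* hypothetical placeholder */
{
	printk(KERN_INFO "scan hook installed\n");
}

static void remove_scan_hook(void)	/* hypothetical placeholder */
{
	printk(KERN_INFO "scan hook removed\n");
}

static int __init scan_hook_init(void)
{
	mark_rodata_rw();	/* write-enable kernel text/rodata */
	install_scan_hook();
	mark_rodata_ro();	/* restore the read-only protection */
	return 0;
}

static void __exit scan_hook_exit(void)
{
	mark_rodata_rw();
	remove_scan_hook();
	mark_rodata_ro();
}

module_init(scan_hook_init);
module_exit(scan_hook_exit);
MODULE_LICENSE("GPL");	/* the exports are EXPORT_SYMBOL_GPL */

Note that mark_rodata_ro() simply re-applies the protections; the
initmem_freed flag introduced below keeps the 64-bit variant from freeing
the init pages a second time when it is called repeatedly.
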
--- head-2010-04-15.orig/arch/x86/mm/init_32-xen.c	2010-04-15 10:51:33.000000000 +0200
+++ head-2010-04-15/arch/x86/mm/init_32-xen.c	2010-03-25 14:37:41.000000000 +0100
@@ -1137,6 +1137,20 @@ void mark_rodata_ro(void)
 	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
 #endif
 }
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
+
+void mark_rodata_rw(void)
+{
+	unsigned long start = PFN_ALIGN(_text);
+	unsigned long size = PFN_ALIGN(_etext) - start;
+
+	start += size;
+	size = (unsigned long)__end_rodata - start;
+	set_pages_rw_force(virt_to_page(start), size >> PAGE_SHIFT);
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		size >> 10);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
 #endif
 
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
--- head-2010-04-15.orig/arch/x86/mm/init_64-xen.c	2010-04-15 10:48:32.000000000 +0200
+++ head-2010-04-15/arch/x86/mm/init_64-xen.c	2010-04-15 11:41:27.000000000 +0200
@@ -1019,6 +1019,7 @@ void set_kernel_text_ro(void)
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 }
 
+static int initmem_freed __read_mostly = 0;
 void mark_rodata_ro(void)
 {
 	unsigned long start = PFN_ALIGN(_text);
@@ -1051,15 +1052,33 @@ void mark_rodata_ro(void)
 	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
 #endif
 
-	free_init_pages("unused kernel memory",
-			(unsigned long) page_address(virt_to_page(text_end)),
-			(unsigned long)
+	if (!initmem_freed) {
+		initmem_freed = 1;
+		free_init_pages("unused kernel memory",
+			(unsigned long)
+			page_address(virt_to_page(text_end)),
+			(unsigned long)
 				page_address(virt_to_page(rodata_start)));
-	free_init_pages("unused kernel memory",
-			(unsigned long) page_address(virt_to_page(rodata_end)),
-			(unsigned long) page_address(virt_to_page(data_start)));
+		free_init_pages("unused kernel memory",
+			(unsigned long)
+			page_address(virt_to_page(rodata_end)),
+			(unsigned long)
+			page_address(virt_to_page(data_start)));
+	}
 }
+EXPORT_SYMBOL_GPL(mark_rodata_ro);
 
+void mark_rodata_rw(void)
+{
+	unsigned long rodata_start =
+		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+	unsigned long end = (unsigned long) &__end_rodata;
+
+	printk(KERN_INFO "Write enabling the kernel read-only data: %luk\n",
+		(end - rodata_start) >> 10);
+	set_memory_rw_force(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mark_rodata_rw);
 #endif
 
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
--- head-2010-04-15.orig/arch/x86/mm/pageattr-xen.c	2010-04-15 10:48:32.000000000 +0200
+++ head-2010-04-15/arch/x86/mm/pageattr-xen.c	2010-03-25 14:37:41.000000000 +0100
@@ -245,6 +245,8 @@ static void cpa_flush_array(unsigned lon
 	}
 }
 
+static int static_protections_allow_rodata __read_mostly;
+
 /*
  * Certain areas of memory on x86 require very specific protection flags,
  * for example the BIOS area or kernel text. Callers don't always get this
@@ -278,8 +280,10 @@ static inline pgprot_t static_protection
 	 * catches all aliases.
 	 */
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) {
+		if (!static_protections_allow_rodata)
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) && !defined(CONFIG_XEN)
 	/*
@@ -1200,6 +1204,21 @@ int set_memory_rw(unsigned long addr, in
 }
 EXPORT_SYMBOL_GPL(set_memory_rw);
 
+/* hack: bypass kernel rodata section static_protections check. */
+int set_memory_rw_force(unsigned long addr, int numpages)
+{
+	static DEFINE_MUTEX(lock);
+	int ret;
+
+	mutex_lock(&lock);
+	static_protections_allow_rodata = 1;
+	ret = change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
+	static_protections_allow_rodata = 0;
+	mutex_unlock(&lock);
+
+	return ret;
+}
+
 int set_memory_np(unsigned long addr, int numpages)
 {
 	return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
@@ -1314,6 +1333,13 @@ int set_pages_rw(struct page *page, int
 	return set_memory_rw(addr, numpages);
 }
 
+int set_pages_rw_force(struct page *page, int numpages)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	return set_memory_rw_force(addr, numpages);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __set_pages_p(struct page *page, int numpages)