From: Jiri Slaby
Subject: Linux 2.6.38.2
Patch-mainline: Linux 2.6.38.2

Signed-off-by: Jiri Slaby
---
diff --git a/Makefile b/Makefile
index 167ef45..6c15525 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = .1
+EXTRAVERSION = .2
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index f62bb4c..7c3fb07 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -506,6 +506,11 @@ static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
 	},
 };
 
+static struct platform_device uda1340_codec = {
+	.name = "uda134x-codec",
+	.id = -1,
+};
+
 static struct platform_device *mini2440_devices[] __initdata = {
 	&s3c_device_ohci,
 	&s3c_device_wdt,
@@ -521,7 +526,9 @@ static struct platform_device *mini2440_devices[] __initdata = {
 	&s3c_device_nand,
 	&s3c_device_sdi,
 	&s3c_device_iis,
+	&uda1340_codec,
 	&mini2440_audio,
+	&samsung_asoc_dma,
 };
 
 static void __init mini2440_map_io(void)
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 90a15d2..2130ca6 100644
--- a/arch/sh/kernel/ptrace_32.c
+++ b/arch/sh/kernel/ptrace_32.c
@@ -101,6 +101,8 @@ static int set_single_step(struct task_struct *tsk, unsigned long addr)
 
 	attr = bp->attr;
 	attr.bp_addr = addr;
+	/* reenable breakpoint */
+	attr.disabled = false;
 	err = modify_user_hw_breakpoint(bp, &attr);
 	if (unlikely(err))
 		return err;
@@ -392,6 +394,9 @@ long arch_ptrace(struct task_struct *child, long request,
 				tmp = 0;
 			} else {
 				unsigned long index;
+				ret = init_fpu(child);
+				if (ret)
+					break;
 				index = addr - offsetof(struct user, fpu);
 				tmp = ((unsigned long *)child->thread.xstate)
 					[index >> 2];
@@ -423,6 +428,9 @@ long arch_ptrace(struct task_struct *child, long request,
 		else if (addr >= offsetof(struct user, fpu) &&
 			 addr < offsetof(struct user, u_fpvalid)) {
 			unsigned long index;
+			ret = init_fpu(child);
+			if (ret)
+				break;
 			index = addr - offsetof(struct user, fpu);
 			set_stopped_child_used_math(child);
 			((unsigned long *)child->thread.xstate)
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 4436eac..c8f9764 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -403,6 +403,9 @@ long arch_ptrace(struct task_struct *child, long request,
 		else if ((addr >= offsetof(struct user, fpu)) &&
 			 (addr < offsetof(struct user, u_fpvalid))) {
 			unsigned long index;
+			ret = init_fpu(child);
+			if (ret)
+				break;
 			index = addr - offsetof(struct user, fpu);
 			tmp = get_fpu_long(child, index);
 		} else if (addr == offsetof(struct user, u_fpvalid)) {
@@ -442,6 +445,9 @@ long arch_ptrace(struct task_struct *child, long request,
 		else if ((addr >= offsetof(struct user, fpu)) &&
 			 (addr < offsetof(struct user, u_fpvalid))) {
 			unsigned long index;
+			ret = init_fpu(child);
+			if (ret)
+				break;
 			index = addr - offsetof(struct user, fpu);
 			ret = put_fpu_long(child, index, data);
 		}
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index c8b4efa..9ca3b0e 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1413,7 +1413,7 @@ ENTRY(async_page_fault)
 	CFI_ADJUST_CFA_OFFSET 4
 	jmp error_code
 	CFI_ENDPROC
-END(apf_page_fault)
+END(async_page_fault)
 #endif
 
 /*
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2d2673c..5655c22 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	/* Make NULL pointers segfault */
zap_identity_mappings(); - /* Cleanup the over mapped high alias */ - cleanup_highmap(); - max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT; for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) { diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index d3cfe26..e543fe9 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -297,6 +297,9 @@ static void __init init_gbpages(void) static inline void init_gbpages(void) { } +static void __init cleanup_highmap(void) +{ +} #endif static void __init reserve_brk(void) @@ -922,6 +925,8 @@ void __init setup_arch(char **cmdline_p) */ reserve_brk(); + cleanup_highmap(); + memblock.current_limit = get_max_mapped(); memblock_x86_fill(); diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 947f42a..f13ff3a 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -279,25 +279,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, load_cr3(swapper_pg_dir); #endif -#ifdef CONFIG_X86_64 - if (!after_bootmem && !start) { - pud_t *pud; - pmd_t *pmd; - - mmu_cr4_features = read_cr4(); - - /* - * _brk_end cannot change anymore, but it and _end may be - * located on different 2M pages. cleanup_highmap(), however, - * can only consider _end when it runs, so destroy any - * mappings beyond _brk_end here. - */ - pud = pud_offset(pgd_offset_k(_brk_end), _brk_end); - pmd = pmd_offset(pud, _brk_end - 1); - while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1)) - pmd_clear(pmd); - } -#endif __flush_tlb_all(); if (!after_bootmem && e820_table_end > e820_table_start) diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index c14a542..68f9921 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -51,6 +51,7 @@ #include #include #include +#include static int __init parse_direct_gbpages_off(char *arg) { @@ -293,18 +294,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) * to the compile time generated pmds. This results in invalid pmds up * to the point where we hit the physaddr 0 mapping. * - * We limit the mappings to the region from _text to _end. _end is - * rounded up to the 2MB boundary. This catches the invalid pmds as + * We limit the mappings to the region from _text to _brk_end. _brk_end + * is rounded up to the 2MB boundary. This catches the invalid pmds as * well, as they are located before _text: */ void __init cleanup_highmap(void) { unsigned long vaddr = __START_KERNEL_map; - unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1; + unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); + unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; pmd_t *pmd = level2_kernel_pgt; - pmd_t *last_pmd = pmd + PTRS_PER_PMD; - for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) { + for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { if (pmd_none(*pmd)) continue; if (vaddr < (unsigned long) _text || vaddr > end) diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f608942..6020562 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1651,9 +1651,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { pte_t pte; - if (pfn > max_pfn_mapped) - max_pfn_mapped = pfn; - if (!pte_none(pte_page[pteidx])) continue; @@ -1711,6 +1708,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, pud_t *l3; pmd_t *l2; + /* max_pfn_mapped is the last pfn mapped in the initial memory + * mappings. 
Considering that on Xen after the kernel mappings we + * have the mappings of some pages that don't exist in pfn space, we + * set max_pfn_mapped to the last real pfn mapped. */ + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + /* Zap identity mapping */ init_level4_pgt[0] = __pgd(0); @@ -1815,9 +1818,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, initial_kernel_pmd = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + - xen_start_info->nr_pt_frames * PAGE_SIZE + - 512*1024); + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 69ad529..ea5ac2d 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) } /* generate SMI */ + /* inb to force posted write through and make SMI happen now */ asm volatile ( - "outb %b0,%w1" + "outb %b0,%w1\n" + "inb %w1" : /* no output args */ : "a" (smi_cmd->command_code), "d" (smi_cmd->command_address), diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 654faa8..6a5371b 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1073,6 +1073,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, uint32_t __user *encoder_id; struct drm_mode_group *mode_group; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); /* @@ -1244,6 +1247,9 @@ int drm_mode_getcrtc(struct drm_device *dev, struct drm_mode_object *obj; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_resp->crtc_id, @@ -1312,6 +1318,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, uint64_t __user *prop_values; uint32_t __user *encoder_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); @@ -1431,6 +1440,9 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_encoder *encoder; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, enc_resp->encoder_id, DRM_MODE_OBJECT_ENCODER); @@ -1486,6 +1498,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, int ret = 0; int i; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_req->crtc_id, DRM_MODE_OBJECT_CRTC); @@ -1603,6 +1618,9 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, struct drm_crtc *crtc; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if (!req->flags) { DRM_ERROR("no operation set\n"); return -EINVAL; @@ -1667,6 +1685,9 @@ int drm_mode_addfb(struct drm_device *dev, struct drm_framebuffer *fb; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_ERROR("mode new framebuffer width not within limits\n"); return -EINVAL; @@ -1724,6 +1745,9 @@ int drm_mode_rmfb(struct drm_device *dev, int ret = 0; int found = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + 
return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); /* TODO check that we realy get a framebuffer back. */ @@ -1780,6 +1804,9 @@ int drm_mode_getfb(struct drm_device *dev, struct drm_framebuffer *fb; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { @@ -1813,6 +1840,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, int num_clips; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); if (!obj) { @@ -1996,6 +2026,9 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev, struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2042,6 +2075,9 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev, struct drm_mode_modeinfo *umode = &mode_cmd->mode; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2211,6 +2247,9 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev, uint64_t __user *values_ptr; uint32_t __user *blob_length_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); if (!obj) { @@ -2333,6 +2372,9 @@ int drm_mode_getblob_ioctl(struct drm_device *dev, int ret = 0; void *blob_ptr; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); if (!obj) { @@ -2393,6 +2435,9 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev, int ret = -EINVAL; int i; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); @@ -2509,6 +2554,9 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, int size; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { @@ -2560,6 +2608,9 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev, int size; int ret = 0; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + return -EINVAL; + mutex_lock(&dev->mode_config.mutex); obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index ea1c4b0..c3c78ee 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -498,11 +498,12 @@ EXPORT_SYMBOL(drm_gem_vm_open); void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; + struct drm_device *dev = obj->dev; - mutex_lock(&obj->dev->struct_mutex); + mutex_lock(&dev->struct_mutex); drm_vm_close_locked(vma); drm_gem_object_unreference(obj); - mutex_unlock(&obj->dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git 
a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 36e66cc..729c95a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1749,8 +1749,10 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) return; spin_lock(&file_priv->mm.lock); - list_del(&request->client_list); - request->file_priv = NULL; + if (request->file_priv) { + list_del(&request->client_list); + request->file_priv = NULL; + } spin_unlock(&file_priv->mm.lock); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 50ab161..ded73a6 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -388,6 +388,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, uint32_t __iomem *reloc_entry; void __iomem *reloc_page; + /* We can't wait for rendering with pagefaults disabled */ + if (obj->active && in_atomic()) + return -EFAULT; + ret = i915_gem_object_set_to_gtt_domain(obj, 1); if (ret) return ret; @@ -461,15 +465,24 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, struct list_head *objects) { struct drm_i915_gem_object *obj; - int ret; - + int ret = 0; + + /* This is the fast path and we cannot handle a pagefault whilst + * holding the struct mutex lest the user pass in the relocations + * contained within a mmaped bo. For in such a case we, the page + * fault handler would call i915_gem_fault() and we would try to + * acquire the struct mutex again. Obviously this is bad and so + * lockdep complains vehemently. + */ + pagefault_disable(); list_for_each_entry(obj, objects, exec_list) { ret = i915_gem_execbuffer_relocate_object(obj, eb); if (ret) - return ret; + break; } + pagefault_enable(); - return 0; + return ret; } static int diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 4a5a73b..e967cc8 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -957,7 +957,11 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode /* adjust pixel clock as needed */ adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); - if (ASIC_IS_AVIVO(rdev)) + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) + /* TV seems to prefer the legacy algo on some boards */ + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); + else if (ASIC_IS_AVIVO(rdev)) radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); else diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index cf7c8d5..cf602e2 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -448,7 +448,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) { - int edid_info; + int edid_info, size; struct edid *edid; unsigned char *raw; edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); @@ -456,11 +456,12 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) return false; raw = rdev->bios + edid_info; - edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL); + size = EDID_LENGTH * (raw[0x7e] + 1); + edid = kmalloc(size, GFP_KERNEL); if (edid == NULL) return false; - memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1)); + 
memcpy((unsigned char *)edid, raw, size); if (!drm_edid_is_valid(edid)) { kfree(edid); @@ -468,6 +469,7 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) } rdev->mode_info.bios_hardcoded_edid = edid; + rdev->mode_info.bios_hardcoded_edid_size = size; return true; } @@ -475,8 +477,17 @@ bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) struct edid * radeon_bios_get_hardcoded_edid(struct radeon_device *rdev) { - if (rdev->mode_info.bios_hardcoded_edid) - return rdev->mode_info.bios_hardcoded_edid; + struct edid *edid; + + if (rdev->mode_info.bios_hardcoded_edid) { + edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL); + if (edid) { + memcpy((unsigned char *)edid, + (unsigned char *)rdev->mode_info.bios_hardcoded_edid, + rdev->mode_info.bios_hardcoded_edid_size); + return edid; + } + } return NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 22b7e3d..d83338b 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -629,6 +629,8 @@ static int radeon_vga_mode_valid(struct drm_connector *connector, static enum drm_connector_status radeon_vga_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; @@ -679,6 +681,17 @@ radeon_vga_detect(struct drm_connector *connector, bool force) if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); + + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the + * vbios to deal with KVMs. If we have one and are not able to detect a monitor + * by other means, assume the CRT is connected and use that EDID. + */ + if ((!rdev->is_atom_bios) && + (ret == connector_status_disconnected) && + rdev->mode_info.bios_hardcoded_edid_size) { + ret = connector_status_connected; + } + radeon_connector_update_scratch_regs(connector, ret); return ret; } @@ -790,6 +803,8 @@ static int radeon_dvi_get_modes(struct drm_connector *connector) static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector, bool force) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder = NULL; struct drm_encoder_helper_funcs *encoder_funcs; @@ -829,8 +844,6 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) * you don't really know what's connected to which port as both are digital. */ if (radeon_connector->shared_ddc && (ret == connector_status_connected)) { - struct drm_device *dev = connector->dev; - struct radeon_device *rdev = dev->dev_private; struct drm_connector *list_connector; struct radeon_connector *list_radeon_connector; list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { @@ -895,6 +908,19 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); } + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the + * vbios to deal with KVMs. If we have one and are not able to detect a monitor + * by other means, assume the DFP is connected and use that EDID. 
In most + * cases the DVI port is actually a virtual KVM port connected to the service + * processor. + */ + if ((!rdev->is_atom_bios) && + (ret == connector_status_disconnected) && + rdev->mode_info.bios_hardcoded_edid_size) { + radeon_connector->use_digital = true; + ret = connector_status_connected; + } + out: /* updated in get modes as well since we need to know if it's analog or digital */ radeon_connector_update_scratch_regs(connector, ret); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index a670caa..8c134db 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -239,6 +239,7 @@ struct radeon_mode_info { struct drm_property *underscan_vborder_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; + int bios_hardcoded_edid_size; /* pointer to fbdev info structure */ struct radeon_fbdev *rfbdev; diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c index 7f85a86..53e6273 100644 --- a/drivers/input/xen-kbdfront.c +++ b/drivers/input/xen-kbdfront.c @@ -110,7 +110,7 @@ static irqreturn_t input_handler(int rq, void *dev_id) static int __devinit xenkbd_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { - int ret, i; + int ret, i, abs; struct xenkbd_info *info; struct input_dev *kbd, *ptr; @@ -128,6 +128,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, if (!info->page) goto error_nomem; + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) + abs = 0; + if (abs) + xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1"); + /* keyboard */ kbd = input_allocate_device(); if (!kbd) @@ -137,11 +142,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, kbd->id.bustype = BUS_PCI; kbd->id.vendor = 0x5853; kbd->id.product = 0xffff; - kbd->evbit[0] = BIT(EV_KEY); + + __set_bit(EV_KEY, kbd->evbit); for (i = KEY_ESC; i < KEY_UNKNOWN; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); for (i = KEY_OK; i < KEY_MAX; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); ret = input_register_device(kbd); if (ret) { @@ -160,12 +166,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, ptr->id.bustype = BUS_PCI; ptr->id.vendor = 0x5853; ptr->id.product = 0xfffe; - ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS); + + if (abs) { + __set_bit(EV_ABS, ptr->evbit); + input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); + input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + } else { + input_set_capability(ptr, EV_REL, REL_X); + input_set_capability(ptr, EV_REL, REL_Y); + } + input_set_capability(ptr, EV_REL, REL_WHEEL); + + __set_bit(EV_KEY, ptr->evbit); for (i = BTN_LEFT; i <= BTN_TASK; i++) - set_bit(i, ptr->keybit); - ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL); - input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); - input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + __set_bit(i, ptr->keybit); ret = input_register_device(ptr); if (ret) { @@ -272,7 +286,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct xenkbd_info *info = dev_get_drvdata(&dev->dev); - int ret, val; + int val; switch (backend_state) { case XenbusStateInitialising: @@ -285,16 +299,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, case XenbusStateInitWait: InitWait: - ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, - "feature-abs-pointer", "%d", &val); - if (ret < 0) - val = 0; - if (val) { 
- ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, - "request-abs-pointer", "1"); - if (ret) - pr_warning("can't request abs-pointer\n"); - } xenbus_switch_state(dev, XenbusStateConnected); break; diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index a1e9dfb..6459b8c 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1264,6 +1264,14 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain, break; + case UVC_OTT_VENDOR_SPECIFIC: + case UVC_OTT_DISPLAY: + case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: + if (uvc_trace_param & UVC_TRACE_PROBE) + printk(" OT %d", entity->id); + + break; + case UVC_TT_STREAMING: if (UVC_ENTITY_IS_ITERM(entity)) { if (uvc_trace_param & UVC_TRACE_PROBE) diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 5673d67..545c029 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -89,15 +89,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl) { - struct uvc_format *format; + struct uvc_format *format = NULL; struct uvc_frame *frame = NULL; unsigned int i; - if (ctrl->bFormatIndex <= 0 || - ctrl->bFormatIndex > stream->nformats) - return; + for (i = 0; i < stream->nformats; ++i) { + if (stream->format[i].index == ctrl->bFormatIndex) { + format = &stream->format[i]; + break; + } + } - format = &stream->format[ctrl->bFormatIndex - 1]; + if (format == NULL) + return; for (i = 0; i < format->nframes; ++i) { if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) { diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cb23aa2..e610cfe 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); if (pdev) { + pdev->current_state = PCI_D0; slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); pci_dev_put(pdev); } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 4ab49d4..30bb8d0 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb) if (!ACM_READY(acm)) goto exit; + usb_mark_last_busy(acm->dev); + data = (unsigned char *)(dr + 1); switch (dr->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: @@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb) break; } exit: - usb_mark_last_busy(acm->dev); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " @@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work) if (!ACM_READY(acm)) return; tty = tty_port_tty_get(&acm->port); + if (!tty) + return; tty_wakeup(tty); tty_kref_put(tty); } @@ -646,8 +649,10 @@ static void acm_port_down(struct acm *acm) usb_kill_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_kill_urb(acm->wb[i].urb); + tasklet_disable(&acm->urb_task); for (i = 0; i < nr; i++) usb_kill_urb(acm->ru[i].urb); + tasklet_enable(&acm->urb_task); acm->control->needs_remote_wakeup = 0; usb_autopm_put_interface(acm->control); } diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 47085e5..a97c018 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -281,7 +281,7 @@ static void cleanup(struct wdm_device *desc) 
desc->sbuf, desc->validity->transfer_dma); usb_free_coherent(interface_to_usbdev(desc->intf), - desc->wMaxCommand, + desc->bMaxPacketSize0, desc->inbuf, desc->response->transfer_dma); kfree(desc->orq); diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index a7131ad..37518df 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -802,7 +802,7 @@ static int proc_control(struct dev_state *ps, void __user *arg) tbuf, ctrl.wLength, tmo); usb_lock_device(dev); snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, - tbuf, i); + tbuf, max(i, 0)); if ((i > 0) && ctrl.wLength) { if (copy_to_user(ctrl.data, tbuf, i)) { free_page((unsigned long)tbuf); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 233c288..5add8b5 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) int stopped; unsigned count = 0; u8 state; - const __le32 halt = HALT_BIT(ehci); struct ehci_qh_hw *hw = qh->hw; if (unlikely (list_empty (&qh->qtd_list))) @@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) && !(qtd->hw_alt_next & EHCI_LIST_END(ehci))) { stopped = 1; - goto halt; } /* stop scanning when we reach qtds the hc is using */ @@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) */ ehci_clear_tt_buffer(ehci, qh, urb, token); } - - /* force halt for unlinked or blocked qh, so we'll - * patch the qh later and so that completions can't - * activate it while we "know" it's stopped. - */ - if ((halt & hw->hw_token) == 0) { -halt: - hw->hw_token |= halt; - wmb (); - } } /* unless we already know the urb's status, collect qtd status diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index f7a2057..8b1d94a 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p spin_lock_irqsave(&priv->asynclock, flags); list_add_tail(&rq->asynclist, &priv->asynclist); spin_unlock_irqrestore(&priv->asynclock, flags); + kref_get(&rq->ref_count); ret = usb_submit_urb(rq->urb, mem_flags); - if (!ret) { - kref_get(&rq->ref_count); + if (!ret) return rq; - } - kref_put(&rq->ref_count, destroy_async); + destroy_async(&rq->ref_count); err("submit_async_request submit_urb failed with %d", ret); return NULL; } diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 9d49d1c..52312e8 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -322,7 +322,7 @@ static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout) mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); } -static int bfin_musb_get_vbus_status(struct musb *musb) +static int bfin_musb_vbus_status(struct musb *musb) { return 0; } @@ -540,7 +540,7 @@ static struct dev_pm_ops bfin_pm_ops = { .resume = bfin_resume, }; -#define DEV_PM_OPS &bfin_pm_op, +#define DEV_PM_OPS &bfin_pm_ops #else #define DEV_PM_OPS NULL #endif @@ -548,7 +548,7 @@ static struct dev_pm_ops bfin_pm_ops = { static struct platform_driver bfin_driver = { .remove = __exit_p(bfin_remove), .driver = { - .name = "musb-bfin", + .name = "musb-blackfin", .pm = DEV_PM_OPS, }, }; diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c index 0056a41..15e8e1a 100644 --- a/drivers/video/console/tileblit.c +++ b/drivers/video/console/tileblit.c @@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int 
mode, int softback_lines, int fg, int bg) { struct fb_tilecursor cursor; - int use_sw = (vc->vc_cursor_type & 0x01); + int use_sw = (vc->vc_cursor_type & 0x10); cursor.sx = vc->vc_x; cursor.sy = vc->vc_y; diff --git a/fs/aio.c b/fs/aio.c index 26869cd..88f0ed5 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -520,7 +520,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) ctx->reqs_active--; if (unlikely(!ctx->reqs_active && ctx->dead)) - wake_up(&ctx->wait); + wake_up_all(&ctx->wait); } static void aio_fput_routine(struct work_struct *data) @@ -1229,7 +1229,7 @@ static void io_destroy(struct kioctx *ioctx) * by other CPUs at this point. Right now, we rely on the * locking done by the above calls to ensure this consistency. */ - wake_up(&ioctx->wait); + wake_up_all(&ioctx->wait); put_ioctx(ioctx); /* once for the lookup */ } diff --git a/fs/dcache.c b/fs/dcache.c index a39fe47..1baddc1 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1612,10 +1612,13 @@ struct dentry *d_obtain_alias(struct inode *inode) __bit_spin_unlock(0, (unsigned long *)&tmp->d_sb->s_anon.first); spin_unlock(&tmp->d_lock); spin_unlock(&inode->i_lock); + security_d_instantiate(tmp, inode); return tmp; out_iput: + if (res && !IS_ERR(res)) + security_d_instantiate(res, inode); iput(inode); return res; } diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 85c8cc8..0d62f29 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1464,6 +1464,13 @@ static void ext3_orphan_cleanup (struct super_block * sb, return; } + /* Check if feature set allows readwrite operations */ + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { + ext3_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " + "unknown ROCOMPAT features"); + return; + } + if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f6a318f..4381efe 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb); static int ext4_freeze(struct super_block *sb); static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); +static int ext4_feature_set_ok(struct super_block *sb, int readonly); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); static void ext4_clear_request_list(void); @@ -2120,6 +2121,13 @@ static void ext4_orphan_cleanup(struct super_block *sb, return; } + /* Check if feature set would not allow a r/w mount */ + if (!ext4_feature_set_ok(sb, 0)) { + ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to " + "unknown ROCOMPAT features"); + return; + } + if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " diff --git a/fs/namespace.c b/fs/namespace.c index d1edf26..445534b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -2469,9 +2469,6 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, error = user_path_dir(new_root, &new); if (error) goto out0; - error = -EINVAL; - if (!check_mnt(new.mnt)) - goto out1; error = user_path_dir(put_old, &old); if (error) @@ -2491,7 +2488,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, IS_MNT_SHARED(new.mnt->mnt_parent) || IS_MNT_SHARED(root.mnt->mnt_parent)) goto out2; - if (!check_mnt(root.mnt)) + if (!check_mnt(root.mnt) || !check_mnt(new.mnt)) goto out2; error = -ENOENT; if (cant_mount(old.dentry)) @@ -2515,19 +2512,19 @@ 
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, goto out2; /* not attached */ /* make sure we can reach put_old from new_root */ tmp = old.mnt; - br_write_lock(vfsmount_lock); if (tmp != new.mnt) { for (;;) { if (tmp->mnt_parent == tmp) - goto out3; /* already mounted on put_old */ + goto out2; /* already mounted on put_old */ if (tmp->mnt_parent == new.mnt) break; tmp = tmp->mnt_parent; } if (!is_subdir(tmp->mnt_mountpoint, new.dentry)) - goto out3; + goto out2; } else if (!is_subdir(old.dentry, new.dentry)) - goto out3; + goto out2; + br_write_lock(vfsmount_lock); detach_mnt(new.mnt, &parent_path); detach_mnt(root.mnt, &root_parent); /* mount old root on put_old */ @@ -2550,9 +2547,6 @@ out1: path_put(&new); out0: return error; -out3: - br_write_unlock(vfsmount_lock); - goto out2; } static void __init init_mount_tree(void) diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 42b92d7..b5fcbf7 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1214,13 +1214,17 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data) #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait) { + int ret; + if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags)) return 1; - if (may_wait && !out_of_line_wait_on_bit_lock(&nfsi->flags, - NFS_INO_COMMIT, nfs_wait_bit_killable, - TASK_KILLABLE)) - return 1; - return 0; + if (!may_wait) + return 0; + ret = out_of_line_wait_on_bit_lock(&nfsi->flags, + NFS_INO_COMMIT, + nfs_wait_bit_killable, + TASK_KILLABLE); + return (ret < 0) ? ret : 1; } static void nfs_commit_clear_lock(struct nfs_inode *nfsi) @@ -1396,9 +1400,10 @@ int nfs_commit_inode(struct inode *inode, int how) { LIST_HEAD(head); int may_wait = how & FLUSH_SYNC; - int res = 0; + int res; - if (!nfs_commit_set_lock(NFS_I(inode), may_wait)) + res = nfs_commit_set_lock(NFS_I(inode), may_wait); + if (res <= 0) goto out_mark_dirty; spin_lock(&inode->i_lock); res = nfs_scan_commit(inode, &head, 0, 0); @@ -1407,12 +1412,14 @@ int nfs_commit_inode(struct inode *inode, int how) int error = nfs_commit_list(inode, &head, how); if (error < 0) return error; - if (may_wait) - wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, - nfs_wait_bit_killable, - TASK_KILLABLE); - else + if (!may_wait) goto out_mark_dirty; + error = wait_on_bit(&NFS_I(inode)->flags, + NFS_INO_COMMIT, + nfs_wait_bit_killable, + TASK_KILLABLE); + if (error < 0) + return error; } else nfs_commit_clear_lock(NFS_I(inode)); return res; diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index db52546..5fcb139 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -984,8 +984,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *, void *); enum nfsd4_op_flags { ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */ - ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */ - ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */ + ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */ + ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */ }; struct nfsd4_operation { diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 7b566ec..f0e448a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -316,64 +316,6 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE]; static struct list_head client_lru; static struct list_head close_lru; -static void unhash_generic_stateid(struct nfs4_stateid *stp) -{ - list_del(&stp->st_hash); - list_del(&stp->st_perfile); - 
list_del(&stp->st_perstateowner); -} - -static void free_generic_stateid(struct nfs4_stateid *stp) -{ - put_nfs4_file(stp->st_file); - kmem_cache_free(stateid_slab, stp); -} - -static void release_lock_stateid(struct nfs4_stateid *stp) -{ - struct file *file; - - unhash_generic_stateid(stp); - file = find_any_file(stp->st_file); - if (file) - locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); - free_generic_stateid(stp); -} - -static void unhash_lockowner(struct nfs4_stateowner *sop) -{ - struct nfs4_stateid *stp; - - list_del(&sop->so_idhash); - list_del(&sop->so_strhash); - list_del(&sop->so_perstateid); - while (!list_empty(&sop->so_stateids)) { - stp = list_first_entry(&sop->so_stateids, - struct nfs4_stateid, st_perstateowner); - release_lock_stateid(stp); - } -} - -static void release_lockowner(struct nfs4_stateowner *sop) -{ - unhash_lockowner(sop); - nfs4_put_stateowner(sop); -} - -static void -release_stateid_lockowners(struct nfs4_stateid *open_stp) -{ - struct nfs4_stateowner *lock_sop; - - while (!list_empty(&open_stp->st_lockowners)) { - lock_sop = list_entry(open_stp->st_lockowners.next, - struct nfs4_stateowner, so_perstateid); - /* list_del(&open_stp->st_lockowners); */ - BUG_ON(lock_sop->so_is_open_owner); - release_lockowner(lock_sop); - } -} - /* * We store the NONE, READ, WRITE, and BOTH bits separately in the * st_{access,deny}_bmap field of the stateid, in order to track not @@ -446,13 +388,71 @@ static int nfs4_access_bmap_to_omode(struct nfs4_stateid *stp) return nfs4_access_to_omode(access); } -static void release_open_stateid(struct nfs4_stateid *stp) +static void unhash_generic_stateid(struct nfs4_stateid *stp) +{ + list_del(&stp->st_hash); + list_del(&stp->st_perfile); + list_del(&stp->st_perstateowner); +} + +static void free_generic_stateid(struct nfs4_stateid *stp) { int oflag = nfs4_access_bmap_to_omode(stp); + nfs4_file_put_access(stp->st_file, oflag); + put_nfs4_file(stp->st_file); + kmem_cache_free(stateid_slab, stp); +} + +static void release_lock_stateid(struct nfs4_stateid *stp) +{ + struct file *file; + + unhash_generic_stateid(stp); + file = find_any_file(stp->st_file); + if (file) + locks_remove_posix(file, (fl_owner_t)stp->st_stateowner); + free_generic_stateid(stp); +} + +static void unhash_lockowner(struct nfs4_stateowner *sop) +{ + struct nfs4_stateid *stp; + + list_del(&sop->so_idhash); + list_del(&sop->so_strhash); + list_del(&sop->so_perstateid); + while (!list_empty(&sop->so_stateids)) { + stp = list_first_entry(&sop->so_stateids, + struct nfs4_stateid, st_perstateowner); + release_lock_stateid(stp); + } +} + +static void release_lockowner(struct nfs4_stateowner *sop) +{ + unhash_lockowner(sop); + nfs4_put_stateowner(sop); +} + +static void +release_stateid_lockowners(struct nfs4_stateid *open_stp) +{ + struct nfs4_stateowner *lock_sop; + + while (!list_empty(&open_stp->st_lockowners)) { + lock_sop = list_entry(open_stp->st_lockowners.next, + struct nfs4_stateowner, so_perstateid); + /* list_del(&open_stp->st_lockowners); */ + BUG_ON(lock_sop->so_is_open_owner); + release_lockowner(lock_sop); + } +} + +static void release_open_stateid(struct nfs4_stateid *stp) +{ unhash_generic_stateid(stp); release_stateid_lockowners(stp); - nfs4_file_put_access(stp->st_file, oflag); free_generic_stateid(stp); } @@ -3735,6 +3735,7 @@ alloc_init_lock_stateid(struct nfs4_stateowner *sop, struct nfs4_file *fp, struc stp->st_stateid.si_stateownerid = sop->so_id; stp->st_stateid.si_fileid = fp->fi_id; stp->st_stateid.si_generation = 0; + 
stp->st_access_bmap = 0; stp->st_deny_bmap = open_stp->st_deny_bmap; stp->st_openstp = open_stp; @@ -3749,6 +3750,17 @@ check_lock_length(u64 offset, u64 length) LOFF_OVERFLOW(offset, length))); } +static void get_lock_access(struct nfs4_stateid *lock_stp, u32 access) +{ + struct nfs4_file *fp = lock_stp->st_file; + int oflag = nfs4_access_to_omode(access); + + if (test_bit(access, &lock_stp->st_access_bmap)) + return; + nfs4_file_get_access(fp, oflag); + __set_bit(access, &lock_stp->st_access_bmap); +} + /* * LOCK operation */ @@ -3765,7 +3777,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct file_lock conflock; __be32 status = 0; unsigned int strhashval; - unsigned int cmd; int err; dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", @@ -3847,22 +3858,18 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, switch (lock->lk_type) { case NFS4_READ_LT: case NFS4_READW_LT: - if (find_readable_file(lock_stp->st_file)) { - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_READ); - filp = find_readable_file(lock_stp->st_file); - } + filp = find_readable_file(lock_stp->st_file); + if (filp) + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); file_lock.fl_type = F_RDLCK; - cmd = F_SETLK; - break; + break; case NFS4_WRITE_LT: case NFS4_WRITEW_LT: - if (find_writeable_file(lock_stp->st_file)) { - nfs4_get_vfs_file(rqstp, fp, &cstate->current_fh, NFS4_SHARE_ACCESS_WRITE); - filp = find_writeable_file(lock_stp->st_file); - } + filp = find_writeable_file(lock_stp->st_file); + if (filp) + get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); file_lock.fl_type = F_WRLCK; - cmd = F_SETLK; - break; + break; default: status = nfserr_inval; goto out; @@ -3886,7 +3893,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * Note: locks.c uses the BKL to protect the inode's lock list. */ - err = vfs_lock_file(filp, cmd, &file_lock, &conflock); + err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock); switch (-err) { case 0: /* success! */ update_stateid(&lock_stp->st_stateid); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 615f0a9..c6766af 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, u32 dummy; char *machine_name; - int i, j; + int i; int nr_secflavs; READ_BUF(16); @@ -1215,8 +1215,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy * 4); - for (j = 0; j < dummy; ++j) - READ32(dummy); break; case RPC_AUTH_GSS: dprintk("RPC_AUTH_GSS callback secflavor " @@ -1232,7 +1230,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy); - p += XDR_QUADLEN(dummy); break; default: dprintk("Illegal callback secflavor\n"); diff --git a/fs/proc/array.c b/fs/proc/array.c index 7c99c1c..5e4f776 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -489,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, vsize, mm ? get_mm_rss(mm) : 0, rsslim, - mm ? mm->start_code : 0, - mm ? mm->end_code : 0, + mm ? (permitted ? mm->start_code : 1) : 0, + mm ? (permitted ? mm->end_code : 1) : 0, (permitted && mm) ? 
mm->start_stack : 0, esp, eip, diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 60b9148..f269ee6 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -249,8 +249,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) const char *name = arch_vma_name(vma); if (!name) { if (mm) { - if (vma->vm_start <= mm->start_brk && - vma->vm_end >= mm->brk) { + if (vma->vm_start <= mm->brk && + vma->vm_end >= mm->start_brk) { name = "[heap]"; } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { diff --git a/fs/super.c b/fs/super.c index 7e9dd4c..0d89e93 100644 --- a/fs/super.c +++ b/fs/super.c @@ -71,6 +71,7 @@ static struct super_block *alloc_super(struct file_system_type *type) #else INIT_LIST_HEAD(&s->s_files); #endif + s->s_bdi = &default_backing_dev_info; INIT_LIST_HEAD(&s->s_instances); INIT_HLIST_BL_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); @@ -1003,6 +1004,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void } BUG_ON(!mnt->mnt_sb); WARN_ON(!mnt->mnt_sb->s_bdi); + WARN_ON(mnt->mnt_sb->s_bdi == &default_backing_dev_info); mnt->mnt_sb->s_flags |= MS_BORN; error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); diff --git a/fs/sync.c b/fs/sync.c index ba76b96..412dc89 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -33,7 +33,7 @@ static int __sync_filesystem(struct super_block *sb, int wait) * This should be safe, as we require bdi backing to actually * write out data in the first place */ - if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info) + if (sb->s_bdi == &noop_backing_dev_info) return 0; if (sb->s_qcop && sb->s_qcop->quota_sync) @@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(sync_filesystem); static void sync_one_sb(struct super_block *sb, void *arg) { - if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi) + if (!(sb->s_flags & MS_RDONLY)) __sync_filesystem(sb, *(int *)arg); } /* diff --git a/include/linux/compaction.h b/include/linux/compaction.h index dfa2ed4..cc9f7a4 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -11,9 +11,6 @@ /* The full zone was compacted */ #define COMPACT_COMPLETE 3 -#define COMPACT_MODE_DIRECT_RECLAIM 0 -#define COMPACT_MODE_KSWAPD 1 - #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, @@ -28,8 +25,7 @@ extern unsigned long try_to_compact_pages(struct zonelist *zonelist, bool sync); extern unsigned long compaction_suitable(struct zone *zone, int order); extern unsigned long compact_zone_order(struct zone *zone, int order, - gfp_t gfp_mask, bool sync, - int compact_mode); + gfp_t gfp_mask, bool sync); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -74,8 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order) } static inline unsigned long compact_zone_order(struct zone *zone, int order, - gfp_t gfp_mask, bool sync, - int compact_mode) + gfp_t gfp_mask, bool sync) { return COMPACT_CONTINUE; } diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 1908929..a3c1874 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -13,6 +13,9 @@ #ifndef _LINUX_ETHTOOL_H #define _LINUX_ETHTOOL_H +#ifdef __KERNEL__ +#include +#endif #include #include @@ -449,6 +452,37 @@ struct ethtool_rxnfc { __u32 rule_locs[0]; }; +#ifdef __KERNEL__ +#ifdef CONFIG_COMPAT + +struct compat_ethtool_rx_flow_spec { + u32 flow_type; + union { + struct ethtool_tcpip4_spec tcp_ip4_spec; + struct ethtool_tcpip4_spec 
udp_ip4_spec; + struct ethtool_tcpip4_spec sctp_ip4_spec; + struct ethtool_ah_espip4_spec ah_ip4_spec; + struct ethtool_ah_espip4_spec esp_ip4_spec; + struct ethtool_usrip4_spec usr_ip4_spec; + struct ethhdr ether_spec; + u8 hdata[72]; + } h_u, m_u; + compat_u64 ring_cookie; + u32 location; +}; + +struct compat_ethtool_rxnfc { + u32 cmd; + u32 flow_type; + compat_u64 data; + struct compat_ethtool_rx_flow_spec fs; + u32 rule_cnt; + u32 rule_locs[0]; +}; + +#endif /* CONFIG_COMPAT */ +#endif /* __KERNEL__ */ + /** * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR diff --git a/include/linux/mm.h b/include/linux/mm.h index f6385fc..c67adb4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page) /* * PageBuddy() indicate that the page is free and in the buddy system * (see mm/page_alloc.c). + * + * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to + * -2 so that an underflow of the page_mapcount() won't be mistaken + * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very + * efficiently by most CPU architectures. */ +#define PAGE_BUDDY_MAPCOUNT_VALUE (-128) + static inline int PageBuddy(struct page *page) { - return atomic_read(&page->_mapcount) == -2; + return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE; } static inline void __SetPageBuddy(struct page *page) { VM_BUG_ON(atomic_read(&page->_mapcount) != -1); - atomic_set(&page->_mapcount, -2); + atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE); } static inline void __ClearPageBuddy(struct page *page) diff --git a/kernel/cgroup.c b/kernel/cgroup.c index b24d702..bcc7336 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1813,10 +1813,8 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) /* Update the css_set linked lists if we're using them */ write_lock(&css_set_lock); - if (!list_empty(&tsk->cg_list)) { - list_del(&tsk->cg_list); - list_add(&tsk->cg_list, &newcg->tasks); - } + if (!list_empty(&tsk->cg_list)) + list_move(&tsk->cg_list, &newcg->tasks); write_unlock(&css_set_lock); for_each_subsys(root, ss) { @@ -3655,12 +3653,12 @@ again: spin_lock(&release_list_lock); set_bit(CGRP_REMOVED, &cgrp->flags); if (!list_empty(&cgrp->release_list)) - list_del(&cgrp->release_list); + list_del_init(&cgrp->release_list); spin_unlock(&release_list_lock); cgroup_lock_hierarchy(cgrp->root); /* delete this cgroup from parent->children */ - list_del(&cgrp->sibling); + list_del_init(&cgrp->sibling); cgroup_unlock_hierarchy(cgrp->root); d = dget(cgrp->dentry); @@ -3879,7 +3877,7 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss) subsys[ss->subsys_id] = NULL; /* remove subsystem from rootnode's list of subsystems */ - list_del(&ss->sibling); + list_del_init(&ss->sibling); /* * disentangle the css from all css_sets attached to the dummytop. 
as @@ -4253,7 +4251,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) if (!list_empty(&tsk->cg_list)) { write_lock(&css_set_lock); if (!list_empty(&tsk->cg_list)) - list_del(&tsk->cg_list); + list_del_init(&tsk->cg_list); write_unlock(&css_set_lock); } diff --git a/kernel/perf_event.c b/kernel/perf_event.c index b22a2ef..ad02fea 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -6115,17 +6115,20 @@ __perf_event_exit_task(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { - struct perf_event *parent_event; + if (child_event->parent) { + raw_spin_lock_irq(&child_ctx->lock); + perf_group_detach(child_event); + raw_spin_unlock_irq(&child_ctx->lock); + } perf_event_remove_from_context(child_event); - parent_event = child_event->parent; /* - * It can happen that parent exits first, and has events + * It can happen that the parent exits first, and has events * that are still around due to the child reference. These - * events need to be zapped - but otherwise linger. + * events need to be zapped. */ - if (parent_event) { + if (child_event->parent) { sync_child_event(child_event, child); free_event(child_event); } diff --git a/kernel/signal.c b/kernel/signal.c index 4e3cff1..3175186 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2421,9 +2421,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, return -EFAULT; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. */ - if (info.si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ + if (info.si_code != SI_QUEUE) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info.si_code < 0); return -EPERM; + } info.si_signo = sig; /* POSIX.1b doesn't mention process groups. */ @@ -2437,9 +2441,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) return -EINVAL; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. */ - if (info->si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. 
+ */ + if (info->si_code != SI_QUEUE) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info->si_code < 0); return -EPERM; + } info->si_signo = sig; return do_send_specific(tgid, pid, sig, info); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4eed0af..443fd20 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -169,6 +169,11 @@ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif +#ifdef CONFIG_PRINTK +static int proc_dmesg_restrict(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos); +#endif + #ifdef CONFIG_MAGIC_SYSRQ /* Note: sysrq code uses it's own private copy */ static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE; @@ -713,7 +718,7 @@ static struct ctl_table kern_table[] = { .data = &kptr_restrict, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_dmesg_restrict, .extra1 = &zero, .extra2 = &two, }, @@ -2397,6 +2402,17 @@ static int proc_taint(struct ctl_table *table, int write, return err; } +#ifdef CONFIG_PRINTK +static int proc_dmesg_restrict(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + if (write && !capable(CAP_SYS_ADMIN)) + return -EPERM; + + return proc_dointvec_minmax(table, write, buffer, lenp, ppos); +} +#endif + struct do_proc_dointvec_minmax_conv_param { int *min; int *max; diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 027100d..8e4ed88 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -604,7 +604,7 @@ static void bdi_prune_sb(struct backing_dev_info *bdi) spin_lock(&sb_lock); list_for_each_entry(sb, &super_blocks, s_list) { if (sb->s_bdi == bdi) - sb->s_bdi = NULL; + sb->s_bdi = &default_backing_dev_info; } spin_unlock(&sb_lock); } diff --git a/mm/compaction.c b/mm/compaction.c index 8be430b8..dcb058b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -42,8 +42,6 @@ struct compact_control { unsigned int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; - - int compact_mode; }; static unsigned long release_freepages(struct list_head *freelist) @@ -397,10 +395,7 @@ static int compact_finished(struct zone *zone, return COMPACT_COMPLETE; /* Compaction run is not finished if the watermark is not met */ - if (cc->compact_mode != COMPACT_MODE_KSWAPD) - watermark = low_wmark_pages(zone); - else - watermark = high_wmark_pages(zone); + watermark = low_wmark_pages(zone); watermark += (1 << cc->order); if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) @@ -413,15 +408,6 @@ static int compact_finished(struct zone *zone, if (cc->order == -1) return COMPACT_CONTINUE; - /* - * Generating only one page of the right order is not enough - * for kswapd, we must continue until we're above the high - * watermark as a pool for high order GFP_ATOMIC allocations - * too. - */ - if (cc->compact_mode == COMPACT_MODE_KSWAPD) - return COMPACT_CONTINUE; - /* Direct compactor: Is a suitable page free? 
*/ for (order = cc->order; order < MAX_ORDER; order++) { /* Job done if page is free of the right migratetype */ @@ -543,8 +529,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - bool sync, - int compact_mode) + bool sync) { struct compact_control cc = { .nr_freepages = 0, @@ -553,7 +538,6 @@ unsigned long compact_zone_order(struct zone *zone, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, .sync = sync, - .compact_mode = compact_mode, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -599,8 +583,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; - status = compact_zone_order(zone, order, gfp_mask, sync, - COMPACT_MODE_DIRECT_RECLAIM); + status = compact_zone_order(zone, order, gfp_mask, sync); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ @@ -631,7 +614,6 @@ static int compact_node(int nid) .nr_freepages = 0, .nr_migratepages = 0, .order = -1, - .compact_mode = COMPACT_MODE_DIRECT_RECLAIM, }; zone = &pgdat->node_zones[zoneid]; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7dcca55..33b5861 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -31,6 +31,7 @@ #include <linux/memcontrol.h> #include <linux/mempolicy.h> #include <linux/security.h> +#include <linux/ptrace.h> int sysctl_panic_on_oom; int sysctl_oom_kill_allocating_task; @@ -292,13 +293,15 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, unsigned long totalpages, struct mem_cgroup *mem, const nodemask_t *nodemask) { - struct task_struct *p; + struct task_struct *g, *p; struct task_struct *chosen = NULL; *ppoints = 0; - for_each_process(p) { + do_each_thread(g, p) { unsigned int points; + if (!p->mm) + continue; if (oom_unkillable_task(p, mem, nodemask)) continue; @@ -314,22 +317,29 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, if (test_tsk_thread_flag(p, TIF_MEMDIE)) return ERR_PTR(-1UL); - /* - * This is in the process of releasing memory so wait for it - * to finish before killing some other task by mistake. - * - * However, if p is the current task, we allow the 'kill' to - * go ahead if it is exiting: this will simply set TIF_MEMDIE, - * which will allow it to gain access to memory reserves in - * the process of exiting and releasing its resources. - * Otherwise we could get an easy OOM deadlock. - */ - if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) { - if (p != current) - return ERR_PTR(-1UL); - - chosen = p; - *ppoints = 1000; + if (p->flags & PF_EXITING) { + /* + * If p is the current task and is in the process of + * releasing memory, we allow the "kill" to set + * TIF_MEMDIE, which will allow it to gain access to + * memory reserves. Otherwise, it may stall forever. + * + * The loop isn't broken here, however, in case other + * threads are found to have already been oom killed. + */ + if (p == current) { + chosen = p; + *ppoints = 1000; + } else { + /* + * If this task is not being ptraced on exit, + * then wait for it to finish before killing + * some other task unnecessarily.
+ */ + if (!(task_ptrace(p->group_leader) & + PT_TRACE_EXIT)) + return ERR_PTR(-1UL); + } } points = oom_badness(p, mem, nodemask, totalpages); @@ -337,7 +347,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, chosen = p; *ppoints = points; } - } + } while_each_thread(g, p); return chosen; } @@ -491,6 +501,8 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, list_for_each_entry(child, &t->children, sibling) { unsigned int child_points; + if (child->mm == p->mm) + continue; /* * oom_badness() returns 0 if the thread is unkillable */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cdef1d4..2828037 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -286,7 +286,7 @@ static void bad_page(struct page *page) /* Don't complain about poisoned pages */ if (PageHWPoison(page)) { - __ClearPageBuddy(page); + reset_page_mapcount(page); /* remove PageBuddy */ return; } @@ -317,7 +317,7 @@ static void bad_page(struct page *page) dump_stack(); out: /* Leave bad fields for debug, except PageBuddy could make trouble */ - __ClearPageBuddy(page); + reset_page_mapcount(page); /* remove PageBuddy */ add_taint(TAINT_BAD_PAGE); } diff --git a/mm/shmem.c b/mm/shmem.c index 5ee67c99..5ac23d5 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2791,5 +2791,6 @@ int shmem_zero_setup(struct vm_area_struct *vma) fput(vma->vm_file); vma->vm_file = file; vma->vm_ops = &shmem_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; return 0; } diff --git a/mm/slab.c b/mm/slab.c index 37961d1f..4c6e2e3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2288,8 +2288,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (ralign < align) { ralign = align; } - /* disable debug if not aligning with REDZONE_ALIGN */ - if (ralign & (__alignof__(unsigned long long) - 1)) + /* disable debug if necessary */ + if (ralign > __alignof__(unsigned long long)) flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. @@ -2315,8 +2315,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, */ if (flags & SLAB_RED_ZONE) { /* add space for red zone words */ - cachep->obj_offset += align; - size += align + sizeof(unsigned long long); + cachep->obj_offset += sizeof(unsigned long long); + size += 2 * sizeof(unsigned long long); } if (flags & SLAB_STORE_USER) { /* user store requires one word storage behind the end of diff --git a/mm/swapfile.c b/mm/swapfile.c index 0341c57..6d6d28c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2149,8 +2149,13 @@ bad_swap_2: p->flags = 0; spin_unlock(&swap_lock); vfree(swap_map); - if (swap_file) + if (swap_file) { + if (did_down) { + mutex_unlock(&inode->i_mutex); + did_down = 0; + } filp_close(swap_file, NULL); + } out: if (page && !IS_ERR(page)) { kunmap(page); diff --git a/mm/vmscan.c b/mm/vmscan.c index 6771ea7..3b4a41d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2397,7 +2397,6 @@ loop_again: * cause too much scanning of the lower zones. 
*/ for (i = 0; i <= end_zone; i++) { - int compaction; struct zone *zone = pgdat->node_zones + i; int nr_slab; @@ -2428,24 +2427,9 @@ loop_again: sc.nr_reclaimed += reclaim_state->reclaimed_slab; total_scanned += sc.nr_scanned; - compaction = 0; - if (order && - zone_watermark_ok(zone, 0, - high_wmark_pages(zone), - end_zone, 0) && - !zone_watermark_ok(zone, order, - high_wmark_pages(zone), - end_zone, 0)) { - compact_zone_order(zone, - order, - sc.gfp_mask, false, - COMPACT_MODE_KSWAPD); - compaction = 1; - } - if (zone->all_unreclaimable) continue; - if (!compaction && nr_slab == 0 && + if (nr_slab == 0 && !zone_reclaimable(zone)) zone->all_unreclaimable = 1; /* diff --git a/net/socket.c b/net/socket.c index ac2219f..29c7df0 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2583,23 +2583,123 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { + struct compat_ethtool_rxnfc __user *compat_rxnfc; + bool convert_in = false, convert_out = false; + size_t buf_size = ALIGN(sizeof(struct ifreq), 8); + struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; + u32 rule_cnt = 0, actual_rule_cnt; + u32 ethcmd; u32 data; - void __user *datap; + int ret; + + if (get_user(data, &ifr32->ifr_ifru.ifru_data)) + return -EFAULT; - ifr = compat_alloc_user_space(sizeof(*ifr)); + compat_rxnfc = compat_ptr(data); - if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) + if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; - if (get_user(data, &ifr32->ifr_ifru.ifru_data)) + /* Most ethtool structures are defined without padding. + * Unfortunately struct ethtool_rxnfc is an exception. + */ + switch (ethcmd) { + default: + break; + case ETHTOOL_GRXCLSRLALL: + /* Buffer size is variable */ + if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) + return -EFAULT; + if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) + return -ENOMEM; + buf_size += rule_cnt * sizeof(u32); + /* fall through */ + case ETHTOOL_GRXRINGS: + case ETHTOOL_GRXCLSRLCNT: + case ETHTOOL_GRXCLSRULE: + convert_out = true; + /* fall through */ + case ETHTOOL_SRXCLSRLDEL: + case ETHTOOL_SRXCLSRLINS: + buf_size += sizeof(struct ethtool_rxnfc); + convert_in = true; + break; + } + + ifr = compat_alloc_user_space(buf_size); + rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); + + if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; - datap = compat_ptr(data); - if (put_user(datap, &ifr->ifr_ifru.ifru_data)) + if (put_user(convert_in ? rxnfc : compat_ptr(data), + &ifr->ifr_ifru.ifru_data)) return -EFAULT; - return dev_ioctl(net, SIOCETHTOOL, ifr); + if (convert_in) { + /* We expect there to be holes between fs.m_u and + * fs.ring_cookie and at the end of fs, but nowhere else. 
+ */ + BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) + + sizeof(compat_rxnfc->fs.m_u) != + offsetof(struct ethtool_rxnfc, fs.m_u) + + sizeof(rxnfc->fs.m_u)); + BUILD_BUG_ON( + offsetof(struct compat_ethtool_rxnfc, fs.location) - + offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != + offsetof(struct ethtool_rxnfc, fs.location) - + offsetof(struct ethtool_rxnfc, fs.ring_cookie)); + + if (copy_in_user(rxnfc, compat_rxnfc, + (void *)(&rxnfc->fs.m_u + 1) - + (void *)rxnfc) || + copy_in_user(&rxnfc->fs.ring_cookie, + &compat_rxnfc->fs.ring_cookie, + (void *)(&rxnfc->fs.location + 1) - + (void *)&rxnfc->fs.ring_cookie) || + copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, + sizeof(rxnfc->rule_cnt))) + return -EFAULT; + } + + ret = dev_ioctl(net, SIOCETHTOOL, ifr); + if (ret) + return ret; + + if (convert_out) { + if (copy_in_user(compat_rxnfc, rxnfc, + (const void *)(&rxnfc->fs.m_u + 1) - + (const void *)rxnfc) || + copy_in_user(&compat_rxnfc->fs.ring_cookie, + &rxnfc->fs.ring_cookie, + (const void *)(&rxnfc->fs.location + 1) - + (const void *)&rxnfc->fs.ring_cookie) || + copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, + sizeof(rxnfc->rule_cnt))) + return -EFAULT; + + if (ethcmd == ETHTOOL_GRXCLSRLALL) { + /* As an optimisation, we only copy the actual + * number of rules that the underlying + * function returned. Since Mallory might + * change the rule count in user memory, we + * check that it is less than the rule count + * originally given (as the user buffer size), + * which has been range-checked. + */ + if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) + return -EFAULT; + if (actual_rule_cnt < rule_cnt) + rule_cnt = actual_rule_cnt; + if (copy_in_user(&compat_rxnfc->rule_locs[0], + &rxnfc->rule_locs[0], + rule_cnt * sizeof(u32))) + return -EFAULT; + } + } + + return 0; } static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32) diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index be96d42..1e336a0 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; + transport->srcport = 0; + write_lock_bh(&sk->sk_callback_lock); transport->inet = NULL; transport->sock = NULL; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index acd2099..c2eb6a7 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -16085,9 +16085,12 @@ static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec, return err; } else { const char *name = pfx; - if (!name) + int index = i; + if (!name) { name = chname[i]; - err = __alc861_create_out_sw(codec, name, nid, i, 3); + index = 0; + } + err = __alc861_create_out_sw(codec, name, nid, index, 3); if (err < 0) return err; } @@ -17238,16 +17241,19 @@ static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec, return err; } else { const char *name = pfx; - if (!name) + int index = i; + if (!name) { name = chname[i]; + index = 0; + } err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, - name, i, + name, index, HDA_COMPOSE_AMP_VAL(nid_v, 3, 0, HDA_OUTPUT)); if (err < 0) return err; err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, - name, i, + name, index, HDA_COMPOSE_AMP_VAL(nid_s, 3, 2, HDA_INPUT)); if (err < 0) @@ -19296,12 +19302,15 @@ static int alc662_auto_create_multi_out_ctls(struct hda_codec *codec, return err; } else { const char *name = pfx; - if (!name) + int index = i; + if (!name) { name = chname[i]; - err = 
__alc662_add_vol_ctl(spec, name, nid, i, 3); + index = 0; + } + err = __alc662_add_vol_ctl(spec, name, nid, index, 3); if (err < 0) return err; - err = __alc662_add_sw_ctl(spec, name, mix, i, 3); + err = __alc662_add_sw_ctl(spec, name, mix, index, 3); if (err < 0) return err; } diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 052062d..8566119 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -94,6 +94,7 @@ enum { STAC_92HD83XXX_REF, STAC_92HD83XXX_PWR_REF, STAC_DELL_S14, + STAC_DELL_E5520M, STAC_92HD83XXX_HP, STAC_HP_DV7_4000, STAC_92HD83XXX_MODELS @@ -1657,6 +1658,13 @@ static unsigned int dell_s14_pin_configs[10] = { 0x40f000f0, 0x40f000f0, }; +/* Switch int mic from 0x20 to 0x11 */ +static unsigned int dell_e5520m_pin_configs[10] = { + 0x04a11020, 0x0421101f, 0x400000f0, 0x90170110, + 0x23011050, 0x23a1102e, 0x400000f3, 0xd5a30130, + 0x400000f0, 0x40f000f0, +}; + static unsigned int hp_dv7_4000_pin_configs[10] = { 0x03a12050, 0x0321201f, 0x40f000f0, 0x90170110, 0x40f000f0, 0x40f000f0, 0x90170110, 0xd5a30140, @@ -1667,6 +1675,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = { [STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs, [STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs, [STAC_DELL_S14] = dell_s14_pin_configs, + [STAC_DELL_E5520M] = dell_e5520m_pin_configs, [STAC_HP_DV7_4000] = hp_dv7_4000_pin_configs, }; @@ -1675,6 +1684,7 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = { [STAC_92HD83XXX_REF] = "ref", [STAC_92HD83XXX_PWR_REF] = "mic-ref", [STAC_DELL_S14] = "dell-s14", + [STAC_DELL_E5520M] = "dell-e5520m", [STAC_92HD83XXX_HP] = "hp", [STAC_HP_DV7_4000] = "hp-dv7-4000", }; @@ -1687,6 +1697,14 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = { "DFI LanParty", STAC_92HD83XXX_REF), SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba, "unknown Dell", STAC_DELL_S14), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049a, + "Dell E5520", STAC_DELL_E5520M), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x049b, + "Dell E5420", STAC_DELL_E5520M), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04eb, + "Dell E5420m", STAC_DELL_E5520M), + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x04ec, + "Dell E5520m", STAC_DELL_E5520M), SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600, "HP", STAC_92HD83XXX_HP), {} /* terminator */ diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 63b0054..acc4579 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -159,6 +159,7 @@ struct via_spec { #endif }; +static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec); static struct via_spec * via_new_spec(struct hda_codec *codec) { struct via_spec *spec; @@ -169,6 +170,10 @@ static struct via_spec * via_new_spec(struct hda_codec *codec) codec->spec = spec; spec->codec = codec; + spec->codec_type = get_codec_type(codec); + /* VT1708BCE & VT1708S are almost same */ + if (spec->codec_type == VT1708BCE) + spec->codec_type = VT1708S; return spec; } @@ -1101,6 +1106,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol, struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct via_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); + int ret; if (!spec->mux_nids[adc_idx]) return -EINVAL; @@ -1109,12 +1115,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol, AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0) snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0, AC_VERB_SET_POWER_STATE, AC_PWRST_D0); - /* update jack power state */ - 
set_jack_power_state(codec); - return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, + ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol, spec->mux_nids[adc_idx], &spec->cur_mux[adc_idx]); + /* update jack power state */ + set_jack_power_state(codec); + + return ret; } static int via_independent_hp_info(struct snd_kcontrol *kcontrol, @@ -1188,8 +1196,16 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol, /* Get Independent Mode index of headphone pin widget */ spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel ? 1 : 0; - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel); + if (spec->codec_type == VT1718S) + snd_hda_codec_write(codec, nid, 0, + AC_VERB_SET_CONNECT_SEL, pinsel ? 2 : 0); + else + snd_hda_codec_write(codec, nid, 0, + AC_VERB_SET_CONNECT_SEL, pinsel); + if (spec->codec_type == VT1812) + snd_hda_codec_write(codec, 0x35, 0, + AC_VERB_SET_CONNECT_SEL, pinsel); if (spec->multiout.hp_nid && spec->multiout.hp_nid != spec->multiout.dac_nids[HDA_FRONT]) snd_hda_codec_setup_stream(codec, spec->multiout.hp_nid, @@ -1208,6 +1224,8 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol, activate_ctl(codec, "Headphone Playback Switch", spec->hp_independent_mode); } + /* update jack power state */ + set_jack_power_state(codec); return 0; } @@ -1248,9 +1266,12 @@ static int via_hp_build(struct hda_codec *codec) break; } - nums = snd_hda_get_connections(codec, nid, conn, HDA_MAX_CONNECTIONS); - if (nums <= 1) - return 0; + if (spec->codec_type != VT1708) { + nums = snd_hda_get_connections(codec, nid, + conn, HDA_MAX_CONNECTIONS); + if (nums <= 1) + return 0; + } knew = via_clone_control(spec, &via_hp_mixer[0]); if (knew == NULL) @@ -1310,6 +1331,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute) start_idx = 2; end_idx = 4; break; + case VT1718S: + nid_mixer = 0x21; + start_idx = 1; + end_idx = 3; + break; default: return; } @@ -2185,10 +2211,6 @@ static int via_init(struct hda_codec *codec) for (i = 0; i < spec->num_iverbs; i++) snd_hda_sequence_write(codec, spec->init_verbs[i]); - spec->codec_type = get_codec_type(codec); - if (spec->codec_type == VT1708BCE) - spec->codec_type = VT1708S; /* VT1708BCE & VT1708S are almost - same */ /* Lydia Add for EAPD enable */ if (!spec->dig_in_nid) { /* No Digital In connection */ if (spec->dig_in_pin) { @@ -2438,7 +2460,14 @@ static int vt_auto_create_analog_input_ctls(struct hda_codec *codec, else type_idx = 0; label = hda_get_autocfg_input_label(codec, cfg, i); - err = via_new_analog_input(spec, label, type_idx, idx, cap_nid); + if (spec->codec_type == VT1708S || + spec->codec_type == VT1702 || + spec->codec_type == VT1716S) + err = via_new_analog_input(spec, label, type_idx, + idx+1, cap_nid); + else + err = via_new_analog_input(spec, label, type_idx, + idx, cap_nid); if (err < 0) return err; snd_hda_add_imux_item(imux, label, idx, NULL); diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c index e76847a..48ffd40 100644 --- a/sound/soc/codecs/uda134x.c +++ b/sound/soc/codecs/uda134x.c @@ -486,7 +486,8 @@ static struct snd_soc_dai_driver uda134x_dai = { static int uda134x_soc_probe(struct snd_soc_codec *codec) { struct uda134x_priv *uda134x; - struct uda134x_platform_data *pd = dev_get_drvdata(codec->card->dev); + struct uda134x_platform_data *pd = codec->card->dev->platform_data; + int ret; printk(KERN_INFO "UDA134X SoC Audio Codec\n"); diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c index 
2c09e93..86f1dc4 100644 --- a/sound/soc/samsung/s3c24xx_uda134x.c +++ b/sound/soc/samsung/s3c24xx_uda134x.c @@ -226,7 +226,7 @@ static struct snd_soc_ops s3c24xx_uda134x_ops = { static struct snd_soc_dai_link s3c24xx_uda134x_dai_link = { .name = "UDA134X", .stream_name = "UDA134X", - .codec_name = "uda134x-hifi", + .codec_name = "uda134x-codec", .codec_dai_name = "uda134x-hifi", .cpu_dai_name = "s3c24xx-iis", .ops = &s3c24xx_uda134x_ops, @@ -321,6 +321,7 @@ static int s3c24xx_uda134x_probe(struct platform_device *pdev) platform_set_drvdata(s3c24xx_uda134x_snd_device, &snd_soc_s3c24xx_uda134x); + platform_device_add_data(s3c24xx_uda134x_snd_device, &s3c24xx_uda134x, sizeof(s3c24xx_uda134x)); ret = platform_device_add(s3c24xx_uda134x_snd_device); if (ret) { printk(KERN_ERR "S3C24XX_UDA134X SoC Audio: Unable to add\n");