diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices index e894902..ed1779c 100644 --- a/Documentation/i2c/instantiating-devices +++ b/Documentation/i2c/instantiating-devices @@ -100,7 +100,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) (...) i2c_adap = i2c_get_adapter(2); memset(&i2c_info, 0, sizeof(struct i2c_board_info)); - strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); + strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, normal_i2c); i2c_put_adapter(i2c_adap); diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index c74ff6f..713c589 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -2672,6 +2672,10 @@ and is between 256 and 4096 characters. It is defined in the file disables clocksource verification at runtime. Used to enable high-resolution timer mode on older hardware, and in virtualized environment. + [x86] noirqtime: Do not use TSC to do irq accounting. + Used to run time disable IRQ_TIME_ACCOUNTING on any + platforms where RDTSC is slow and this accounting + can add overhead. turbografx.map[2|3]= [HW,JOY] TurboGraFX parallel port interface diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt index e67211f..c537834 100644 --- a/Documentation/power/interface.txt +++ b/Documentation/power/interface.txt @@ -57,7 +57,7 @@ smallest image possible. In particular, if "0" is written to this file, the suspend image will be as small as possible. Reading from this file will display the current image size limit, which -is set to 500 MB by default. +is set to 2/5 of available RAM by default. /sys/power/pm_trace controls the code which saves the last PM event point in the RTC across reboots, so that you can debug a machine that just hangs diff --git a/Makefile b/Makefile index 78daa86..92b2538 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 35 -EXTRAVERSION = .11 +EXTRAVERSION = .12 NAME = Yokohama # *DOCUMENTATION* diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a6..abaf844 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, unsigned long handler = (unsigned long)ka->sa.sa_handler; unsigned long retcode; int thumb = 0; - unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; + unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); + + cpsr |= PSR_ENDSTATE; /* * Maybe we need to deliver a 32-bit signal to a 26-bit task. 
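The Documentation/i2c/instantiating-devices hunk above matters because i2c_new_probed_device() matches a driver on i2c_board_info.type, not on .name, which is exactly what the one-line fix corrects. Spelled out as a self-contained sketch, the probe-time pattern from that example looks roughly like the following; the helper name and the probe address list are invented for illustration, while the adapter number, the "isp1301_pnx" type string and the i2c_* calls are taken from the documentation snippet itself:

	#include <linux/i2c.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	/* candidate 7-bit addresses to probe -- illustrative values only */
	static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
	static struct i2c_client *isp1301_i2c_client;

	static int example_isp1301_attach(void)
	{
		struct i2c_adapter *i2c_adap;
		struct i2c_board_info i2c_info;

		i2c_adap = i2c_get_adapter(2);	/* bus number used by the example */
		if (!i2c_adap)
			return -ENODEV;

		memset(&i2c_info, 0, sizeof(struct i2c_board_info));
		/* the chip is identified by .type, which is what the fix fills in */
		strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);

		isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
							   normal_i2c);
		i2c_put_adapter(i2c_adap);

		return isp1301_i2c_client ? 0 : -ENODEV;
	}
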
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h index 9f342a5..dd028f2 100644 --- a/arch/ia64/include/asm/system.h +++ b/arch/ia64/include/asm/system.h @@ -272,10 +272,6 @@ void cpu_idle_wait(void); void default_idle(void); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void account_system_vtime(struct task_struct *); -#endif - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index e30e42a..956f946 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c @@ -28,6 +28,8 @@ #include #include +#include + static struct gpio_keys_button mtx1_gpio_button[] = { { .gpio = 207, @@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { &mtx1_mtd, }; +static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { + .phy_search_highest_addr = 1, + .phy1_search_mac0 = 1, +}; + static int __init mtx1_register_devices(void) { int rc; + au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); + rc = gpio_request(mtx1_gpio_button[0].gpio, mtx1_gpio_button[0].desc); if (rc < 0) { diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index df971fa..4896ed0 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1126,15 +1126,13 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) unsigned int i; unsigned long flags; - for (i = 0; i < count && i < 79;) { + for (i = 0; i < count;) { switch(str[i]) { case '\n': iodc_dbuf[i+0] = '\r'; iodc_dbuf[i+1] = '\n'; i += 2; goto print; - case '\b': /* BS */ - i--; /* overwrite last */ default: iodc_dbuf[i] = str[i]; i++; @@ -1142,15 +1140,6 @@ int pdc_iodc_print(const unsigned char *str, unsigned count) } } - /* if we're at the end of line, and not already inserting a newline, - * insert one anyway. iodc console doesn't claim to support >79 char - * lines. don't account for this in the return value. - */ - if (i == 79 && iodc_dbuf[i-1] != '\n') { - iodc_dbuf[i+0] = '\r'; - iodc_dbuf[i+1] = '\n'; - } - print: spin_lock_irqsave(&pdc_lock, flags); real32_call(PAGE0->mem_cons.iodc_io, diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index efbcee5..e13fc3f 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -117,7 +117,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) int cpu_dest; /* timer and ipi have to always be received on all CPUs */ - if (CHECK_IRQ_PER_CPU(irq)) { + if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) { /* Bad linux design decision. 
The mask has already * been set; we must reset it */ cpumask_setall(irq_desc[irq].affinity); diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d62fdf4..1dc338c 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -878,6 +878,7 @@ #define PV_970 0x0039 #define PV_POWER5 0x003A #define PV_POWER5p 0x003B +#define PV_POWER7 0x003F #define PV_970FX 0x003C #define PV_630 0x0040 #define PV_630p 0x0041 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index a6297c6..880fb57 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -545,10 +545,6 @@ extern void reloc_got2(unsigned long); #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void account_system_vtime(struct task_struct *); -#endif - extern struct dentry *powerpc_debugfs_root; #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 55cba4a..f8cd9fb 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -18,7 +18,7 @@ #include _GLOBAL(__setup_cpu_603) - mflr r4 + mflr r5 BEGIN_MMU_FTR_SECTION li r10,0 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION bl __init_fpu_registers END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) bl setup_common_caches - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_604) - mflr r4 + mflr r5 bl setup_common_caches bl setup_604_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750cx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750cx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750fx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750fx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7400) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_7400_workarounds bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7410) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_7410_workarounds bl setup_common_caches bl setup_750_7400_hid0 li r3,0 mtspr SPRN_L2CR2,r3 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_745x) - mflr r4 + mflr r5 bl setup_common_caches bl setup_745x_specifics - mtlr r4 + mtlr r5 blr /* Enable caches for 603's, 604, 750 & 7400 */ @@ -194,10 +194,10 @@ setup_750cx: cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr2+eq bnelr - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) blr /* 750fx specific @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION andis. r11,r11,L3CR_L3E@h beq 1f END_FTR_SECTION_IFSET(CPU_FTR_L3CR) - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) andi. 
r0,r6,CPU_FTR_L3_DISABLE_NAP beq 1f li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) 1: mfspr r11,SPRN_HID0 diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index ed31a29..060c6d3 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -199,9 +200,32 @@ static void kexec_prepare_cpus_wait(int wait_state) mb(); } -static void kexec_prepare_cpus(void) +/* + * We need to make sure each present CPU is online. The next kernel will scan + * the device tree and assume primary threads are online and query secondary + * threads via RTAS to online them if required. If we don't online primary + * threads, they will be stuck. However, we also online secondary threads as we + * may be using 'cede offline'. In this case RTAS doesn't see the secondary + * threads as offline -- and again, these CPUs will be stuck. + * + * So, we online all CPUs that should be running, including secondary threads. + */ +static void wake_offline_cpus(void) { + int cpu = 0; + + for_each_present_cpu(cpu) { + if (!cpu_online(cpu)) { + printk(KERN_INFO "kexec: Waking offline cpu %d.\n", + cpu); + cpu_up(cpu); + } + } +} +static void kexec_prepare_cpus(void) +{ + wake_offline_cpus(); smp_call_function(kexec_smp_down, NULL, /* wait */0); local_irq_disable(); mb(); /* make sure IRQs are disabled before we say they are */ diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 5c14ffe..59f2467 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1230,6 +1230,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1277,7 +1299,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } } diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 30e1626..494a63c 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -952,7 +952,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance) if (dsr & DOORBELL_DSR_QFI) { pr_info("RIO: doorbell queue full\n"); out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); - goto out; } /* XXX Need to check/dispatch until queue empty */ diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 73e2598..f73b50a 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -150,11 +150,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); */ extern unsigned long thread_saved_pc(struct task_struct *t); -/* - * Print register of task into buffer. 
Used in fs/proc/array.c. - */ -extern void task_show_regs(struct seq_file *m, struct task_struct *task); - extern void show_code(struct pt_regs *regs); unsigned long get_wchan(struct task_struct *p); diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index cef6621..38ddd8a 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs) extern void account_vtime(struct task_struct *, struct task_struct *); extern void account_tick_vtime(struct task_struct *); -extern void account_system_vtime(struct task_struct *); #ifdef CONFIG_PFAULT extern void pfault_irq_init(void); diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 5d8f0f3..657cb17 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs) show_last_breaking_event(regs); } -/* This is called from fs/proc/array.c */ -void task_show_regs(struct seq_file *m, struct task_struct *task) -{ - struct pt_regs *regs; - - regs = task_pt_regs(task); - seq_printf(m, "task: %p, ksp: %p\n", - task, (void *)task->thread.ksp); - seq_printf(m, "User PSW : %p %p\n", - (void *) regs->psw.mask, (void *)regs->psw.addr); - - seq_printf(m, "User GPRS: " FOURLONG, - regs->gprs[0], regs->gprs[1], - regs->gprs[2], regs->gprs[3]); - seq_printf(m, " " FOURLONG, - regs->gprs[4], regs->gprs[5], - regs->gprs[6], regs->gprs[7]); - seq_printf(m, " " FOURLONG, - regs->gprs[8], regs->gprs[9], - regs->gprs[10], regs->gprs[11]); - seq_printf(m, " " FOURLONG, - regs->gprs[12], regs->gprs[13], - regs->gprs[14], regs->gprs[15]); - seq_printf(m, "User ACRS: %08x %08x %08x %08x\n", - task->thread.acrs[0], task->thread.acrs[1], - task->thread.acrs[2], task->thread.acrs[3]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[4], task->thread.acrs[5], - task->thread.acrs[6], task->thread.acrs[7]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[8], task->thread.acrs[9], - task->thread.acrs[10], task->thread.acrs[11]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[12], task->thread.acrs[13], - task->thread.acrs[14], task->thread.acrs[15]); -} - static DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f942bb7..d2b8210 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -797,6 +797,17 @@ config SCHED_MC making when dealing with multi-core CPU chips at a cost of slightly increased overhead in some places. If unsure say N here. +config IRQ_TIME_ACCOUNTING + bool "Fine granularity task level IRQ time accounting" + default n + ---help--- + Select this option to enable fine granularity task irq time + accounting. This is done by reading a timestamp on each + transitions between softirq and hardirq state, so there can be a + small performance impact. + + If in doubt, say N here. 
+ source "kernel/Kconfig.preempt" config X86_UP_APIC diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index aa2c39d..27d8379 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -88,6 +88,7 @@ extern int acpi_disabled; extern int acpi_pci_disabled; extern int acpi_skip_timer_override; extern int acpi_use_timer_override; +extern int acpi_fix_pin2_polarity; extern u8 acpi_sci_flags; extern int acpi_sci_override_gsi; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 76f5483..535c373 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -77,7 +77,7 @@ #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) #define KVM_MIN_FREE_MMU_PAGES 5 #define KVM_REFILL_PAGES 25 -#define KVM_MAX_CPUID_ENTRIES 40 +#define KVM_MAX_CPUID_ENTRIES 80 #define KVM_NR_FIXED_MTRR_REGION 88 #define KVM_NR_VAR_MTRR 8 @@ -673,20 +673,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) return (struct kvm_mmu_page *)page_private(page); } -static inline u16 kvm_read_fs(void) -{ - u16 seg; - asm("mov %%fs, %0" : "=g"(seg)); - return seg; -} - -static inline u16 kvm_read_gs(void) -{ - u16 seg; - asm("mov %%gs, %0" : "=g"(seg)); - return seg; -} - static inline u16 kvm_read_ldt(void) { u16 ldt; @@ -694,16 +680,6 @@ static inline u16 kvm_read_ldt(void) return ldt; } -static inline void kvm_load_fs(u16 sel) -{ - asm("mov %0, %%fs" : : "rm"(sel)); -} - -static inline void kvm_load_gs(u16 sel) -{ - asm("mov %0, %%gs" : : "rm"(sel)); -} - static inline void kvm_load_ldt(u16 sel) { asm("lldt %0" : : "rm"(sel)); diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 4a2d4e0..8b5393e 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, unsigned cpu = smp_processor_id(); if (likely(prev != next)) { - /* stop flush ipis for the previous mm */ - cpumask_clear_cpu(cpu, mm_cpumask(prev)); #ifdef CONFIG_SMP percpu_write(cpu_tlbstate.state, TLBSTATE_OK); percpu_write(cpu_tlbstate.active_mm, next); @@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, /* Re-load page tables */ load_cr3(next->pgd); + /* stop flush ipis for the previous mm */ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + /* * load the LDT, if the LDT is different: */ diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 177b016..33927d2 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd) static inline void pud_clear(pud_t *pudp) { - unsigned long pgd; - set_pud(pudp, __pud(0)); /* @@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp) * section 8.1: in PAE mode we explicitly have to flush the * TLB via cr3 if the top-level pgd is changed... * - * Make sure the pud entry we're updating is within the - * current pgd to avoid unnecessary TLB flushes. + * Currently all places where pud_clear() is called either have + * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or + * pud_clear_bad()), so we don't need TLB flush here. 
*/ - pgd = read_cr3(); - if (__pa(pudp) >= pgd && __pa(pudp) < - (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) - write_cr3(pgd); } #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index cd02f32..6226870 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -11,5 +11,6 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); void pvclock_read_wallclock(struct pvclock_wall_clock *wall, struct pvclock_vcpu_time_info *vcpu, struct timespec *ts); +void pvclock_resume(void); #endif /* _ASM_X86_PVCLOCK_H */ diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h index 1def601..cfdc6c8 100644 --- a/arch/x86/include/asm/smpboot_hooks.h +++ b/arch/x86/include/asm/smpboot_hooks.h @@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) */ CMOS_WRITE(0, 0xf); - *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; + *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; } static inline void __init smpboot_setup_io_apic(void) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c05872a..510bc6d 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; +int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; @@ -410,10 +411,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, return 0; } - if (acpi_skip_timer_override && - intsrc->source_irq == 0 && intsrc->global_irq == 2) { - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); - return 0; + if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { + if (acpi_skip_timer_override) { + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + return 0; + } + if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { + intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; + printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); + } } mp_override_legacy_irq(intsrc->source_irq, diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 01c0f3e..bebabec 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void) } /* - * MTRR initialization for all AP's + * Delayed MTRR initialization for all AP's */ void mtrr_aps_init(void) { if (!use_intel()) return; + /* + * Check if someone has requested the delay of AP MTRR initialization, + * by doing set_mtrr_aps_delayed_init(), prior to this point. If not, + * then we are done. + */ + if (!mtrr_aps_delayed_init) + return; + set_mtrr(~0U, 0, 0, 0); mtrr_aps_delayed_init = false; } diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 0d6fc71..28b09af 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -980,15 +980,21 @@ static int __init parse_memopt(char *p) if (!p) return -EINVAL; -#ifdef CONFIG_X86_32 if (!strcmp(p, "nopentium")) { +#ifdef CONFIG_X86_32 setup_clear_cpu_cap(X86_FEATURE_PSE); return 0; - } +#else + printk(KERN_WARNING "mem=nopentium ignored! 
(only supported on x86_32)\n"); + return -EINVAL; #endif + } userdef = 1; mem_size = memparse(p, &p); + /* don't remove all of memory when handling "mem={invalid}" param */ + if (mem_size == 0) + return -EINVAL; e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); return 0; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index ebdb85c..f67a33c7 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -145,15 +145,10 @@ static void __init ati_bugs(int num, int slot, int func) static u32 __init ati_sbx00_rev(int num, int slot, int func) { - u32 old, d; + u32 d; - d = read_pci_config(num, slot, func, 0x70); - old = d; - d &= ~(1<<8); - write_pci_config(num, slot, func, 0x70, d); d = read_pci_config(num, slot, func, 0x8); d &= 0xff; - write_pci_config(num, slot, func, 0x70, old); return d; } @@ -162,11 +157,19 @@ static void __init ati_bugs_contd(int num, int slot, int func) { u32 d, rev; - if (acpi_use_timer_override) + rev = ati_sbx00_rev(num, slot, func); + if (rev >= 0x40) + acpi_fix_pin2_polarity = 1; + + /* + * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... + * SB700: revisions 0x39, 0x3a, ... + * SB800: revisions 0x40, 0x41, ... + */ + if (rev >= 0x39) return; - rev = ati_sbx00_rev(num, slot, func); - if (rev > 0x13) + if (acpi_use_timer_override) return; /* check for IRQ0 interrupt swap */ diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 4db7c4d..2642cf9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1268,7 +1268,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) decl PER_CPU_VAR(irq_count) jmp error_exit CFI_ENDPROC -END(do_hypervisor_callback) +END(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. 
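The ati_bugs_contd()/ati_sbx00_rev() change above is easier to follow when untangled from the flattened hunk: the revision now comes straight from PCI config offset 0x8 (the revision ID), with no read-modify-write of register 0x70, and that revision decides how the BIOS IRQ0 pin2 override is treated. A rough sketch of the resulting flow follows; the function name is invented here, everything else (offsets, thresholds, flag names) comes from the hunk itself, and the IRQ0-swap detection on old SB600 parts is only summarized in a comment:

	#include <linux/init.h>
	#include <asm/acpi.h>
	#include <asm/pci-direct.h>

	static void __init sbx00_quirk_sketch(int num, int slot, int func)
	{
		u32 rev = read_pci_config(num, slot, func, 0x8) & 0xff;

		if (rev >= 0x40)		/* SB800: revisions 0x40, 0x41, ... */
			acpi_fix_pin2_polarity = 1;

		if (rev >= 0x39)		/* SB700 and newer: nothing more to do */
			return;

		if (acpi_use_timer_override)	/* user asked to keep the override */
			return;

		/* older SB600 parts: check for the BIOS IRQ0/pin2 interrupt swap
		 * as before and set acpi_skip_timer_override when it is found */
	}
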
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 239427c..a4f07c1 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -120,6 +120,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) static atomic64_t last_value = ATOMIC64_INIT(0); +void pvclock_resume(void) +{ + atomic64_set(&last_value, 0); +} + cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) { struct pvclock_shadow_time shadow; diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 4094ae0..97cdbe8 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -104,10 +104,14 @@ int __init notsc_setup(char *str) __setup("notsc", notsc_setup); +static int no_sched_irq_time; + static int __init tsc_setup(char *str) { if (!strcmp(str, "reliable")) tsc_clocksource_reliable = 1; + if (!strncmp(str, "noirqtime", 9)) + no_sched_irq_time = 1; return 1; } @@ -802,6 +806,7 @@ void mark_tsc_unstable(char *reason) if (!tsc_unstable) { tsc_unstable = 1; sched_clock_stable = 0; + disable_sched_clock_irqtime(); printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ if (clocksource_tsc.mult) @@ -990,6 +995,9 @@ void __init tsc_init(void) /* now allow native_sched_clock() to use rdtsc */ tsc_disabled = 0; + if (!no_sched_irq_time) + enable_sched_clock_irqtime(); + lpj = ((u64)tsc_khz * 1000); do_div(lpj, HZ); lpj_fine = lpj; diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 93825ff..8a33246 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -553,6 +553,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) s->irq_request_opaque = kvm; s->pics[0].pics_state = s; s->pics[1].pics_state = s; + s->pics[0].isr_ack = 0xff; + s->pics[1].isr_ack = 0xff; /* * Initialize PIO device diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 9ca7032..d103e15 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3096,8 +3096,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) sync_lapic_to_cr8(vcpu); save_host_msrs(vcpu); - fs_selector = kvm_read_fs(); - gs_selector = kvm_read_gs(); + savesegment(fs, fs_selector); + savesegment(gs, gs_selector); ldt_selector = kvm_read_ldt(); svm->vmcb->save.cr2 = vcpu->arch.cr2; /* required for live migration with NPT */ @@ -3184,10 +3184,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; - kvm_load_ldt(ldt_selector); - kvm_load_fs(fs_selector); - kvm_load_gs(gs_selector); load_host_msrs(vcpu); + kvm_load_ldt(ldt_selector); + loadsegment(fs, fs_selector); +#ifdef CONFIG_X86_64 + load_gs_index(gs_selector); + wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); +#else + loadsegment(gs, gs_selector); +#endif reload_tss(vcpu); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 39ac456..f1abe23 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -746,7 +746,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) */ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; - vmx->host_state.fs_sel = kvm_read_fs(); + savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; @@ -754,7 +754,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } - vmx->host_state.gs_sel = kvm_read_gs(); + 
savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { @@ -771,10 +771,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) #endif #ifdef CONFIG_X86_64 - if (is_long_mode(&vmx->vcpu)) { - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); + rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); + if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); - } #endif for (i = 0; i < vmx->save_nmsrs; ++i) kvm_set_shared_msr(vmx->guest_msrs[i].index, @@ -784,34 +783,28 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) static void __vmx_load_host_state(struct vcpu_vmx *vmx) { - unsigned long flags; - if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; +#ifdef CONFIG_X86_64 + if (is_long_mode(&vmx->vcpu)) + rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); +#endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); - /* - * If we have to reload gs, we must take care to - * preserve our gs base. - */ - local_irq_save(flags); - kvm_load_gs(vmx->host_state.gs_sel); #ifdef CONFIG_X86_64 - wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); + load_gs_index(vmx->host_state.gs_sel); +#else + loadsegment(gs, vmx->host_state.gs_sel); #endif - local_irq_restore(flags); } if (vmx->host_state.fs_reload_needed) loadsegment(fs, vmx->host_state.fs_sel); reload_tss(); #ifdef CONFIG_X86_64 - if (is_long_mode(&vmx->vcpu)) { - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); - } + wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif load_gdt(&__get_cpu_var(host_gdt)); } @@ -2518,8 +2511,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ - vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ - vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ + vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ + vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 4c4508e..544ed25 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -831,6 +831,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, unsigned int fault) { if (fault & VM_FAULT_OOM) { + /* Kernel mode? Handle exceptions or die: */ + if (!(error_code & PF_USER)) { + up_read(&current->mm->mmap_sem); + no_context(regs, error_code, address); + return; + } + out_of_memory(regs, error_code, address); } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON)) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5c4ee42..2a72049 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -160,8 +160,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) * section 8.1: in PAE mode we explicitly have to flush the * TLB via cr3 if the top-level pgd is changed...
*/ - if (mm == current->active_mm) - write_cr3(read_cr3()); + flush_tlb_mm(mm); } #else /* !CONFIG_X86_PAE */ diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 914f046..6adff93 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1641,9 +1641,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { pte_t pte; - if (pfn > max_pfn_mapped) - max_pfn_mapped = pfn; - if (!pte_none(pte_page[pteidx])) continue; @@ -1687,6 +1684,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, pud_t *l3; pmd_t *l2; + /* max_pfn_mapped is the last pfn mapped in the initial memory + * mappings. Considering that on Xen after the kernel mappings we + * have the mappings of some pages that don't exist in pfn space, we + * set max_pfn_mapped to the last real pfn mapped. */ + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + /* Zap identity mapping */ init_level4_pgt[0] = __pgd(0); @@ -1750,9 +1753,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, { pmd_t *kernel_pmd; - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + - xen_start_info->nr_pt_frames * PAGE_SIZE + - 512*1024); + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index a86df42..41eb583 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -425,6 +425,8 @@ void xen_timer_resume(void) { int cpu; + pvclock_resume(); + if (xen_clockevent != &xen_vcpuop_clockevent) return; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index d571e3f..deb228c 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -248,6 +248,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ + { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1f24267..c77a167 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4946,9 +4946,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) { struct ata_device *dev = qc->dev; - if (ata_tag_internal(qc->tag)) - return; - if (ata_is_nodata(qc->tf.protocol)) return; @@ -4992,14 +4989,23 @@ void ata_qc_complete(struct ata_queued_cmd *qc) if (unlikely(qc->err_mask)) qc->flags |= ATA_QCFLAG_FAILED; - if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { - /* always fill result TF for failed qc */ + /* + * Finish internal commands without any further processing + * and always with the result TF filled. + */ + if (unlikely(ata_tag_internal(qc->tag))) { fill_result_tf(qc); + __ata_qc_complete(qc); + return; + } - if (!ata_tag_internal(qc->tag)) - ata_qc_schedule_eh(qc); - else - __ata_qc_complete(qc); + /* + * Non-internal qc has failed. Fill the result TF and + * summon EH. 
+ */ + if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { + fill_result_tf(qc); + ata_qc_schedule_eh(qc); return; } diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index f087ab5..8f56ef6 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = { }; static struct ata_port_operations mpc52xx_ata_port_ops = { - .inherits = &ata_sff_port_ops, + .inherits = &ata_bmdma_port_ops, .sff_dev_select = mpc52xx_ata_dev_select, .set_piomode = mpc52xx_ata_set_piomode, .set_dmamode = mpc52xx_ata_set_dmamode, diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index c5d4111..37a2d4f 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -173,6 +173,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) printk("Sending %x - down to controller\n", c->busaddr ); #endif /* CCISS_DEBUG */ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); h->commands_outstanding++; if ( h->commands_outstanding > h->max_outstanding) h->max_outstanding = h->commands_outstanding; diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 05ad4a1..2ec5f33 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -354,12 +354,14 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, tpm_protected_ordinal_duration[ordinal & TPM_PROTECTED_ORDINAL_MASK]; - if (duration_idx != TPM_UNDEFINED) + if (duration_idx != TPM_UNDEFINED) { duration = chip->vendor.duration[duration_idx]; - if (duration <= 0) + /* if duration is 0, it's because chip->vendor.duration wasn't */ + /* filled yet, so we set the lowest timeout just to give enough */ + /* time for tpm_get_timeouts() to succeed */ + return (duration <= 0 ? 
HZ : duration); + } else return 2 * 60 * HZ; - else - return duration; } EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); @@ -565,9 +567,11 @@ duration: if (rc) return; - if (be32_to_cpu(tpm_cmd.header.out.return_code) - != 3 * sizeof(u32)) + if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || + be32_to_cpu(tpm_cmd.header.out.length) + != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32)) return; + duration_cap = &tpm_cmd.params.getcap_out.cap.duration; chip->vendor.duration[TPM_SHORT] = usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); @@ -911,6 +915,18 @@ ssize_t tpm_show_caps_1_2(struct device * dev, } EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); +ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tpm_chip *chip = dev_get_drvdata(dev); + + return sprintf(buf, "%d %d %d\n", + jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), + jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), + jiffies_to_usecs(chip->vendor.duration[TPM_LONG])); +} +EXPORT_SYMBOL_GPL(tpm_show_timeouts); + ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 792868d..ba1779c 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -56,6 +56,8 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr, char *); extern ssize_t tpm_show_temp_deactivated(struct device *, struct device_attribute *attr, char *); +extern ssize_t tpm_show_timeouts(struct device *, + struct device_attribute *attr, char *); struct tpm_chip; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 1030f84..ef9c4ac 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -355,6 +355,7 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, NULL); static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); +static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); static struct attribute *tis_attrs[] = { &dev_attr_pubek.attr, @@ -364,7 +365,8 @@ static struct attribute *tis_attrs[] = { &dev_attr_owned.attr, &dev_attr_temp_deactivated.attr, &dev_attr_caps.attr, - &dev_attr_cancel.attr, NULL, + &dev_attr_cancel.attr, + &dev_attr_timeouts.attr, NULL, }; static struct attribute_group tis_attr_grp = { diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index aa9bc9e..dad21fd 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) } /* generate SMI */ + /* inb to force posted write through and make SMI happen now */ asm volatile ( - "outb %b0,%w1" + "outb %b0,%w1\n" + "inb %w1" : /* no output args */ : "a" (smi_cmd->command_code), "d" (smi_cmd->command_address), diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 88910e5..83dac28 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -96,7 +96,10 @@ config DRM_I830 config DRM_I915 tristate "i915 driver" depends on AGP_INTEL + # we need shmfs for the swappable backing store, and in particular + # the shmem_readpage() which depends upon tmpfs select SHMEM + select TMPFS select DRM_KMS_HELPER select FB_CFB_FILLRECT select FB_CFB_COPYAREA diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index a263b70..0ddd109 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -545,7 +545,8 @@ int 
drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; - int crtc, ret = 0; + int ret = 0; + unsigned int crtc; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 7d42ff1..1266ab3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -811,6 +811,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen i915GMm-HFS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", .matches = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 42d876a..f8dc72d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -289,6 +289,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *line_mux = 0x90; } + /* mac rv630, rv730, others */ + if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && + (*connector_type == DRM_MODE_CONNECTOR_DVII)) { + *connector_type = DRM_MODE_CONNECTOR_9PinDIN; + *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; + } + /* ASUS HD 3600 XT board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && @@ -2118,7 +2125,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; /* tell the bios not to handle mode switching */ - bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); + bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; if (rdev->family >= CHIP_R600) { WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); @@ -2169,10 +2176,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) else bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); - if (lock) + if (lock) { bios_6_scratch |= ATOM_S6_CRITICAL_STATE; - else + bios_6_scratch &= ~ATOM_S6_ACC_MODE; + } else { bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; + bios_6_scratch |= ATOM_S6_ACC_MODE; + } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 2d95376..ce627b2 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -619,6 +619,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, *frac_fb_div_p = best_frac_feedback_div; *ref_div_p = best_ref_div; *post_div_p = best_post_div; + DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", + freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, + best_ref_div, best_post_div); + } static bool diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 537841e..fec078d 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -2100,7 +2100,6 @@ static int f71882fg_remove(struct platform_device *pdev) int nr_fans = (data->type == f71882fg) ? 
4 : 3; u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); - platform_set_drvdata(pdev, NULL); if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); @@ -2167,6 +2166,7 @@ static int f71882fg_remove(struct platform_device *pdev) } } + platform_set_drvdata(pdev, NULL); kfree(data); return 0; diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index a610e78..38a41d2 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -333,11 +333,11 @@ static inline int sht15_calc_humid(struct sht15_data *data) const int c1 = -4; const int c2 = 40500; /* x 10 ^ -6 */ - const int c3 = -2800; /* x10 ^ -9 */ + const int c3 = -28; /* x 10 ^ -7 */ RHlinear = c1*1000 + c2 * data->val_humid/1000 - + (data->val_humid * data->val_humid * c3)/1000000; + + (data->val_humid * data->val_humid * c3) / 10000; return (temp - 25000) * (10000 + 80 * data->val_humid) / 1000000 + RHlinear; } diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c index f397ce7..b2074e3 100644 --- a/drivers/hwmon/via686a.c +++ b/drivers/hwmon/via686a.c @@ -687,6 +687,13 @@ static int __devexit via686a_remove(struct platform_device *pdev) return 0; } +static void via686a_update_fan_div(struct via686a_data *data) +{ + int reg = via686a_read_value(data, VIA686A_REG_FANDIV); + data->fan_div[0] = (reg >> 4) & 0x03; + data->fan_div[1] = reg >> 6; +} + static void __devinit via686a_init_device(struct via686a_data *data) { u8 reg; @@ -700,6 +707,9 @@ static void __devinit via686a_init_device(struct via686a_data *data) via686a_write_value(data, VIA686A_REG_TEMP_MODE, (reg & ~VIA686A_TEMP_MODE_MASK) | VIA686A_TEMP_MODE_CONTINUOUS); + + /* Pre-read fan clock divisor values */ + via686a_update_fan_div(data); } static struct via686a_data *via686a_update_device(struct device *dev) @@ -751,9 +761,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) & 0xc0) >> 6; - i = via686a_read_value(data, VIA686A_REG_FANDIV); - data->fan_div[0] = (i >> 4) & 0x03; - data->fan_div[1] = i >> 6; + via686a_update_fan_div(data); data->alarms = via686a_read_value(data, VIA686A_REG_ALARM1) | diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 0dcaba9..e01a3e9 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -277,6 +277,11 @@ struct w83627ehf_data { struct device *hwmon_dev; struct mutex lock; + const u8 *REG_FAN_START_OUTPUT; + const u8 *REG_FAN_STOP_OUTPUT; + const u8 *REG_FAN_MAX_OUTPUT; + const u8 *REG_FAN_STEP_OUTPUT; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -524,7 +529,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) } } - for (i = 0; i < 4; i++) { + for (i = 0; i < data->pwm_num; i++) { + if (!(data->has_fan & (1 << i))) + continue; + /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */ if (i != 1) { pwmcfg = w83627ehf_read_value(data, @@ -546,6 +554,17 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, W83627EHF_REG_FAN_STOP_TIME[i]); + + if (data->REG_FAN_MAX_OUTPUT[i] != 0xff) + data->fan_max_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_MAX_OUTPUT[i]); + + if (data->REG_FAN_STEP_OUTPUT[i] != 0xff) + data->fan_step_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_STEP_OUTPUT[i]); + data->target_temp[i] = w83627ehf_read_value(data, W83627EHF_REG_TARGET[i]) & @@ -1126,7 
+1145,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \ + w83627ehf_write_value(data, data->REG_##REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } @@ -1206,12 +1225,26 @@ static struct sensor_device_attribute sda_sf3_arrays[] = { store_fan_stop_output, 1), SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, store_fan_stop_output, 2), +}; - /* pwm1 and pwm3 don't support max and step settings */ + +/* + * pwm1 and pwm3 don't support max and step settings on all chips. + * Need to check support while generating/removing attribute files. + */ +static struct sensor_device_attribute sda_sf3_max_step_arrays[] = { + SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 0), + SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 0), SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, store_fan_max_output, 1), SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, store_fan_step_output, 1), + SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 2), + SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 2), }; static ssize_t @@ -1235,6 +1268,12 @@ static void w83627ehf_device_remove_files(struct device *dev) for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) device_remove_file(dev, &sda_sf3_arrays[i].dev_attr); + for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { + struct sensor_device_attribute *attr = + &sda_sf3_max_step_arrays[i]; + if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) + device_remove_file(dev, &attr->dev_attr); + } for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr); for (i = 0; i < data->in_num; i++) { @@ -1352,6 +1391,11 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) data->in6_skip = !data->temp3_disable; } + data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; + data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; + data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT; + data->REG_FAN_STEP_OUTPUT = W83627EHF_REG_FAN_STEP_OUTPUT; + /* Initialize the chip */ w83627ehf_init_device(data); @@ -1440,6 +1484,15 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) &sda_sf3_arrays[i].dev_attr))) goto exit_remove; + for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { + struct sensor_device_attribute *attr = + &sda_sf3_max_step_arrays[i]; + if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { + err = device_create_file(dev, &attr->dev_attr); + if (err) + goto exit_remove; + } + } /* if fan4 is enabled create the sf3 files for it */ if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 0815e10..9e6aa8d 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -923,6 +923,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver, static int __unregister_client(struct device *dev, void *dummy) { struct i2c_client *client = i2c_verify_client(dev); + if (client && strcmp(client->name, "dummy")) + i2c_unregister_device(client); + return 0; +} + +static int __unregister_dummy(struct device *dev, 
void *dummy) +{ + struct i2c_client *client = i2c_verify_client(dev); if (client) i2c_unregister_device(client); return 0; @@ -977,8 +985,12 @@ int i2c_del_adapter(struct i2c_adapter *adap) i2c_unlock_adapter(adap); /* Detach any active clients. This can't fail, thus we do not - checking the returned value. */ + * check the returned value. This is a two-pass process, because + * we can't remove the dummy devices during the first pass: they + * could have been instantiated by real devices wishing to clean + * them up properly, so we give them a chance to do that first. */ res = device_for_each_child(&adap->dev, NULL, __unregister_client); + res = device_for_each_child(&adap->dev, NULL, __unregister_dummy); #ifdef CONFIG_I2C_COMPAT class_compat_remove_link(i2c_adapter_compat_class, &adap->dev, diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ad63b79..2422945 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2987,6 +2987,7 @@ static int cm_sidr_req_handler(struct cm_work *work) goto out; /* No match. */ } atomic_inc(&cur_cm_id_priv->refcount); + atomic_inc(&cm_id_priv->refcount); spin_unlock_irq(&cm.lock); cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b930b81..2a072be 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1136,6 +1136,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) cm_id->context = conn_id; cm_id->cm_handler = cma_ib_handler; + /* + * Protect against the user destroying conn_id from another thread + * until we're done accessing it. + */ + atomic_inc(&conn_id->refcount); ret = conn_id->id.event_handler(&conn_id->id, &event); if (!ret) { /* @@ -1148,8 +1153,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); mutex_unlock(&lock); mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); goto out; } + cma_deref_id(conn_id); /* Destroy the CM ID by returning a non-zero value. */ conn_id->cm_id.ib = NULL; @@ -1351,17 +1358,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, event.param.conn.private_data_len = iw_event->private_data_len; event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; event.param.conn.responder_resources = attr.max_qp_rd_atom; + + /* + * Protect against the user destroying conn_id from another thread + * until we're done accessing it. 
+ */ + atomic_inc(&conn_id->refcount); ret = conn_id->id.event_handler(&conn_id->id, &event); if (ret) { /* User wants to destroy the CM ID */ conn_id->cm_id.iw = NULL; cma_exch(conn_id, CMA_DESTROYING); mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); rdma_destroy_id(&conn_id->id); goto out; } mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); out: if (dev) diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 6dedded..3564cda 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -55,6 +55,14 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 +/* MacbookAir3,2 (unibody), aka wellspring5 */ +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241 +/* MacbookAir3,1 (unibody), aka wellspring4 */ +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ @@ -80,6 +88,14 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), + /* MacbookAir3,2 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS), + /* MacbookAir3,1 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), /* Terminating entry */ {} }; @@ -234,6 +250,30 @@ static const struct bcm5974_config bcm5974_config_table[] = { { DIM_X, DIM_X / SN_COORD, -4460, 5166 }, { DIM_Y, DIM_Y / SN_COORD, -75, 6700 } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING4_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } + }, + { + USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, + { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } + }, {} }; diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c index e140816..6ad8be1 100644 --- a/drivers/input/xen-kbdfront.c +++ b/drivers/input/xen-kbdfront.c @@ -109,7 +109,7 @@ static irqreturn_t input_handler(int rq, void *dev_id) static int __devinit xenkbd_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { - int ret, i; + int ret, i, abs; struct xenkbd_info *info; struct input_dev *kbd, *ptr; @@ -127,6 +127,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, if (!info->page) goto error_nomem; + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) 
< 0) + abs = 0; + if (abs) + xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1"); + /* keyboard */ kbd = input_allocate_device(); if (!kbd) @@ -136,11 +141,12 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, kbd->id.bustype = BUS_PCI; kbd->id.vendor = 0x5853; kbd->id.product = 0xffff; - kbd->evbit[0] = BIT(EV_KEY); + + __set_bit(EV_KEY, kbd->evbit); for (i = KEY_ESC; i < KEY_UNKNOWN; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); for (i = KEY_OK; i < KEY_MAX; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); ret = input_register_device(kbd); if (ret) { @@ -159,12 +165,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, ptr->id.bustype = BUS_PCI; ptr->id.vendor = 0x5853; ptr->id.product = 0xfffe; - ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS); + + if (abs) { + __set_bit(EV_ABS, ptr->evbit); + input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); + input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + } else { + input_set_capability(ptr, EV_REL, REL_X); + input_set_capability(ptr, EV_REL, REL_Y); + } + input_set_capability(ptr, EV_REL, REL_WHEEL); + + __set_bit(EV_KEY, ptr->evbit); for (i = BTN_LEFT; i <= BTN_TASK; i++) - set_bit(i, ptr->keybit); - ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL); - input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); - input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + __set_bit(i, ptr->keybit); ret = input_register_device(ptr); if (ret) { @@ -271,7 +285,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct xenkbd_info *info = dev_get_drvdata(&dev->dev); - int ret, val; + int val; switch (backend_state) { case XenbusStateInitialising: @@ -282,17 +296,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, case XenbusStateInitWait: InitWait: - ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, - "feature-abs-pointer", "%d", &val); - if (ret < 0) - val = 0; - if (val) { - ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, - "request-abs-pointer", "1"); - if (ret) - printk(KERN_WARNING - "xenkbd: can't request abs-pointer"); - } xenbus_switch_state(dev, XenbusStateConnected); break; diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 544cf4b..0a347a8 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -801,6 +801,16 @@ static void closecard(int cardnr) ll_unload(csta); } +static irqreturn_t card_irq(int intno, void *dev_id) +{ + struct IsdnCardState *cs = dev_id; + irqreturn_t ret = cs->irq_func(intno, cs); + + if (ret == IRQ_HANDLED) + cs->irq_cnt++; + return ret; +} + static int init_card(struct IsdnCardState *cs) { int irq_cnt, cnt = 3, ret; @@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs) ret = cs->cardmsg(cs, CARD_INIT, NULL); return(ret); } - irq_cnt = kstat_irqs(cs->irq); + irq_cnt = cs->irq_cnt = 0; printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], cs->irq, irq_cnt); - if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { + if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) { printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", cs->irq); return 1; @@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs) /* Timeout 10ms */ msleep(10); printk(KERN_INFO "%s: IRQ %d count %d\n", - CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); - if (kstat_irqs(cs->irq) == irq_cnt) { + CardType[cs->typ], cs->irq, cs->irq_cnt); + if (cs->irq_cnt == irq_cnt) { 
printk(KERN_WARNING "%s: IRQ(%d) getting no interrupts during init %d\n", CardType[cs->typ], cs->irq, 4 - cnt); diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 832a878..32ab392 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h @@ -959,6 +959,7 @@ struct IsdnCardState { u_long event; struct work_struct tqueue; struct timer_list dbusytimer; + unsigned int irq_cnt; #ifdef ERROR_STATISTIC int err_crc; int err_tx; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 826bce7..b294ea6 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -33,7 +33,6 @@ struct pgpath { unsigned fail_count; /* Cumulative failure count */ struct dm_path path; - struct work_struct deactivate_path; struct work_struct activate_path; }; @@ -116,7 +115,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); -static void deactivate_path(struct work_struct *work); /*----------------------------------------------- @@ -129,7 +127,6 @@ static struct pgpath *alloc_pgpath(void) if (pgpath) { pgpath->is_active = 1; - INIT_WORK(&pgpath->deactivate_path, deactivate_path); INIT_WORK(&pgpath->activate_path, activate_path); } @@ -141,14 +138,6 @@ static void free_pgpath(struct pgpath *pgpath) kfree(pgpath); } -static void deactivate_path(struct work_struct *work) -{ - struct pgpath *pgpath = - container_of(work, struct pgpath, deactivate_path); - - blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); -} - static struct priority_group *alloc_priority_group(void) { struct priority_group *pg; @@ -993,7 +982,6 @@ static int fail_path(struct pgpath *pgpath) pgpath->path.dev->name, m->nr_valid_paths); schedule_work(&m->trigger_event); - queue_work(kmultipathd, &pgpath->deactivate_path); out: spin_unlock_irqrestore(&m->lock, flags); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index e3a512d..96f01d25 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2040,13 +2040,14 @@ static void event_callback(void *context) wake_up(&md->eventq); } +/* + * Protected by md->suspend_lock obtained by dm_swap_table(). 
+ */ static void __set_size(struct mapped_device *md, sector_t size) { set_capacity(md->disk, size); - mutex_lock(&md->bdev->bd_inode->i_mutex); i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); - mutex_unlock(&md->bdev->bd_inode->i_mutex); } /* diff --git a/drivers/md/md.c b/drivers/md/md.c index db6552c..9ec74cc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -441,6 +441,9 @@ static mddev_t * mddev_find(dev_t unit) { mddev_t *mddev, *new = NULL; + if (unit && MAJOR(unit) != MD_MAJOR) + unit &= ~((1<gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); + mddev->changed = 1; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); out: return err; @@ -4635,6 +4639,7 @@ static void md_clean(mddev_t *mddev) mddev->sync_speed_min = mddev->sync_speed_max = 0; mddev->recovery = 0; mddev->in_sync = 0; + mddev->changed = 0; mddev->degraded = 0; mddev->barriers_work = 0; mddev->safemode = 0; @@ -4741,6 +4746,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) set_capacity(disk, 0); revalidate = 1; + mddev->changed = 1; if (mddev->ro) mddev->ro = 0; @@ -5109,9 +5115,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) /* set saved_raid_disk if appropriate */ if (!mddev->persistent) { if (info->state & (1<raid_disk < mddev->raid_disks) + info->raid_disk < mddev->raid_disks) { rdev->raid_disk = info->raid_disk; - else + set_bit(In_sync, &rdev->flags); + } else rdev->raid_disk = -1; } else super_types[mddev->major_version]. @@ -5926,7 +5933,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) atomic_inc(&mddev->openers); mutex_unlock(&mddev->open_mutex); - check_disk_size_change(mddev->gendisk, bdev); + check_disk_change(bdev); out: return err; } @@ -5941,6 +5948,21 @@ static int md_release(struct gendisk *disk, fmode_t mode) return 0; } + +static int md_media_changed(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + return mddev->changed; +} + +static int md_revalidate(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + mddev->changed = 0; + return 0; +} static const struct block_device_operations md_fops = { .owner = THIS_MODULE, @@ -5951,6 +5973,8 @@ static const struct block_device_operations md_fops = .compat_ioctl = md_compat_ioctl, #endif .getgeo = md_getgeo, + .media_changed = md_media_changed, + .revalidate_disk= md_revalidate, }; static int md_thread(void * arg) diff --git a/drivers/md/md.h b/drivers/md/md.h index 9ec208e..2e2a3c2 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -250,6 +250,8 @@ struct mddev_s atomic_t active; /* general refcount */ atomic_t openers; /* number of active opens */ + int changed; /* True if we might need to + * reread partition info */ int degraded; /* whether md should consider * adding a spare */ diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c index 4eba35a..c38dd67 100644 --- a/drivers/media/dvb/ttpci/av7110_ca.c +++ b/drivers/media/dvb/ttpci/av7110_ca.c @@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) { ca_slot_info_t *info=(ca_slot_info_t *)parg; - if (info->num > 1) + if (info->num < 0 || info->num > 1) return -EINVAL; av7110->ci_slot[info->num].num = info->num; av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? 
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c index 5bf4985..a91642c 100644 --- a/drivers/media/radio/radio-aimslab.c +++ b/drivers/media/radio/radio-aimslab.c @@ -31,7 +31,7 @@ #include /* Modules */ #include /* Initdata */ #include /* request_region */ -#include /* udelay */ +#include /* msleep */ #include /* kernel radio structs */ #include /* for KERNEL_VERSION MACRO */ #include /* outb, outb_p */ @@ -71,27 +71,17 @@ static struct rtrack rtrack_card; /* local things */ -static void sleep_delay(long n) -{ - /* Sleep nicely for 'n' uS */ - int d = n / msecs_to_jiffies(1000); - if (!d) - udelay(n); - else - msleep(jiffies_to_msecs(d)); -} - static void rt_decvol(struct rtrack *rt) { outb(0x58, rt->io); /* volume down + sigstr + on */ - sleep_delay(100000); + msleep(100); outb(0xd8, rt->io); /* volume steady + sigstr + on */ } static void rt_incvol(struct rtrack *rt) { outb(0x98, rt->io); /* volume up + sigstr + on */ - sleep_delay(100000); + msleep(100); outb(0xd8, rt->io); /* volume steady + sigstr + on */ } @@ -120,7 +110,7 @@ static int rt_setvol(struct rtrack *rt, int vol) if (vol == 0) { /* volume = 0 means mute the card */ outb(0x48, rt->io); /* volume down but still "on" */ - sleep_delay(2000000); /* make sure it's totally down */ + msleep(2000); /* make sure it's totally down */ outb(0xd0, rt->io); /* volume steady, off */ rt->curvol = 0; /* track the volume state! */ mutex_unlock(&rt->lock); @@ -155,7 +145,7 @@ static void send_0_byte(struct rtrack *rt) outb_p(128+64+16+8+ 1, rt->io); /* on + wr-enable + data low */ outb_p(128+64+16+8+2+1, rt->io); /* clock */ } - sleep_delay(1000); + msleep(1); } static void send_1_byte(struct rtrack *rt) @@ -169,7 +159,7 @@ static void send_1_byte(struct rtrack *rt) outb_p(128+64+16+8+4+2+1, rt->io); /* clock */ } - sleep_delay(1000); + msleep(1); } static int rt_setfreq(struct rtrack *rt, unsigned long freq) @@ -427,7 +417,7 @@ static int __init rtrack_init(void) /* this ensures that the volume is all the way down */ outb(0x48, rt->io); /* volume down but still "on" */ - sleep_delay(2000000); /* make sure it's totally down */ + msleep(2000); /* make sure it's totally down */ outb(0xc0, rt->io); /* steady volume, mute card */ return 0; diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 3a4fd85..2fa5bb4 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c @@ -1605,11 +1605,11 @@ struct em28xx_board em28xx_boards[] = { .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = SAA7115_COMPOSITE0, - .amux = EM28XX_AMUX_VIDEO2, + .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, - .amux = EM28XX_AMUX_VIDEO2, + .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_TERRATEC_AV350] = { diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 53f3ef4..4b0870c 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -65,15 +65,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl) { - struct uvc_format *format; + struct uvc_format *format = NULL; struct uvc_frame *frame = NULL; unsigned int i; - if (ctrl->bFormatIndex <= 0 || - ctrl->bFormatIndex > stream->nformats) - return; + for (i = 0; i < stream->nformats; ++i) { + if (stream->format[i].index == ctrl->bFormatIndex) { + format = 
&stream->format[i]; + break; + } + } - format = &stream->format[ctrl->bFormatIndex - 1]; + if (format == NULL) + return; for (i = 0; i < format->nframes; ++i) { if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) { diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index f06b291..4dbe546 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -588,6 +588,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) } static int +mptctl_release(struct inode *inode, struct file *filep) +{ + fasync_helper(-1, filep, 0, &async_queue); + return 0; +} + +static int mptctl_fasync(int fd, struct file *filep, int mode) { MPT_ADAPTER *ioc; @@ -2796,6 +2803,7 @@ static const struct file_operations mptctl_fops = { .llseek = no_llseek, .fasync = mptctl_fasync, .unlocked_ioctl = mptctl_ioctl, + .release = mptctl_release, #ifdef CONFIG_COMPAT .compat_ioctl = compat_mpctl_ioctl, #endif diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 407cb84..ab783a3 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1848,8 +1848,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) } out: - printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", - ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); + printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", + ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval, + SCpnt, SCpnt->serial_number); return retval; } @@ -1886,7 +1887,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { - retval = SUCCESS; + retval = 0; goto out; } diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 000cb41..92b85e2 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c @@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) idev->close = ucb1x00_ts_close; __set_bit(EV_ABS, idev->evbit); - __set_bit(ABS_X, idev->absbit); - __set_bit(ABS_Y, idev->absbit); - __set_bit(ABS_PRESSURE, idev->absbit); input_set_drvdata(idev, ts); + ucb1x00_adc_enable(ts->ucb); + ts->x_res = ucb1x00_ts_read_xres(ts); + ts->y_res = ucb1x00_ts_read_yres(ts); + ucb1x00_adc_disable(ts->ucb); + + input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); + input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); + input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); + err = input_register_device(idev); if (err) goto fail; diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index b9dee28..ac3f2c0 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -309,6 +309,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, if (err) goto remove; + /* + * Update oldcard with the new RCA received from the SDIO + * device -- we're doing this so that it's updated in the + * "card" struct when oldcard overwrites that later. 
+ */ + if (oldcard) + oldcard->rca = card->rca; + mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); } diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index ee87325..f613eb2 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1077,6 +1077,6 @@ static void __exit omap_nand_exit(void) module_init(omap_nand_init); module_exit(omap_nand_exit); -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index e789149..ac08750 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c @@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = { .remove = __devexit_p(generic_onenand_remove), }; -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); static int __init generic_onenand_init(void) { diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 9f322f1..348ce71 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -815,7 +815,7 @@ static void __exit omap2_onenand_exit(void) module_init(omap2_onenand_init); module_exit(omap2_onenand_exit); -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jarkko Lavinen "); MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c index 969ffed..07fa65f 100644 --- a/drivers/net/bonding/bond_ipv6.c +++ b/drivers/net/bonding/bond_ipv6.c @@ -70,6 +70,13 @@ static void bond_na_send(struct net_device *slave_dev, }; struct sk_buff *skb; + /* The Ethernet header is built in ndisc_send_skb(), not + * ndisc_build_skb(), so we cannot insert a VLAN tag. Only an + * out-of-line tag inserted by the hardware will work. 
+ */ + if (vlan_id && !(slave_dev->features & NETIF_F_HW_VLAN_TX)) + return; + icmp6h.icmp6_router = router; icmp6h.icmp6_solicited = 0; icmp6h.icmp6_override = 1; @@ -88,7 +95,7 @@ static void bond_na_send(struct net_device *slave_dev, } if (vlan_id) { - skb = vlan_put_tag(skb, vlan_id); + skb = __vlan_hwaccel_put_tag(skb, vlan_id); if (!skb) { pr_err("failed to insert VLAN tag\n"); return; diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index ba24679..513a4b8 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -5678,7 +5678,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = er32(WUC); eeprom_apme_mask = E1000_WUC_APME; - if (eeprom_data & E1000_WUC_PHY_WAKE) + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 649c867..7782437 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2658,6 +2658,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) if (!adapter->num_vfs) adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + /* Disable packet split due to 82599 erratum #45 */ + if (hw->mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + /* Set the RX buffer length according to the mode */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { rx_buf_len = IXGBE_RX_HDR_SIZE; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e0b47cc..2437c7e 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -3600,6 +3600,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) dma_free_coherent(&pdev->dev, bytes, ss->fw_stats, ss->fw_stats_bus); ss->fw_stats = NULL; + netif_napi_del(&ss->napi); } } kfree(mgp->ss); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 857ecaf..48ed375 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -3040,6 +3041,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) mii->reg_num_mask = 0x1f; mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + /* enable device (incl. PCI PM wakeup and hotplug setup) */ rc = pci_enable_device(pdev); if (rc < 0) { @@ -3744,7 +3750,8 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. 
*/ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + if (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22) { tp->intr_event |= RxFIFOOver | PCSTimeout; tp->intr_event &= ~RxOverflow; } @@ -4631,7 +4638,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) /* Work around for rx fifo overflow */ if (unlikely(status & RxFIFOOver) && - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { + (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22)) { netif_stop_queue(dev); rtl8169_tx_timeout(dev); break; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 5ea8773..dcfe6e1 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -174,8 +174,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, /* returns delimiter padding required given the packet length */ #define ATH_AGGR_GET_NDELIM(_len) \ - (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \ - (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2) + (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \ + DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ)) #define BAW_WITHIN(_start, _bawsz, _seqno) \ ((((_seqno) - (_start)) & 4095) < (_bawsz)) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 2feee1d..4b9378f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -519,6 +519,8 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->hw_version.devid == AR5416_AR9100_DEVID) ah->hw_version.macVersion = AR_SREV_VERSION_9100; + ath9k_hw_read_revisions(ah); + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { ath_print(common, ATH_DBG_FATAL, "Couldn't reset chip\n"); @@ -1096,8 +1098,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) return false; } - ath9k_hw_read_revisions(ah); - return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); } diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 29b31a6..357da81 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -626,15 +626,9 @@ static int prism2_config(struct pcmcia_device *link) local->hw_priv = hw_priv; hw_priv->link = link; - /* - * Make sure the IRQ handler cannot proceed until at least - * dev->base_addr is initialized. 
- */ - spin_lock_irqsave(&local->irq_init_lock, flags); - ret = pcmcia_request_irq(link, prism2_interrupt); if (ret) - goto failed_unlock; + goto failed; /* * This actually configures the PCMCIA socket -- setting up @@ -643,11 +637,12 @@ static int prism2_config(struct pcmcia_device *link) */ ret = pcmcia_request_configuration(link, &link->conf); if (ret) - goto failed_unlock; + goto failed; + /* IRQ handler cannot proceed until at dev->base_addr is initialized */ + spin_lock_irqsave(&local->irq_init_lock, flags); dev->irq = link->irq; dev->base_addr = link->io.BasePort1; - spin_unlock_irqrestore(&local->irq_init_lock, flags); /* Finally, report what we've done */ @@ -676,8 +671,6 @@ static int prism2_config(struct pcmcia_device *link) return ret; - failed_unlock: - spin_unlock_irqrestore(&local->irq_init_lock, flags); failed: kfree(hw_priv); prism2_release((u_long)link); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 2a9480d..7b4fc94 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -2837,7 +2837,6 @@ static struct iwl_lib_ops iwl3945_lib = { .config_ap = iwl3945_config_ap, .manage_ibss_station = iwl3945_manage_ibss_station, .recover_from_tx_stall = iwl_bg_monitor_recover, - .check_plcp_health = iwl3945_good_plcp_health, .debugfs_ops = { .rx_stats_read = iwl3945_ucode_rx_stats_read, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index c7f56b4..8a2c4d7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -1205,6 +1205,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) /* only Re-enable if diabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { @@ -1420,6 +1423,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) /* only Re-enable if diabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); } /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ @@ -3112,9 +3118,10 @@ static void iwl_mac_stop(struct ieee80211_hw *hw) flush_workqueue(priv->workqueue); - /* enable interrupts again in order to receive rfkill changes */ + /* User space software may expect getting rfkill changes + * even if interface is down */ iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - iwl_enable_interrupts(priv); + iwl_enable_rfkill_int(priv); IWL_DEBUG_MAC80211(priv, "leave\n"); } @@ -3832,14 +3839,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * 8. 
Enable interrupts and read RFKILL state *********************************************/ - /* enable interrupts if needed: hw bug w/a */ + /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); } - iwl_enable_interrupts(priv); + iwl_enable_rfkill_int(priv); /* If platform's RF_KILL switch is NOT set to KILL */ if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h index 3ff6b9d..2f7ea66 100644 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -163,6 +163,12 @@ static inline void iwl_disable_interrupts(struct iwl_priv *priv) IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); } +static inline void iwl_enable_rfkill_int(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n"); + iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); +} + static inline void iwl_enable_interrupts(struct iwl_priv *priv) { IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index a5ea89c..a66b39b 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, while (i != idx) { u16 len; struct sk_buff *skb; + dma_addr_t dma_addr; desc = &ring[i]; len = le16_to_cpu(desc->len); skb = rx_buf[i]; @@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, len = priv->common.rx_mtu; } + dma_addr = le32_to_cpu(desc->host_addr); + pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { - pci_unmap_single(priv->pdev, - le32_to_cpu(desc->host_addr), - priv->common.rx_mtu + 32, - PCI_DMA_FROMDEVICE); + pci_unmap_single(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); rx_buf[i] = NULL; - desc->host_addr = 0; + desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); + pci_dma_sync_single_for_device(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 967a200..2943f04 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -97,6 +97,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index ccb2dd1..9ad0f5f 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -616,7 +616,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, else *burst_possible = false; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; 
if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 1e99a02..47a795c 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2392,6 +2392,7 @@ static struct usb_device_id rt73usb_device_table[] = { { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) }, /* Qcom */ { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cb23aa2..e610cfe 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); if (pdev) { + pdev->current_state = PCI_D0; slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); pci_dev_put(pdev); } diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index f7b68ca..4ae494b 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -54,6 +54,9 @@ static int __init pci_stub_init(void) subdevice = PCI_ANY_ID, class=0, class_mask=0; int fields; + if (!strlen(id)) + continue; + fields = sscanf(id, "%x:%x:%x:%x:%x:%x", &vendor, &device, &subvendor, &subdevice, &class, &class_mask); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 3d9f3eb..d4ad1bd 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -1018,7 +1018,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) attr->write = write_vpd_attr; retval = sysfs_create_bin_file(&dev->dev.kobj, attr); if (retval) { - kfree(dev->vpd->attr); + kfree(attr); return retval; } dev->vpd->attr = attr; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 0e9acde..9cd7000 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -519,6 +519,17 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); +#define ICH_PMBASE 0x40 +#define ICH_ACPI_CNTL 0x44 +#define ICH4_ACPI_EN 0x10 +#define ICH6_ACPI_EN 0x80 +#define ICH4_GPIOBASE 0x58 +#define ICH4_GPIO_CNTL 0x5c +#define ICH4_GPIO_EN 0x10 +#define ICH6_GPIOBASE 0x48 +#define ICH6_GPIO_CNTL 0x4c +#define ICH6_GPIO_EN 0x10 + /* * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at * 0x40 (128 bytes of ACPI, GPIO & TCO registers) @@ -527,12 +538,33 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) { u32 region; + u8 enable; - pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); + /* + * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict + * with low legacy (and fixed) ports. We don't know the decoding + * priority and can't tell whether the legacy device or the one created + * here is really at that address. This happens on boards with broken + * BIOSes. 
+ */ + + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); + if (enable & ICH4_ACPI_EN) { + pci_read_config_dword(dev, ICH_PMBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, + "ICH4 ACPI/GPIO/TCO"); + } - pci_read_config_dword(dev, 0x58, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); + pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); + if (enable & ICH4_GPIO_EN) { + pci_read_config_dword(dev, ICH4_GPIOBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 64, + PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO"); + } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); @@ -548,12 +580,25 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) { u32 region; + u8 enable; - pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); + if (enable & ICH6_ACPI_EN) { + pci_read_config_dword(dev, ICH_PMBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, + "ICH6 ACPI/GPIO/TCO"); + } - pci_read_config_dword(dev, 0x48, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); + pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); + if (enable & ICH4_GPIO_EN) { + pci_read_config_dword(dev, ICH6_GPIOBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 64, + PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO"); + } } static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 3e1b8a2..fe7d670 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -520,6 +520,7 @@ config TOSHIBA_BT_RFKILL config ACPI_CMPC tristate "CMPC Laptop Extras" depends on X86 && ACPI + depends on RFKILL || RFKILL=n select INPUT select BACKLIGHT_CLASS_DEVICE default n diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 1ea6c43..7b32e9c 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -91,7 +91,7 @@ struct acer_quirks { */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" -#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" +#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); @@ -1066,7 +1066,7 @@ static ssize_t set_bool_threeg(struct device *dev, return -EINVAL; return count; } -static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, +static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, set_bool_threeg); static ssize_t show_interface(struct device *dev, struct device_attribute *attr, diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 92fd30c..9727b8e 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1079,14 +1079,8 @@ static int asus_hotk_add_fs(struct 
acpi_device *device) struct proc_dir_entry *proc; mode_t mode; - /* - * If parameter uid or gid is not changed, keep the default setting for - * our proc entries (-rw-rw-rw-) else, it means we care about security, - * and then set to -rw-rw---- - */ - if ((asus_uid == 0) && (asus_gid == 0)) { - mode = S_IFREG | S_IRUGO | S_IWUGO; + mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP; } else { mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; printk(KERN_WARNING " asus_uid and asus_gid parameters are " diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c index 1fe0f1f..865ef78 100644 --- a/drivers/platform/x86/tc1100-wmi.c +++ b/drivers/platform/x86/tc1100-wmi.c @@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \ return -EINVAL; \ return count; \ } \ -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ +static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \ show_bool_##value, set_bool_##value); show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 4bdb137..babc4d4 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1911,6 +1911,17 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */ TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, TP_ACPI_HOTKEYSCAN_MUTE, TP_ACPI_HOTKEYSCAN_THINKPAD, + TP_ACPI_HOTKEYSCAN_UNK1, + TP_ACPI_HOTKEYSCAN_UNK2, + TP_ACPI_HOTKEYSCAN_UNK3, + TP_ACPI_HOTKEYSCAN_UNK4, + TP_ACPI_HOTKEYSCAN_UNK5, + TP_ACPI_HOTKEYSCAN_UNK6, + TP_ACPI_HOTKEYSCAN_UNK7, + TP_ACPI_HOTKEYSCAN_UNK8, + + /* Hotkey keymap size */ + TPACPI_HOTKEY_MAP_LEN }; enum { /* Keys/events available through NVRAM polling */ @@ -3082,6 +3093,9 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = { TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ }; +typedef u16 tpacpi_keymap_entry_t; +typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; + static int __init hotkey_init(struct ibm_init_struct *iibm) { /* Requirements for changing the default keymaps: @@ -3113,9 +3127,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) * If the above is too much to ask, don't change the keymap. * Ask the thinkpad-acpi maintainer to do it, instead. 
*/ - static u16 ibm_keycode_map[] __initdata = { + + enum keymap_index { + TPACPI_KEYMAP_IBM_GENERIC = 0, + TPACPI_KEYMAP_LENOVO_GENERIC, + }; + + static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = { + /* Generic keymap for IBM ThinkPads */ + [TPACPI_KEYMAP_IBM_GENERIC] = { /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ - KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP, + KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP, KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, @@ -3146,11 +3168,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* (assignments unknown, please report if found) */ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, - }; - static u16 lenovo_keycode_map[] __initdata = { + }, + + /* Generic keymap for Lenovo ThinkPads */ + [TPACPI_KEYMAP_LENOVO_GENERIC] = { /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP, - KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, + KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8, KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */ @@ -3189,11 +3213,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* (assignments unknown, please report if found) */ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, + }, }; -#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map) -#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map) -#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0]) + static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = { + /* Generic maps (fallback) */ + { + .vendor = PCI_VENDOR_ID_IBM, + .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, + .quirks = TPACPI_KEYMAP_IBM_GENERIC, + }, + { + .vendor = PCI_VENDOR_ID_LENOVO, + .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, + .quirks = TPACPI_KEYMAP_LENOVO_GENERIC, + }, + }; + +#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) +#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t) int res, i; int status; @@ -3202,6 +3240,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) bool tabletsw_state = false; unsigned long quirks; + unsigned long keymap_id; vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, "initializing hotkey subdriver\n"); @@ -3342,7 +3381,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) goto err_exit; /* Set up key map */ - hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, GFP_KERNEL); if (!hotkey_keycode_map) { @@ -3352,17 +3390,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) goto err_exit; } - if (tpacpi_is_lenovo()) { - dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, - "using Lenovo default hot key map\n"); - memcpy(hotkey_keycode_map, &lenovo_keycode_map, - TPACPI_HOTKEY_MAP_SIZE); - } else { - dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, - "using IBM default hot key map\n"); - memcpy(hotkey_keycode_map, &ibm_keycode_map, - TPACPI_HOTKEY_MAP_SIZE); - } + keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable, + ARRAY_SIZE(tpacpi_keymap_qtable)); + BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps)); + dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, + "using keymap number %lu\n", keymap_id); + + memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id], + TPACPI_HOTKEY_MAP_SIZE); input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN); tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; @@ 
-3469,7 +3504,8 @@ static bool hotkey_notify_hotkey(const u32 hkey, *send_acpi_ev = true; *ignore_acpi_ev = false; - if (scancode > 0 && scancode < 0x21) { + /* HKEY event 0x1001 is scancode 0x00 */ + if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) { scancode--; if (!(hotkey_source_mask & (1 << scancode))) { tpacpi_input_send_key_masked(scancode); diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index 4d3b272..c8d26df 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c @@ -212,7 +212,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di) if (di->rem_capacity > 100) di->rem_capacity = 100; - if (di->current_uA >= 100L) + if (di->current_uA < -100L) di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L) / (di->current_uA / 100L); else diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 11b8ea2..bf588d9 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -36,6 +36,7 @@ #include #include #include +#include /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include @@ -850,7 +851,7 @@ static void __exit cmos_do_remove(struct device *dev) #ifdef CONFIG_PM -static int cmos_suspend(struct device *dev, pm_message_t mesg) +static int cmos_suspend(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp; @@ -898,7 +899,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) */ static inline int cmos_poweroff(struct device *dev) { - return cmos_suspend(dev, PMSG_HIBERNATE); + return cmos_suspend(dev); } static int cmos_resume(struct device *dev) @@ -945,9 +946,9 @@ static int cmos_resume(struct device *dev) return 0; } +static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); + #else -#define cmos_suspend NULL -#define cmos_resume NULL static inline int cmos_poweroff(struct device *dev) { @@ -1083,7 +1084,7 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp) static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg) { - return cmos_suspend(&pnp->dev, mesg); + return cmos_suspend(&pnp->dev); } static int cmos_pnp_resume(struct pnp_dev *pnp) @@ -1163,8 +1164,9 @@ static struct platform_driver cmos_platform_driver = { .shutdown = cmos_platform_shutdown, .driver = { .name = (char *) driver_name, - .suspend = cmos_suspend, - .resume = cmos_resume, +#ifdef CONFIG_PM + .pm = &cmos_pm_ops, +#endif } }; diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 18d9a49..9b93e4a 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp; - int ct, perm; + unsigned int ct; + int perm; argp = (void __user *)arg; diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 1a970a7..3d9f55f 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -720,6 +720,8 @@ static const struct scsi_dh_devlist alua_dev_list[] = { {"Intel", "Multi-Flex"}, {"NETAPP", "LUN"}, {"AIX", "NVDISK"}, + {"NETAPP", "LUN"}, + {"AIX", "NVDISK"}, {NULL, NULL} }; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index a7890c6..87f58f6 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -649,6 +649,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) spin_lock_irqsave(shost->host_lock, 
flags); list_splice_init(&shost->eh_cmd_q, &eh_work_q); + shost->host_eh_scheduled = 0; spin_unlock_irqrestore(shost->host_lock, flags); SAS_DPRINTK("Enter %s\n", __func__); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 0ec1ed3..09433df 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -2050,9 +2050,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* adjust hba_queue_depth, reply_free_queue_depth, * and queue_size */ - ioc->hba_queue_depth -= queue_diff; - ioc->reply_free_queue_depth -= queue_diff; - queue_size -= queue_diff; + ioc->hba_queue_depth -= (queue_diff / 2); + ioc->reply_free_queue_depth -= (queue_diff / 2); + queue_size = facts->MaxReplyDescriptorPostQueueDepth; } ioc->reply_post_queue_depth = queue_size; @@ -3736,6 +3736,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) static void _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) { + mpt2sas_scsih_reset_handler(ioc, reset_phase); + mpt2sas_ctl_reset_handler(ioc, reset_phase); switch (reset_phase) { case MPT2_IOC_PRE_RESET: dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " @@ -3766,8 +3768,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); break; } - mpt2sas_scsih_reset_handler(ioc, reset_phase); - mpt2sas_ctl_reset_handler(ioc, reset_phase); } /** @@ -3821,6 +3821,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, { int r; unsigned long flags; + u8 pe_complete = ioc->wait_for_port_enable_to_complete; dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, __func__)); @@ -3845,6 +3846,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, if (r) goto out; _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (pe_complete) { + r = -EFAULT; + goto out; + } r = _base_make_ioc_operational(ioc, sleep_flag); if (!r) _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c5ff26a..04baaf4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2759,9 +2759,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc, u16 handle; for (i = 0 ; i < event_data->NumEntries; i++) { - if (event_data->PHY[i].PhyStatus & - MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) - continue; handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e4d9c51..2452686 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1111,6 +1111,12 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); u64 bad_lba; int info_valid; + /* + * resid is optional but mostly filled in. When it's unused, + * its value is zero, so we assume the whole buffer transferred + */ + unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); + unsigned int good_bytes; if (!blk_fs_request(scmd->request)) return 0; @@ -1144,7 +1150,8 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) /* This computation should always be done in terms of * the resolution of the device's medium. 
*/ - return (bad_lba - start_lba) * scmd->device->sector_size; + good_bytes = (bad_lba - start_lba) * scmd->device->sector_size; + return min(good_bytes, transferred); } /** diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7f5a6a8..3b00e90 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -390,9 +390,9 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, len = (desc_ptr[2] << 8) + desc_ptr[3]; /* skip past overall descriptor */ desc_ptr += len + 4; - if (ses_dev->page10) - addl_desc_ptr = ses_dev->page10 + 8; } + if (ses_dev->page10) + addl_desc_ptr = ses_dev->page10 + 8; type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; components = 0; for (i = 0; i < types; i++, type_ptr += 4) { diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 891e1dd..5bab62d 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -256,7 +256,8 @@ static const struct serial8250_config uart_config[] = { .fifo_size = 128, .tx_loadsz = 128, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, - .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, + /* UART_CAP_EFR breaks billionon CF bluetooth card. */ + .flags = UART_CAP_FIFO | UART_CAP_SLEEP, }, [PORT_RSA] = { .name = "RSA", diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c index d330b18..db31dfb 100644 --- a/drivers/staging/comedi/drivers/jr3_pci.c +++ b/drivers/staging/comedi/drivers/jr3_pci.c @@ -53,6 +53,7 @@ Devices: [JR3] PCI force sensor board (jr3_pci) #define PCI_VENDOR_ID_JR3 0x1762 #define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111 +#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111 #define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112 #define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113 #define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114 @@ -72,6 +73,8 @@ static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = { { PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { + PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL, @@ -808,6 +811,10 @@ static int jr3_pci_attach(struct comedi_device *dev, devpriv->n_channels = 1; } break; + case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{ + devpriv->n_channels = 1; + } + break; case PCI_DEVICE_ID_JR3_2_CHANNEL:{ devpriv->n_channels = 2; } diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 67c8a53..95bdc0d 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -571,7 +571,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase, /* grab our IRQ */ if (irq) { isr_flags = 0; - if (thisboard->bustype == pci_bustype) + if (thisboard->bustype == pci_bustype + || thisboard->bustype == pcmcia_bustype) isr_flags |= IRQF_SHARED; if (request_irq(irq, labpc_interrupt, isr_flags, driver_labpc.driver_name, dev)) { diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index 61bd0be..1886bc0 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c @@ -378,6 +378,7 @@ static int blkvsc_probe(struct device *device) blkdev->gd->first_minor = 0; blkdev->gd->fops = &block_ops; blkdev->gd->private_data = blkdev; + blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device); sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum); blkvsc_do_inquiry(blkdev); diff --git a/drivers/staging/hv/netvsc_drv.c 
b/drivers/staging/hv/netvsc_drv.c index 69cc8f7..a2b8c6d 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c @@ -249,6 +249,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, if (status == 1) { netif_carrier_on(net); netif_wake_queue(net); + netif_notify_peers(net); } else { netif_carrier_off(net); netif_stop_queue(net); diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h index 41a1fe5..afc3b1a 100644 --- a/drivers/staging/usbip/vhci.h +++ b/drivers/staging/usbip/vhci.h @@ -100,9 +100,6 @@ struct vhci_hcd { * But, the index of this array begins from 0. */ struct vhci_device vdev[VHCI_NPORTS]; - - /* vhci_device which has not been assiged its address yet */ - int pending_port; }; @@ -119,6 +116,9 @@ void rh_port_disconnect(int rhport); void vhci_rx_loop(struct usbip_task *ut); void vhci_tx_loop(struct usbip_task *ut); +struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, + __u32 seqnum); + #define hardware (&the_controller->pdev.dev) static inline struct vhci_device *port_to_vdev(__u32 port) diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c index b6c871c..e003beb 100644 --- a/drivers/staging/usbip/vhci_hcd.c +++ b/drivers/staging/usbip/vhci_hcd.c @@ -138,8 +138,6 @@ void rh_port_connect(int rhport, enum usb_device_speed speed) * the_controller->vdev[rhport].ud.status = VDEV_CONNECT; * spin_unlock(&the_controller->vdev[rhport].ud.lock); */ - the_controller->pending_port = rhport; - spin_unlock_irqrestore(&the_controller->lock, flags); usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); @@ -559,6 +557,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, struct device *dev = &urb->dev->dev; int ret = 0; unsigned long flags; + struct vhci_device *vdev; usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", hcd, urb, mem_flags); @@ -574,6 +573,18 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return urb->status; } + vdev = port_to_vdev(urb->dev->portnum-1); + + /* refuse enqueue for dead connection */ + spin_lock(&vdev->ud.lock); + if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) { + usbip_uerr("enqueue for inactive port %d\n", vdev->rhport); + spin_unlock(&vdev->ud.lock); + spin_unlock_irqrestore(&the_controller->lock, flags); + return -ENODEV; + } + spin_unlock(&vdev->ud.lock); + ret = usb_hcd_link_urb_to_ep(hcd, urb); if (ret) goto no_need_unlink; @@ -592,8 +603,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, __u8 type = usb_pipetype(urb->pipe); struct usb_ctrlrequest *ctrlreq = (struct usb_ctrlrequest *) urb->setup_packet; - struct vhci_device *vdev = - port_to_vdev(the_controller->pending_port); if (type != PIPE_CONTROL || !ctrlreq) { dev_err(dev, "invalid request to devnum 0\n"); @@ -607,7 +616,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, dev_info(dev, "SetAddress Request (%d) to port %d\n", ctrlreq->wValue, vdev->rhport); - vdev->udev = urb->dev; + if (vdev->udev) + usb_put_dev(vdev->udev); + vdev->udev = usb_get_dev(urb->dev); spin_lock(&vdev->ud.lock); vdev->ud.status = VDEV_ST_USED; @@ -627,8 +638,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, "Get_Descriptor to device 0 " "(get max pipe size)\n"); - /* FIXME: reference count? 
(usb_get_dev()) */ - vdev->udev = urb->dev; + if (vdev->udev) + usb_put_dev(vdev->udev); + vdev->udev = usb_get_dev(urb->dev); goto out; default: @@ -799,27 +811,12 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) spin_unlock_irqrestore(&vdev->priv_lock, flags2); } - - if (!vdev->ud.tcp_socket) { - /* tcp connection is closed */ - usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n", - urb); - - usb_hcd_unlink_urb_from_ep(hcd, urb); - - spin_unlock_irqrestore(&the_controller->lock, flags); - usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, - urb->status); - spin_lock_irqsave(&the_controller->lock, flags); - } - spin_unlock_irqrestore(&the_controller->lock, flags); usbip_dbg_vhci_hc("leave\n"); return 0; } - static void vhci_device_unlink_cleanup(struct vhci_device *vdev) { struct vhci_unlink *unlink, *tmp; @@ -827,11 +824,34 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev) spin_lock(&vdev->priv_lock); list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { + usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum); list_del(&unlink->list); kfree(unlink); } list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) { + struct urb *urb; + + /* give back URB of unanswered unlink request */ + usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum); + + urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); + if (!urb) { + usbip_uinfo("the urb (seqnum %lu) was already given back\n", + unlink->unlink_seqnum); + list_del(&unlink->list); + kfree(unlink); + continue; + } + + urb->status = -ENODEV; + + spin_lock(&the_controller->lock); + usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); + spin_unlock(&the_controller->lock); + + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); + list_del(&unlink->list); kfree(unlink); } @@ -901,6 +921,10 @@ static void vhci_device_reset(struct usbip_device *ud) vdev->speed = 0; vdev->devid = 0; + if (vdev->udev) + usb_put_dev(vdev->udev); + vdev->udev = NULL; + ud->tcp_socket = NULL; ud->status = VDEV_ST_NULL; diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c index 8147d72..bdbedd2 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/staging/usbip/vhci_rx.c @@ -23,16 +23,14 @@ #include "vhci.h" -/* get URB from transmitted urb queue */ -static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, +/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */ +struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum) { struct vhci_priv *priv, *tmp; struct urb *urb = NULL; int status; - spin_lock(&vdev->priv_lock); - list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) { if (priv->seqnum == seqnum) { urb = priv->urb; @@ -63,8 +61,6 @@ static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, } } - spin_unlock(&vdev->priv_lock); - return urb; } @@ -74,9 +70,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, struct usbip_device *ud = &vdev->ud; struct urb *urb; + spin_lock(&vdev->priv_lock); urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); + spin_unlock(&vdev->priv_lock); if (!urb) { usbip_uerr("cannot find a urb of seqnum %u\n", @@ -161,7 +159,12 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, return; } + spin_lock(&vdev->priv_lock); + urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); + + spin_unlock(&vdev->priv_lock); + if (!urb) { /* * I get the result of a unlink request. 
But, it seems that I diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index a1aacb0..2c70e1e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb) if (!ACM_READY(acm)) goto exit; + usb_mark_last_busy(acm->dev); + data = (unsigned char *)(dr + 1); switch (dr->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: @@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb) break; } exit: - usb_mark_last_busy(acm->dev); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " @@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work) if (!ACM_READY(acm)) return; tty = tty_port_tty_get(&acm->port); + if (!tty) + return; tty_wakeup(tty); tty_kref_put(tty); } @@ -652,8 +655,10 @@ static void acm_port_down(struct acm *acm, int drain) usb_kill_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_kill_urb(acm->wb[i].urb); + tasklet_disable(&acm->urb_task); for (i = 0; i < nr; i++) usb_kill_urb(acm->ru[i].urb); + tasklet_enable(&acm->urb_task); acm->control->needs_remote_wakeup = 0; usb_autopm_put_interface(acm->control); } @@ -1613,6 +1618,7 @@ static const struct usb_device_id acm_ids[] = { { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ + { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 53467b5..d4041c6 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -677,6 +677,8 @@ static void hub_init_func3(struct work_struct *ws); static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) { struct usb_device *hdev = hub->hdev; + struct usb_hcd *hcd; + int ret; int port1; int status; bool need_debounce_delay = false; @@ -715,6 +717,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) usb_autopm_get_interface_no_resume( to_usb_interface(hub->intfdev)); return; /* Continues at init2: below */ + } else if (type == HUB_RESET_RESUME) { + /* The internal host controller state for the hub device + * may be gone after a host power loss on system resume. + * Update the device's info so the HW knows it's a hub. 
+ */ + hcd = bus_to_hcd(hdev->bus); + if (hcd->driver->update_hub_device) { + ret = hcd->driver->update_hub_device(hcd, hdev, + &hub->tt, GFP_NOIO); + if (ret < 0) { + dev_err(hub->intfdev, "Host not " + "accepting hub info " + "update.\n"); + dev_err(hub->intfdev, "LS/FS devices " + "and hubs may not work " + "under this hub\n."); + } + } + hub_power_on(hub, true); } else { hub_power_on(hub, true); } @@ -2721,6 +2742,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, udev->ttport = hdev->ttport; } else if (udev->speed != USB_SPEED_HIGH && hdev->speed == USB_SPEED_HIGH) { + if (!hub->tt.hub) { + dev_err(&udev->dev, "parent hub has no TT\n"); + retval = -EINVAL; + goto fail; + } udev->tt = &hub->tt; udev->ttport = port1; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 25719da..1848420 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ + { USB_DEVICE(0x04e8, 0x6601), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Keytouch QWERTY Panel keyboard */ + { USB_DEVICE(0x0926, 0x3333), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 4c3ac5c..d615eee 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -130,31 +130,31 @@ static struct printer_dev usb_printer_gadget; * parameters are in UTF-8 (superset of ASCII's 7 bit characters). 
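Note on the g_printer hunk below, which drops __initdata from every module parameter: a minimal sketch of the likely reason (hypothetical module, not the gadget driver). A parameter exported with S_IRUGO stays readable through /sys/module/.../parameters/ for the life of the module, so it cannot live in memory that is discarded once init finishes.

#include <linux/module.h>

/* Hypothetical example, not the gadget driver: sysfs reads this variable
 * long after module init has completed, so it must not be __initdata. */
static ushort my_vendor_id;
module_param(my_vendor_id, ushort, S_IRUGO);
MODULE_PARM_DESC(my_vendor_id, "example parameter that must outlive init");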
*/ -static ushort __initdata idVendor; +static ushort idVendor; module_param(idVendor, ushort, S_IRUGO); MODULE_PARM_DESC(idVendor, "USB Vendor ID"); -static ushort __initdata idProduct; +static ushort idProduct; module_param(idProduct, ushort, S_IRUGO); MODULE_PARM_DESC(idProduct, "USB Product ID"); -static ushort __initdata bcdDevice; +static ushort bcdDevice; module_param(bcdDevice, ushort, S_IRUGO); MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); -static char *__initdata iManufacturer; +static char *iManufacturer; module_param(iManufacturer, charp, S_IRUGO); MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); -static char *__initdata iProduct; +static char *iProduct; module_param(iProduct, charp, S_IRUGO); MODULE_PARM_DESC(iProduct, "USB Product string"); -static char *__initdata iSerialNum; +static char *iSerialNum; module_param(iSerialNum, charp, S_IRUGO); MODULE_PARM_DESC(iSerialNum, "1"); -static char *__initdata iPNPstring; +static char *iPNPstring; module_param(iPNPstring, charp, S_IRUGO); MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;"); diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index b14be9c..761bbb3 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -102,6 +102,9 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications"); #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) +/* for ASPM quirk of ISOC on AMD SB800 */ +static struct pci_dev *amd_nb_dev; + /*-------------------------------------------------------------------------*/ #include "ehci.h" @@ -501,6 +504,11 @@ static void ehci_stop (struct usb_hcd *hcd) spin_unlock_irq (&ehci->lock); ehci_mem_cleanup (ehci); + if (amd_nb_dev) { + pci_dev_put(amd_nb_dev); + amd_nb_dev = NULL; + } + #ifdef EHCI_STATS ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n", ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim, @@ -536,6 +544,8 @@ static int ehci_init(struct usb_hcd *hcd) ehci->iaa_watchdog.function = ehci_iaa_watchdog; ehci->iaa_watchdog.data = (unsigned long) ehci; + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); + /* * hw default: 1K periodic list heads, one per frame. * periodic_size can shrink by USBCMD update if hcc_params allows. @@ -543,11 +553,20 @@ static int ehci_init(struct usb_hcd *hcd) ehci->periodic_size = DEFAULT_I_TDPS; INIT_LIST_HEAD(&ehci->cached_itd_list); INIT_LIST_HEAD(&ehci->cached_sitd_list); + + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + switch (EHCI_TUNE_FLS) { + case 0: ehci->periodic_size = 1024; break; + case 1: ehci->periodic_size = 512; break; + case 2: ehci->periodic_size = 256; break; + default: BUG(); + } + } if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) return retval; /* controllers may cache some of the periodic schedule ... 
*/ - hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); if (HCC_ISOC_CACHE(hcc_params)) // full frame cache ehci->i_thresh = 2 + 8; else // N microframes cached @@ -596,12 +615,6 @@ static int ehci_init(struct usb_hcd *hcd) /* periodic schedule size can be smaller than default */ temp &= ~(3 << 2); temp |= (EHCI_TUNE_FLS << 2); - switch (EHCI_TUNE_FLS) { - case 0: ehci->periodic_size = 1024; break; - case 1: ehci->periodic_size = 512; break; - case 2: ehci->periodic_size = 256; break; - default: BUG(); - } } ehci->command = temp; diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 19f5070..e9dc136 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -41,6 +41,42 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) return 0; } +static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci) +{ + struct pci_dev *amd_smbus_dev; + u8 rev = 0; + + amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); + if (amd_smbus_dev) { + pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); + if (rev < 0x40) { + pci_dev_put(amd_smbus_dev); + amd_smbus_dev = NULL; + return 0; + } + } else { + amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL); + if (!amd_smbus_dev) + return 0; + pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); + if (rev < 0x11 || rev > 0x18) { + pci_dev_put(amd_smbus_dev); + amd_smbus_dev = NULL; + return 0; + } + } + + if (!amd_nb_dev) + amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); + + ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n"); + + pci_dev_put(amd_smbus_dev); + amd_smbus_dev = NULL; + + return 1; +} + /* called during probe() after chip reset completes */ static int ehci_pci_setup(struct usb_hcd *hcd) { @@ -99,6 +135,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd) /* cache this readonly data; minimize chip reads */ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + if (ehci_quirk_amd_hudson(ehci)) + ehci->amd_l1_fix = 1; + retval = ehci_halt(ehci); if (retval) return retval; diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 11a79c4..98f022d 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) int stopped; unsigned count = 0; u8 state; - const __le32 halt = HALT_BIT(ehci); struct ehci_qh_hw *hw = qh->hw; if (unlikely (list_empty (&qh->qtd_list))) @@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) && !(qtd->hw_alt_next & EHCI_LIST_END(ehci))) { stopped = 1; - goto halt; } /* stop scanning when we reach qtds the hc is using */ @@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) */ ehci_clear_tt_buffer(ehci, qh, urb, token); } - - /* force halt for unlinked or blocked qh, so we'll - * patch the qh later and so that completions can't - * activate it while we "know" it's stopped. 
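Looking back at the ehci_init() change above: the frame-list-size selection now runs before ehci_mem_init(), since that is where the periodic frame list of periodic_size entries is allocated, and it is applied only when HCC_PGM_FRAMELISTLEN reports the size as programmable. A standalone sketch of the EHCI encoding involved (illustrative helper, the function name is mine, not code from the patch):

/* USBCMD Frame List Size field per the EHCI spec: 0 -> 1024 entries
 * (the default), 1 -> 512, 2 -> 256; 3 is reserved.  EHCI_TUNE_FLS
 * selects one of these encodings. */
static unsigned int ehci_fls_to_entries(unsigned int fls)
{
	switch (fls) {
	case 0: return 1024;
	case 1: return 512;
	case 2: return 256;
	default: return 0;	/* reserved encoding */
	}
}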
- */ - if ((halt & hw->hw_token) == 0) { -halt: - hw->hw_token |= halt; - wmb (); - } } /* unless we already know the urb's status, collect qtd status diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 93f58e5..a530856 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1586,6 +1586,63 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); } +#define AB_REG_BAR_LOW 0xe0 +#define AB_REG_BAR_HIGH 0xe1 +#define AB_INDX(addr) ((addr) + 0x00) +#define AB_DATA(addr) ((addr) + 0x04) +#define NB_PCIE_INDX_ADDR 0xe0 +#define NB_PCIE_INDX_DATA 0xe4 +#define NB_PIF0_PWRDOWN_0 0x01100012 +#define NB_PIF0_PWRDOWN_1 0x01100013 + +static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable) +{ + u32 addr, addr_low, addr_high, val; + + outb_p(AB_REG_BAR_LOW, 0xcd6); + addr_low = inb_p(0xcd7); + outb_p(AB_REG_BAR_HIGH, 0xcd6); + addr_high = inb_p(0xcd7); + addr = addr_high << 8 | addr_low; + outl_p(0x30, AB_INDX(addr)); + outl_p(0x40, AB_DATA(addr)); + outl_p(0x34, AB_INDX(addr)); + val = inl_p(AB_DATA(addr)); + + if (disable) { + val &= ~0x8; + val |= (1 << 4) | (1 << 9); + } else { + val |= 0x8; + val &= ~((1 << 4) | (1 << 9)); + } + outl_p(val, AB_DATA(addr)); + + if (amd_nb_dev) { + addr = NB_PIF0_PWRDOWN_0; + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val); + + addr = NB_PIF0_PWRDOWN_1; + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val); + } + + return; +} + /* fit urb's itds into the selected schedule slot; activate as needed */ static int itd_link_urb ( @@ -1613,6 +1670,12 @@ itd_link_urb ( next_uframe >> 3, next_uframe & 0x7); stream->start = jiffies; } + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 1); + } + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; /* fill iTDs uframe by uframe */ @@ -1739,6 +1802,11 @@ itd_complete ( (void) disable_periodic(ehci); ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 0); + } + if (unlikely(list_is_singular(&stream->td_list))) { ehci_to_hcd(ehci)->self.bandwidth_allocated -= stream->bandwidth; @@ -2026,6 +2094,12 @@ sitd_link_urb ( stream->interval, hc32_to_cpu(ehci, stream->splits)); stream->start = jiffies; } + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 1); + } + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; /* fill sITDs frame by frame */ @@ -2128,6 +2202,11 @@ sitd_complete ( (void) disable_periodic(ehci); ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 0); + } + if (list_is_singular(&stream->td_list)) { ehci_to_hcd(ehci)->self.bandwidth_allocated -= stream->bandwidth; diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 8b3d9c8..c702e4b 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -131,6 +131,7 @@ struct ehci_hcd { /* one per 
controller */ unsigned need_io_watchdog:1; unsigned broken_periodic:1; unsigned fs_i_thresh:1; /* Intel iso scheduling */ + unsigned amd_l1_fix:1; /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index dbcafa2..e0eed11 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -33,6 +33,7 @@ struct isp1760_hcd { struct inter_packet_info atl_ints[32]; struct inter_packet_info int_ints[32]; struct memory_chunk memory_pool[BLOCKS]; + u32 atl_queued; /* periodic schedule support */ #define DEFAULT_I_TDPS 1024 @@ -850,6 +851,11 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, skip_map &= ~queue_entry; isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG); + priv->atl_queued++; + if (priv->atl_queued == 2) + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, + hcd->regs + HC_INTERRUPT_ENABLE); + buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG); buffstatus |= ATL_BUFFER; isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG); @@ -991,6 +997,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) u32 dw3; status = 0; + priv->atl_queued--; queue_entry = __ffs(done_map); done_map &= ~(1 << queue_entry); @@ -1053,11 +1060,6 @@ static void do_atl_int(struct usb_hcd *usb_hcd) * device is not able to send data fast enough. * This happens mostly on slower hardware. */ - printk(KERN_NOTICE "Reloading ptd %p/%p... qh %p read: " - "%d of %zu done: %08x cur: %08x\n", qtd, - urb, qh, PTD_XFERRED_LENGTH(dw3), - qtd->length, done_map, - (1 << queue_entry)); /* RL counter = ERR counter */ dw3 &= ~(0xf << 19); @@ -1085,6 +1087,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, sizeof(ptd)); + priv->atl_queued++; + if (priv->atl_queued == 2) + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, + usb_hcd->regs + HC_INTERRUPT_ENABLE); + buffstatus = isp1760_readl(usb_hcd->regs + HC_BUFFER_STATUS_REG); buffstatus |= ATL_BUFFER; @@ -1190,6 +1197,9 @@ static void do_atl_int(struct usb_hcd *usb_hcd) skip_map = isp1760_readl(usb_hcd->regs + HC_ATL_PTD_SKIPMAP_REG); } + if (priv->atl_queued <= 1) + isp1760_writel(INTERRUPT_ENABLE_MASK, + usb_hcd->regs + HC_INTERRUPT_ENABLE); } static void do_intl_int(struct usb_hcd *usb_hcd) @@ -1769,7 +1779,7 @@ static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd) goto leave; isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG); - if (imask & HC_ATL_INT) + if (imask & (HC_ATL_INT | HC_SOT_INT)) do_atl_int(usb_hcd); if (imask & HC_INTL_INT) diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 6931ef5..612bce5 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h @@ -69,6 +69,7 @@ void deinit_kmem_cache(void); #define HC_INTERRUPT_ENABLE 0x314 #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT) +#define INTERRUPT_ENABLE_SOT_MASK (HC_INTL_INT | HC_SOT_INT | HC_EOT_INT) #define HC_ISO_INT (1 << 9) #define HC_ATL_INT (1 << 8) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 221f999..b262e91 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -457,8 +457,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(cur_td->start_seg, dev->eps[ep_index].stopped_trb, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } + /* Dig out the cycle state 
saved by the xHC during the stop ep cmd */ xhci_dbg(xhci, "Finding endpoint context\n"); ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); @@ -469,8 +472,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(state->new_deq_seg, state->new_deq_ptr, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } trb = &state->new_deq_ptr->generic; if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && @@ -478,15 +483,26 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_cycle_state = ~(state->new_cycle_state) & 0x1; next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); + /* + * If there is only one segment in a ring, find_trb_seg()'s while loop + * will not run, and it will return before it has a chance to see if it + * needs to toggle the cycle bit. It can't tell if the stalled transfer + * ended just before the link TRB on a one-segment ring, or if the TD + * wrapped around the top of the ring, because it doesn't have the TD in + * question. Look for the one-segment case where stalled TRB's address + * is greater than the new dequeue pointer address. + */ + if (ep_ring->first_seg == ep_ring->first_seg->next && + state->new_deq_ptr < dev->eps[ep_index].stopped_trb) + state->new_cycle_state ^= 0x1; + xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); + /* Don't update the ring cycle state for the producer (us). */ xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", state->new_deq_seg); addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", (unsigned long long) addr); - xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); - ep_ring->dequeue = state->new_deq_ptr; - ep_ring->deq_seg = state->new_deq_seg; } static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, @@ -905,9 +921,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, } else { xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", ep_ctx->deq); + if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, + dev->eps[ep_index].queued_deq_ptr) == + (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { + /* Update the ring's dequeue segment and dequeue pointer + * to reflect the new position. + */ + ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg; + ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr; + } else { + xhci_warn(xhci, "Mismatch between completed Set TR Deq " + "Ptr command & xHCI internal state.\n"); + xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", + dev->eps[ep_index].queued_deq_seg, + dev->eps[ep_index].queued_deq_ptr); + } } dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; + dev->eps[ep_index].queued_deq_seg = NULL; + dev->eps[ep_index].queued_deq_ptr = NULL; /* Restart any rings with pending URBs */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } @@ -1885,12 +1918,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) /* Scatter gather list entries may cross 64KB boundaries */ running_total = TRB_MAX_BUFF_SIZE - - (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; if (running_total != 0) num_trbs++; /* How many more 64KB chunks to transfer, how many more TRBs? 
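The recurring expression in these xhci-ring.c hunks is "bytes left before the next 64KB boundary". A self-contained sketch, assuming the usual TRB_MAX_BUFF_SHIFT of 16 from xhci.h, of what the added "& (TRB_MAX_BUFF_SIZE - 1)" mask does: a 64KB-aligned address folds back to 0 rather than a full 65536, so an aligned buffer is not treated as having a partial leading chunk (helper name is mine, not patch code).

#define TRB_MAX_BUFF_SHIFT	16		/* 64KB per TRB data buffer */
#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)

static unsigned int bytes_before_64k_boundary(unsigned long long dma_addr)
{
	unsigned int n = TRB_MAX_BUFF_SIZE -
			 (dma_addr & (TRB_MAX_BUFF_SIZE - 1));

	/* maps 0x10000 back to 0 for an already-aligned address */
	return n & (TRB_MAX_BUFF_SIZE - 1);
}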
*/ - while (running_total < sg_dma_len(sg)) { + while (running_total < sg_dma_len(sg) && running_total < temp) { num_trbs++; running_total += TRB_MAX_BUFF_SIZE; } @@ -1915,11 +1949,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) static void check_trb_math(struct urb *urb, int num_trbs, int running_total) { if (num_trbs != 0) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " "TRBs, %d left\n", __func__, urb->ep->desc.bEndpointAddress, num_trbs); if (running_total != urb->transfer_buffer_length) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " "queued %#x (%d), asked for %#x (%d)\n", __func__, urb->ep->desc.bEndpointAddress, @@ -2046,8 +2080,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, sg = urb->sg; addr = (u64) sg_dma_address(sg); this_sg_len = sg_dma_len(sg); - trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; @@ -2083,7 +2116,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); if (TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { + (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), @@ -2127,7 +2160,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (running_total + trb_buff_len > urb->transfer_buffer_length) trb_buff_len = @@ -2166,7 +2199,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, num_trbs = 0; /* How much data is (potentially) left before the 64KB boundary? */ running_total = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; /* If there's some data on this 64KB chunk, or we have to send a * zero-length transfer, we need at least one TRB @@ -2206,8 +2240,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* How much data is in the first TRB? 
*/ addr = (u64) urb->transfer_dma; trb_buff_len = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); - if (urb->transfer_buffer_length < trb_buff_len) + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; first_trb = true; @@ -2492,6 +2526,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); u32 type = TRB_TYPE(TRB_SET_DEQ); + struct xhci_virt_ep *ep; addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); if (addr == 0) { @@ -2500,6 +2535,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, deq_seg, deq_ptr); return 0; } + ep = &xhci->devs[slot_id]->eps[ep_index]; + if ((ep->ep_state & SET_DEQ_PENDING)) { + xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); + xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); + return 0; + } + ep->queued_deq_seg = deq_seg; + ep->queued_deq_ptr = deq_ptr; return queue_command(xhci, lower_32_bits(addr) | cycle_state, upper_32_bits(addr), trb_stream_id, trb_slot_id | trb_ep_index | type, false); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 4a445bd..5edb706 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -641,6 +641,9 @@ struct xhci_ep_ctx { #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) +/* deq bitmasks */ +#define EP_CTX_CYCLE_MASK (1 << 0) + /** * struct xhci_input_control_context @@ -743,6 +746,12 @@ struct xhci_virt_ep { struct timer_list stop_cmd_timer; int stop_cmds_pending; struct xhci_hcd *xhci; + /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue + * command. We'll need to update the ring's dequeue segment and dequeue + * pointer after the command completes. + */ + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; }; struct xhci_virt_device { diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index b47e694..654243e 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -373,7 +373,7 @@ static ssize_t iowarrior_write(struct file *file, case USB_DEVICE_ID_CODEMERCS_IOWPV2: case USB_DEVICE_ID_CODEMERCS_IOW40: /* IOW24 and IOW40 use a synchronous call */ - buf = kmalloc(8, GFP_KERNEL); /* 8 bytes are enough for both products */ + buf = kmalloc(count, GFP_KERNEL); if (!buf) { retval = -ENOMEM; goto exit; diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 4ff2158..2380338 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -3,7 +3,7 @@ /* * uss720.c -- USS720 USB Parport Cable. 
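On the handle_set_deq_completion() comparison earlier in this section: TRBs are 16-byte aligned, so the low bits of the endpoint context deq field never carry address bits; bit 0 holds the dequeue cycle state, which is exactly what the new EP_CTX_CYCLE_MASK strips before comparing against the DMA address of the queued pointer. A condensed sketch (helper name is mine; the mask value is the one added to xhci.h above):

#include <linux/types.h>

#define EP_CTX_CYCLE_MASK	(1 << 0)

/* bit 0 of the endpoint context 'deq' field is the dequeue cycle state,
 * never an address bit, so mask it off before comparing addresses */
static bool set_deq_matches(u64 hw_deq, dma_addr_t queued_dma)
{
	return (hw_deq & ~(u64)EP_CTX_CYCLE_MASK) == queued_dma;
}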
* - * Copyright (C) 1999, 2005, 2010 + * Copyright (C) 1999, 2005 * Thomas Sailer (t.sailer@alumni.ethz.ch) * * This program is free software; you can redistribute it and/or modify @@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p spin_lock_irqsave(&priv->asynclock, flags); list_add_tail(&rq->asynclist, &priv->asynclist); spin_unlock_irqrestore(&priv->asynclock, flags); + kref_get(&rq->ref_count); ret = usb_submit_urb(rq->urb, mem_flags); - if (!ret) { - kref_get(&rq->ref_count); + if (!ret) return rq; - } - kref_put(&rq->ref_count, destroy_async); + destroy_async(&rq->ref_count); err("submit_async_request submit_urb failed with %d", ret); return NULL; } @@ -776,8 +775,6 @@ static const struct usb_device_id uss720_table[] = { { USB_DEVICE(0x0557, 0x2001) }, { USB_DEVICE(0x0729, 0x1284) }, { USB_DEVICE(0x1293, 0x0002) }, - { USB_DEVICE(0x1293, 0x0002) }, - { USB_DEVICE(0x050d, 0x0002) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index d57d9a4..49352b3 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -323,6 +323,7 @@ static int musb_platform_resume(struct musb *musb) int musb_platform_exit(struct musb *musb) { + del_timer_sync(&musb_idle_timer); musb_platform_suspend(musb); diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 63f7cc4..14ac87e 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -75,6 +75,7 @@ static int debug; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x1a86, 0x5523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); @@ -486,12 +487,22 @@ static void ch341_read_int_callback(struct urb *urb) if (actual_length >= 4) { struct ch341_private *priv = usb_get_serial_port_data(port); unsigned long flags; + u8 prev_line_status = priv->line_status; spin_lock_irqsave(&priv->lock, flags); priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT; if ((data[1] & CH341_MULT_STAT)) priv->multi_status_change = 1; spin_unlock_irqrestore(&priv->lock, flags); + + if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) { + struct tty_struct *tty = tty_port_tty_get(&port->port); + if (tty) + usb_serial_handle_dcd_change(port, tty, + priv->line_status & CH341_BIT_DCD); + tty_kref_put(tty); + } + wake_up_interruptible(&priv->delta_msr_wait); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 9927bca..1a85938 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -49,7 +49,6 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *, static void cp210x_break_ctl(struct tty_struct *, int); static int cp210x_startup(struct usb_serial *); static void cp210x_dtr_rts(struct usb_serial_port *p, int on); -static int cp210x_carrier_raised(struct usb_serial_port *p); static int debug; @@ -86,7 +85,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ - { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ @@ -109,7 +107,9 @@ static const struct 
usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ @@ -165,7 +165,6 @@ static struct usb_serial_driver cp210x_device = { .tiocmset = cp210x_tiocmset, .attach = cp210x_startup, .dtr_rts = cp210x_dtr_rts, - .carrier_raised = cp210x_carrier_raised }; /* Config request types */ @@ -764,15 +763,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file) return result; } -static int cp210x_carrier_raised(struct usb_serial_port *p) -{ - unsigned int control; - cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1); - if (control & CONTROL_DCD) - return 1; - return 0; -} - static void cp210x_break_ctl (struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index fd35f73..0c36695 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty); static int digi_chars_in_buffer(struct tty_struct *tty); static int digi_open(struct tty_struct *tty, struct usb_serial_port *port); static void digi_close(struct usb_serial_port *port); -static int digi_carrier_raised(struct usb_serial_port *port); static void digi_dtr_rts(struct usb_serial_port *port, int on); static int digi_startup_device(struct usb_serial *serial); static int digi_startup(struct usb_serial *serial); @@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = { .open = digi_open, .close = digi_close, .dtr_rts = digi_dtr_rts, - .carrier_raised = digi_carrier_raised, .write = digi_write, .write_room = digi_write_room, .write_bulk_callback = digi_write_bulk_callback, @@ -1337,14 +1335,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on) digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1); } -static int digi_carrier_raised(struct usb_serial_port *port) -{ - struct digi_port *priv = usb_get_serial_port_data(port); - if (priv->dp_modem_signals & TIOCM_CD) - return 1; - return 0; -} - static int digi_open(struct tty_struct *tty, struct usb_serial_port *port) { int ret; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 506d744..b2c4c1c 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -99,6 +99,7 @@ struct ftdi_sio_quirk { static int ftdi_jtag_probe(struct usb_serial *serial); static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); static int ftdi_NDI_device_setup(struct usb_serial *serial); +static int ftdi_stmclite_probe(struct usb_serial *serial); static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); @@ -122,6 +123,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { .port_probe = ftdi_HE_TIRA1_setup, }; +static struct ftdi_sio_quirk ftdi_stmclite_quirk = { + .probe = ftdi_stmclite_probe, +}; + /* * The 8U232AM has the same API as the sio except for: * - 
it can support MUCH higher baudrates; up to: @@ -615,6 +620,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, + { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, @@ -675,7 +681,17 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, - { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, @@ -799,6 +815,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), + .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1680,6 +1698,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial) } /* + * First and second port on STMCLiteadaptors is reserved for JTAG interface + * and the forth port for pio + */ +static int ftdi_stmclite_probe(struct usb_serial *serial) +{ + struct usb_device *udev = serial->dev; + struct usb_interface *interface = serial->interface; + + dbg("%s", __func__); + + if (interface == udev->actconfig->interface[2]) + return 0; + + dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n"); + + return -ENODEV; +} + +/* * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. * We have to correct it if we want to read from it. */ diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 77ace17..3523df5 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -518,6 +518,12 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 /* + * Acton Research Corp. + */ +#define ACTON_VID 0x0647 /* Vendor ID */ +#define ACTON_SPECTRAPRO_PID 0x0100 + +/* * Contec products (http://www.contec.com) * Submitted by Daniel Sangorrin */ @@ -569,11 +575,23 @@ #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ /* - * Icom ID-1 digital transceiver + * Definitions for Icom Inc. 
devices */ - -#define ICOM_ID1_VID 0x0C26 -#define ICOM_ID1_PID 0x0004 +#define ICOM_VID 0x0C26 /* Icom vendor ID */ +/* Note: ID-1 is a communications tranceiver for HAM-radio operators */ +#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */ +/* Note: OPC is an Optional cable to connect an Icom Tranceiver */ +#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */ +/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */ +#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */ +#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */ +#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port*/ +#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */ +#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */ +#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */ +#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */ +#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */ +#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */ /* * GN Otometrics (http://www.otometrics.com) @@ -1022,6 +1040,12 @@ #define WHT_PID 0x0004 /* Wireless Handheld Terminal */ /* + * STMicroelectonics + */ +#define ST_VID 0x0483 +#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ + +/* * Papouch products (http://www.papouch.com/) * Submitted by Folkert van Heusden */ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index ca92f67..d2d5505 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -481,6 +481,26 @@ int usb_serial_handle_break(struct usb_serial_port *port) } EXPORT_SYMBOL_GPL(usb_serial_handle_break); +/** + * usb_serial_handle_dcd_change - handle a change of carrier detect state + * @port: usb_serial_port structure for the open port + * @tty: tty_struct structure for the port + * @status: new carrier detect status, nonzero if active + */ +void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, unsigned int status) +{ + struct tty_port *port = &usb_port->port; + + dbg("%s - port %d, status %d", __func__, usb_port->number, status); + + if (status) + wake_up_interruptible(&port->open_wait); + else if (tty && !C_CLOCAL(tty)) + tty_hangup(tty); +} +EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change); + int usb_serial_generic_resume(struct usb_serial *serial) { struct usb_serial_port *port; diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 76e6fb3..db0e3fe 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2894,8 +2894,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial) dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build); - edge_serial->product_info.FirmwareMajorVersion = fw->data[0]; - edge_serial->product_info.FirmwareMinorVersion = fw->data[1]; + edge_serial->product_info.FirmwareMajorVersion = rec->data[0]; + edge_serial->product_info.FirmwareMinorVersion = rec->data[1]; edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); for (rec = ihex_next_binrec(rec); rec; diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 185fe9a..2cbd661 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -680,22 +680,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on) } } -static int keyspan_pda_carrier_raised(struct usb_serial_port *port) -{ - struct usb_serial *serial = 
port->serial; - unsigned char modembits; - - /* If we can read the modem status and the DCD is low then - carrier is not raised yet */ - if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) { - if (!(modembits & (1>>6))) - return 0; - } - /* Carrier raised, or we failed (eg disconnected) so - progress accordingly */ - return 1; -} - static int keyspan_pda_open(struct tty_struct *tty, struct usb_serial_port *port) @@ -882,7 +866,6 @@ static struct usb_serial_driver keyspan_pda_device = { .id_table = id_table_std, .num_ports = 1, .dtr_rts = keyspan_pda_dtr_rts, - .carrier_raised = keyspan_pda_carrier_raised, .open = keyspan_pda_open, .close = keyspan_pda_close, .write = keyspan_pda_write, diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index bd5bd85..b382d9a 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -372,7 +372,7 @@ static void kobil_read_int_callback(struct urb *urb) } tty = tty_port_tty_get(&port->port); - if (urb->actual_length) { + if (tty && urb->actual_length) { /* BEGIN DEBUG */ /* diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ba23c21..a0f46c4 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -362,7 +362,16 @@ static void option_instat_callback(struct urb *urb); #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 -#define CINTERION_VENDOR_ID 0x0681 +/* Cinterion (formerly Siemens) products */ +#define SIEMENS_VENDOR_ID 0x0681 +#define CINTERION_VENDOR_ID 0x1e2d +#define CINTERION_PRODUCT_HC25_MDM 0x0047 +#define CINTERION_PRODUCT_HC25_MDMNET 0x0040 +#define CINTERION_PRODUCT_HC28_MDM 0x004C +#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */ +#define CINTERION_PRODUCT_EU3_E 0x0051 +#define CINTERION_PRODUCT_EU3_P 0x0052 +#define CINTERION_PRODUCT_PH8 0x0053 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -912,7 +921,17 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, - { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, + /* Cinterion */ + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ { } /* Terminating entry */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index c98f0fb..2fcc54e 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -50,6 +50,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, { 
USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, @@ -677,9 +678,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port, { struct pl2303_private *priv = usb_get_serial_port_data(port); + struct tty_struct *tty; unsigned long flags; u8 status_idx = UART_STATE; u8 length = UART_STATE + 1; + u8 prev_line_status; u16 idv, idp; idv = le16_to_cpu(port->serial->dev->descriptor.idVendor); @@ -701,11 +704,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port, /* Save off the uart status for others to look at */ spin_lock_irqsave(&priv->lock, flags); + prev_line_status = priv->line_status; priv->line_status = data[status_idx]; spin_unlock_irqrestore(&priv->lock, flags); if (priv->line_status & UART_BREAK_ERROR) usb_serial_handle_break(port); wake_up_interruptible(&priv->delta_msr_wait); + + tty = tty_port_tty_get(&port->port); + if (!tty) + return; + if ((priv->line_status ^ prev_line_status) & UART_DCD) + usb_serial_handle_dcd_change(port, tty, + priv->line_status & UART_DCD); + tty_kref_put(tty); } static void pl2303_read_int_callback(struct urb *urb) diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 43eb9bd..1b025f7 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -21,6 +21,7 @@ #define PL2303_PRODUCT_ID_MMX 0x0612 #define PL2303_PRODUCT_ID_GPRS 0x0609 #define PL2303_PRODUCT_ID_HCR331 0x331a +#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index d47b56e..d876bba 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -302,7 +302,9 @@ static const struct usb_device_id id_table[] = { .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ - + { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, { } }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 329d311..b9575c8 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -133,7 +133,7 @@ struct spcp8x5_usb_ctrl_arg { /* how come ??? 
*/ #define UART_STATE 0x08 -#define UART_STATE_TRANSIENT_MASK 0x74 +#define UART_STATE_TRANSIENT_MASK 0x75 #define UART_DCD 0x01 #define UART_DSR 0x02 #define UART_BREAK_ERROR 0x04 @@ -531,6 +531,9 @@ static void spcp8x5_process_read_urb(struct urb *urb) tty_insert_flip_string_fixed_flag(tty, data, tty_flag, urb->actual_length); tty_flip_buffer_push(tty); + if (status & UART_DCD) + usb_serial_handle_dcd_change(port, tty, + priv->line_status & MSR_STATUS_LINE_DCD); tty_kref_put(tty); } diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 90979a1..c58ef54 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -365,9 +365,9 @@ failed_1port: static void __exit ti_exit(void) { + usb_deregister(&ti_usb_driver); usb_serial_deregister(&ti_1port_device); usb_serial_deregister(&ti_2port_device); - usb_deregister(&ti_usb_driver); } diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 15a5d89..1c11959 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "visor.h" /* @@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial, dbg("%s", __func__); + /* + * some Samsung Android phones in modem mode have the same ID + * as SPH-I500, but they are ACM devices, so dont bind to them + */ + if (id->idVendor == SAMSUNG_VENDOR_ID && + id->idProduct == SAMSUNG_SPH_I500_ID && + serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && + serial->dev->descriptor.bDeviceSubClass == + USB_CDC_SUBCLASS_ACM) + return -ENODEV; + if (serial->dev->actconfig->desc.bConfigurationValue != 1) { dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 44be6d7..fba2824 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h @@ -31,4 +31,9 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, "Cypress ISD-300LP", US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0), +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999, + "Super Top", + "USB 2.0 SATA BRIDGE", + US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0), + #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */ diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index b51b121..b0c0a33 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1043,6 +1043,15 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_BULK32), +/* Reported by + * The device reports a vendor-specific device class, requiring an + * explicit vendor/product match. + */ +UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002, + "MagicPixel", + "FW_Omega2", + US_SC_DEVICE, US_PR_DEVICE, NULL, 0), + /* Andrew Lunn * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL * on LUN 4. 
@@ -1387,6 +1396,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Submitted by Nick Holloway */ +UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100, + "VTech", + "Kidizoom", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + /* Reported by Michael Stattmann */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", @@ -1866,6 +1882,22 @@ UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_BAD_SENSE ), +/* Patch by Richard Schütz + * This external hard drive enclosure uses a JMicron chip which + * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ +UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000, + "TrekStor GmbH & Co. KG", + "DataStation maxi g.u", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), + +/* Reported by Jasper Mackenzie */ +UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000, + "Coby Electronics", + "MP3 Player", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, "ST", "2A", diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c index 0056a41..15e8e1a 100644 --- a/drivers/video/console/tileblit.c +++ b/drivers/video/console/tileblit.c @@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg) { struct fb_tilecursor cursor; - int use_sw = (vc->vc_cursor_type & 0x01); + int use_sw = (vc->vc_cursor_type & 0x10); cursor.sx = vc->vc_x; cursor.sy = vc->vc_y; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index ef8d9d5..4fb5b2b 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -96,11 +96,6 @@ static struct pci_device_id virtio_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); -/* A PCI device has it's own struct device and so does a virtio device so - * we create a place for the virtio devices to show up in sysfs. I think it - * would make more sense for virtio to not insist on having it's own device. 
*/ -static struct device *virtio_pci_root; - /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { @@ -629,7 +624,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, if (vp_dev == NULL) return -ENOMEM; - vp_dev->vdev.dev.parent = virtio_pci_root; + vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->vdev.config = &virtio_pci_config_ops; vp_dev->pci_dev = pci_dev; @@ -717,17 +712,7 @@ static struct pci_driver virtio_pci_driver = { static int __init virtio_pci_init(void) { - int err; - - virtio_pci_root = root_device_register("virtio-pci"); - if (IS_ERR(virtio_pci_root)) - return PTR_ERR(virtio_pci_root); - - err = pci_register_driver(&virtio_pci_driver); - if (err) - root_device_unregister(virtio_pci_root); - - return err; + return pci_register_driver(&virtio_pci_driver); } module_init(virtio_pci_init); @@ -735,7 +720,6 @@ module_init(virtio_pci_init); static void __exit virtio_pci_exit(void) { pci_unregister_driver(&virtio_pci_driver); - root_device_unregister(virtio_pci_root); } module_exit(virtio_pci_exit); diff --git a/fs/aio.c b/fs/aio.c index 5fb0fd7..a564a9d 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -512,7 +512,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) ctx->reqs_active--; if (unlikely(!ctx->reqs_active && ctx->dead)) - wake_up(&ctx->wait); + wake_up_all(&ctx->wait); } static void aio_fput_routine(struct work_struct *data) @@ -1233,7 +1233,7 @@ static void io_destroy(struct kioctx *ioctx) * by other CPUs at this point. Right now, we rely on the * locking done by the above calls to ensure this consistency. */ - wake_up(&ioctx->wait); + wake_up_all(&ioctx->wait); put_ioctx(ioctx); /* once for the lookup */ } diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 4c54a8b..21088dc 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -343,7 +343,7 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 netfid, const __u64 len, const __u64 offset, const __u32 numUnlock, const __u32 numLock, const __u8 lockType, - const bool waitFlag); + const bool waitFlag, const __u8 oplock_level); extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const int get_flag, const __u64 len, struct file_lock *, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index c65c341..6b764d7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1647,7 +1647,8 @@ int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const __u64 len, const __u64 offset, const __u32 numUnlock, - const __u32 numLock, const __u8 lockType, const bool waitFlag) + const __u32 numLock, const __u8 lockType, + const bool waitFlag, const __u8 oplock_level) { int rc = 0; LOCK_REQ *pSMB = NULL; @@ -1675,6 +1676,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, pSMB->NumberOfLocks = cpu_to_le16(numLock); pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock); pSMB->LockType = lockType; + pSMB->OplockLevel = oplock_level; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = smb_file_id; /* netfid stays le */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index de2e6c4..8a9e688 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -796,12 +796,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) /* BB we could chain these into one lock request BB */ rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, - 0, 1, lockType, 0 /* wait flag */ ); + 0, 
1, lockType, 0 /* wait flag */, 0); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1 /* numUnlock */ , 0 /* numLock */ , lockType, - 0 /* wait flag */ ); + 0 /* wait flag */, 0); pfLock->fl_type = F_UNLCK; if (rc != 0) cERROR(1, "Error unlocking previously locked " @@ -818,13 +818,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, 1, lockType | LOCKING_ANDX_SHARED_LOCK, - 0 /* wait flag */); + 0 /* wait flag */, 0); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1, 0, lockType | LOCKING_ANDX_SHARED_LOCK, - 0 /* wait flag */); + 0 /* wait flag */, 0); pfLock->fl_type = F_RDLCK; if (rc != 0) cERROR(1, "Error unlocking " @@ -868,8 +868,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) if (numLock) { rc = CIFSSMBLock(xid, tcon, netfid, length, - pfLock->fl_start, - 0, numLock, lockType, wait_flag); + pfLock->fl_start, 0, numLock, lockType, + wait_flag, 0); if (rc == 0) { /* For Windows locks we must store them. */ @@ -889,9 +889,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) (pfLock->fl_start + length) >= (li->offset + li->length)) { stored_rc = CIFSSMBLock(xid, tcon, - netfid, - li->length, li->offset, - 1, 0, li->type, false); + netfid, li->length, + li->offset, 1, 0, + li->type, false, 0); if (stored_rc) rc = stored_rc; else { @@ -2300,7 +2300,8 @@ cifs_oplock_break(struct slow_work *work) */ if (!cfile->closePend && !cfile->oplock_break_cancelled) { rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0, - LOCKING_ANDX_OPLOCK_RELEASE, false); + LOCKING_ANDX_OPLOCK_RELEASE, false, + cinode->clientCanCacheRead ? 1 : 0); cFYI(1, "Oplock release rc = %d", rc); } } diff --git a/fs/dcache.c b/fs/dcache.c index 86d4db1..c1f86c7 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1186,9 +1186,12 @@ struct dentry *d_obtain_alias(struct inode *inode) spin_unlock(&tmp->d_lock); spin_unlock(&dcache_lock); + security_d_instantiate(tmp, inode); return tmp; out_iput: + if (res && !IS_ERR(res)) + security_d_instantiate(res, inode); iput(inode); return res; } diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index db9feb5..1681c62 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -1003,6 +1003,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), ecryptfs_dentry_to_lower(dentry), &lower_stat); if (!rc) { + fsstack_copy_attr_all(dentry->d_inode, + ecryptfs_inode_to_lower(dentry->d_inode)); generic_fillattr(dentry->d_inode, stat); stat->blocks = lower_stat.blocks; } diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 89c5476..d6e9355 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1543,6 +1543,7 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, printk(KERN_ERR "Could not find key with description: [%s]\n", sig); rc = process_request_key_err(PTR_ERR(*auth_tok_key)); + (*auth_tok_key) = NULL; goto out; } (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index b1d8275..b530d9c 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -368,6 +368,11 @@ static int ecryptfs_write_begin(struct file *file, && (pos != 0)) zero_user(page, 0, PAGE_CACHE_SIZE); out: + if (unlikely(rc)) { + unlock_page(page); + page_cache_release(page); + *pagep = NULL; + } return rc; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 
3817149..25bcbd1 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -63,6 +63,13 @@ * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). + * It is also acquired when inserting an epoll fd onto another epoll + * fd. We do this so that we walk the epoll tree and ensure that this + * insertion does not create a cycle of epoll file descriptors, which + * could lead to deadlock. We need a global mutex to prevent two + * simultaneous inserts (A into B and B into A) from racing and + * constructing a cycle without either insert observing that it is + * going to. * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. @@ -227,6 +234,9 @@ static int max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +/* Used to check for epoll file descriptor inclusion loops */ +static struct nested_calls poll_loop_ncalls; + /* Used for safe wake up implementation */ static struct nested_calls poll_safewake_ncalls; @@ -1181,6 +1191,62 @@ retry: return res; } +/** + * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() + * API, to verify that adding an epoll file inside another + * epoll structure, does not violate the constraints, in + * terms of closed loops, or too deep chains (which can + * result in excessive stack usage). + * + * @priv: Pointer to the epoll file to be currently checked. + * @cookie: Original cookie for this call. This is the top-of-the-chain epoll + * data structure pointer. + * @call_nests: Current dept of the @ep_call_nested() call stack. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) +{ + int error = 0; + struct file *file = priv; + struct eventpoll *ep = file->private_data; + struct rb_node *rbp; + struct epitem *epi; + + mutex_lock(&ep->mtx); + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, epi->ffd.file, + epi->ffd.file->private_data, current); + if (error != 0) + break; + } + } + mutex_unlock(&ep->mtx); + + return error; +} + +/** + * ep_loop_check - Performs a check to verify that adding an epoll file (@file) + * another epoll file (represented by @ep) does not create + * closed loops or too deep chains. + * + * @ep: Pointer to the epoll private data structure. + * @file: Pointer to the epoll file to be checked. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check(struct eventpoll *ep, struct file *file) +{ + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, file, ep, current); +} + /* * Open an eventpoll file descriptor. 
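A userspace illustration of what the new ep_loop_check() enforces, assuming a kernel with this epoll change applied: nesting one epoll fd inside another is still allowed, but closing the loop now fails with ELOOP.

#include <sys/epoll.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	int a = epoll_create(1);
	int b = epoll_create(1);
	struct epoll_event ev = { .events = EPOLLIN };

	ev.data.fd = a;
	epoll_ctl(b, EPOLL_CTL_ADD, a, &ev);	/* a inside b: accepted */

	ev.data.fd = b;
	if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) < 0 && errno == ELOOP)
		printf("cycle a<->b rejected with ELOOP\n");

	return 0;
}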
*/ @@ -1229,6 +1295,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; + int did_lock_epmutex = 0; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; @@ -1270,6 +1337,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, */ ep = file->private_data; + /* + * When we insert an epoll file descriptor, inside another epoll file + * descriptor, there is the change of creating closed loops, which are + * better be handled here, than in more critical paths. + * + * We hold epmutex across the loop check and the insert in this case, in + * order to prevent two separate inserts from racing and each doing the + * insert "at the same time" such that ep_loop_check passes on both + * before either one does the insert, thereby creating a cycle. + */ + if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { + mutex_lock(&epmutex); + did_lock_epmutex = 1; + error = -ELOOP; + if (ep_loop_check(ep, tfile) != 0) + goto error_tgt_fput; + } + + mutex_lock(&ep->mtx); /* @@ -1305,6 +1391,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: + if (unlikely(did_lock_epmutex)) + mutex_unlock(&epmutex); + fput(tfile); error_fput: fput(file); @@ -1423,6 +1512,12 @@ static int __init eventpoll_init(void) max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / EP_ITEM_COST; + /* + * Initialize the structure used to perform epoll file descriptor + * inclusion loops checks. + */ + ep_nested_calls_init(&poll_loop_ncalls); + /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 71efb0e..b5d9028 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode, 1); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = ext2_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * inode_dec_link_count() will mark the inode dirty. 
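The eventpoll changes above are directly observable from user space: once the loop check is wired into epoll_ctl(), registering two epoll instances inside each other fails at the second EPOLL_CTL_ADD. A minimal sketch (error handling trimmed; the expected ELOOP assumes a kernel carrying this change):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>

int main(void)
{
        int a = epoll_create1(0);
        int b = epoll_create1(0);
        struct epoll_event ev = { .events = EPOLLIN };

        ev.data.fd = b;
        if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev))        /* b inside a: allowed */
                perror("add b to a");

        ev.data.fd = a;
        if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev))        /* would close the loop a -> b -> a */
                printf("add a to b: %s\n", strerror(errno));    /* ELOOP expected */
        return 0;
}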
*/ old_inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(old_inode); ext2_delete_entry (old_de, old_page); - inode_dec_link_count(old_inode); if (dir_de) { if (old_dir != new_dir) diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index ee18408..827b573 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1550,8 +1550,8 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, goto cleanup; node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; + memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize); - node2->fake.inode = 0; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext3_journal_get_write_access(handle, frame->bh); if (err) diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 6c953bb..6c7efb6 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1459,6 +1459,13 @@ static void ext3_orphan_cleanup (struct super_block * sb, return; } + /* Check if feature set allows readwrite operations */ + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { + printk(KERN_INFO "EXT3-fs: %s: Skipping orphan cleanup due to " + "unknown ROCOMPAT features\n", sb->s_id); + return; + } + if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " diff --git a/fs/file_table.c b/fs/file_table.c index 5c7d10e..684ffe3 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -123,13 +123,13 @@ struct file *get_empty_filp(void) goto fail; percpu_counter_inc(&nr_files); + f->f_cred = get_cred(cred); if (security_file_alloc(f)) goto fail_sec; INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); - f->f_cred = get_cred(cred); spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ diff --git a/fs/fuse/file.c b/fs/fuse/file.c index f6c333f..9576aed 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) return ff; } +static void fuse_release_async(struct work_struct *work) +{ + struct fuse_req *req; + struct fuse_conn *fc; + struct path path; + + req = container_of(work, struct fuse_req, misc.release.work); + path = req->misc.release.path; + fc = get_fuse_conn(path.dentry->d_inode); + + fuse_put_request(fc, req); + path_put(&path); +} + static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { - path_put(&req->misc.release.path); + if (fc->destroy_req) { + /* + * If this is a fuseblk mount, then it's possible that + * releasing the path will result in releasing the + * super block and sending the DESTROY request. If + * the server is single threaded, this would hang. + * For this reason do the path_put() in a separate + * thread. 
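The comment above is the heart of the fuse change: the final path_put() may drop the last superblock reference and trigger a DESTROY request, so a single-threaded fuseblk server must never have to service that from its own release path, and the put is pushed off to a workqueue instead. A loose user-space analogy of the same deferral idea, using a detached thread in place of schedule_work() (illustrative only, not the kernel API; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for the part of the release that must not run in the caller */
static void *deferred_cleanup(void *obj)
{
        free(obj);                              /* e.g. drop the last reference */
        puts("cleanup finished off the caller's context");
        return NULL;
}

static void release(void *obj, int must_not_block)
{
        if (must_not_block) {                   /* cf. fc->destroy_req != NULL */
                pthread_t t;
                pthread_create(&t, NULL, deferred_cleanup, obj);
                pthread_detach(t);
        } else {
                free(obj);
        }
}

int main(void)
{
        release(malloc(32), 1);
        pthread_exit(NULL);                     /* let the detached worker finish */
}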
+ */ + atomic_inc(&req->count); + INIT_WORK(&req->misc.release.work, fuse_release_async); + schedule_work(&req->misc.release.work); + } else { + path_put(&req->misc.release.path); + } } -static void fuse_file_put(struct fuse_file *ff) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (atomic_dec_and_test(&ff->count)) { struct fuse_req *req = ff->reserved_req; - req->end = fuse_release_end; - fuse_request_send_background(ff->fc, req); + if (sync) { + fuse_request_send(ff->fc, req); + path_put(&req->misc.release.path); + fuse_put_request(ff->fc, req); + } else { + req->end = fuse_release_end; + fuse_request_send_background(ff->fc, req); + } kfree(ff); } } @@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode) * Normally this will send the RELEASE request, however if * some asynchronous READ or WRITE requests are outstanding, * the sending will be delayed. + * + * Make the release synchronous if this is a fuseblk mount, + * synchronous RELEASE is allowed (and desirable) in this case + * because the server can be trusted not to screw up. */ - fuse_file_put(ff); + fuse_file_put(ff, ff->fc->destroy_req != NULL); } static int fuse_open(struct inode *inode, struct file *file) @@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) page_cache_release(page); } if (req->ff) - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_send_readpages(struct fuse_req *req, struct file *file) @@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) { __free_page(req->pages[0]); - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 8f309f0..6b9a746 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -21,6 +21,7 @@ #include #include #include +#include /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 @@ -257,7 +258,10 @@ struct fuse_req { union { struct fuse_forget_in forget_in; struct { - struct fuse_release_in in; + union { + struct fuse_release_in in; + struct work_struct work; + }; struct path path; } release; struct fuse_init_in init_in; diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index ad4cd31..b1c23b7 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -402,15 +402,18 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, pos += vec->iov_len; } + /* + * If no bytes were started, return the error, and let the + * generic layer handle the completion. + */ + if (requested_bytes == 0) { + nfs_direct_req_release(dreq); + return result < 0 ? result : -EIO; + } + if (put_dreq(dreq)) nfs_direct_complete(dreq); - - if (requested_bytes != 0) - return 0; - - if (result < 0) - return result; - return -EIO; + return 0; } static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov, @@ -830,15 +833,18 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, pos += vec->iov_len; } + /* + * If no bytes were started, return the error, and let the + * generic layer handle the completion. + */ + if (requested_bytes == 0) { + nfs_direct_req_release(dreq); + return result < 0 ? 
result : -EIO; + } + if (put_dreq(dreq)) nfs_direct_write_complete(dreq, dreq->inode); - - if (requested_bytes != 0) - return 0; - - if (result < 0) - return result; - return -EIO; + return 0; } static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov, diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 642c819..6fd4fba 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -222,7 +222,7 @@ static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode) have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); if (have_error) ret = xchg(&ctx->error, 0); - if (!ret) + if (!ret && status < 0) ret = status; return ret; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 59ec449..bb775a5 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -954,8 +954,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *, void *); enum nfsd4_op_flags { ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */ - ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */ - ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */ + ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */ + ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */ }; struct nfsd4_operation { diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index ac17a70..d88539d 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -316,8 +316,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) + return status; iattr->ia_valid |= ATTR_UID; } if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { @@ -327,8 +327,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) + return status; iattr->ia_valid |= ATTR_GID; } if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { @@ -1180,8 +1180,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy * 4); - for (i = 0; i < dummy; ++i) - READ32(dummy); break; case RPC_AUTH_GSS: dprintk("RPC_AUTH_GSS callback secflavor " @@ -1197,7 +1195,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy); - p += XDR_QUADLEN(dummy); break; default: dprintk("Illegal callback secflavor\n"); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 3c11112..4096277 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -819,7 +819,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino) if (ra->p_count == 0) frap = rap; } - depth = nfsdstats.ra_size*11/10; + depth = nfsdstats.ra_size; if (!frap) { spin_unlock(&rab->pb_lock); return NULL; diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 73a11cc..c90b2dd 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -3215,7 +3215,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, u32 num_clusters, unsigned int e_flags) { int ret, delete, index, credits = 0; - u32 new_bit, new_len; + u32 new_bit, new_len, orig_num_clusters; unsigned int set_len; struct ocfs2_super *osb = OCFS2_SB(sb); handle_t *handle; @@ -3248,6 
+3248,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, goto out; } + orig_num_clusters = num_clusters; + while (num_clusters) { ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, p_cluster, num_clusters, @@ -3335,7 +3337,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, * in write-back mode. */ if (context->get_clusters == ocfs2_di_get_clusters) { - ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); + ret = ocfs2_cow_sync_writeback(sb, context, cpos, + orig_num_clusters); if (ret) mlog_errno(ret); } diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 2dc57bc..22db114 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -191,7 +191,7 @@ static struct ocfs2_live_connection *ocfs2_connection_find(const char *name) return c; } - return c; + return NULL; } /* diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 648c9d8..5638c19 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) } vm->vblk_size = get_unaligned_be32(data + 0x08); + if (vm->vblk_size == 0) { + ldm_error ("Illegal VBLK size"); + return false; + } + vm->vblk_offset = get_unaligned_be32(data + 0x0C); vm->last_vblk_seq = get_unaligned_be32(data + 0x04); diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 74465ff..0984d92 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c @@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) int mac_partition(struct parsed_partitions *state) { - int slot = 1; Sector sect; unsigned char *data; - int blk, blocks_in_map; + int slot, blocks_in_map; unsigned secsize; #ifdef CONFIG_PPC_PMAC int found_root = 0; @@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) put_dev_sector(sect); return 0; /* not a MacOS disk */ } - printk(" [mac]"); blocks_in_map = be32_to_cpu(part->map_count); - for (blk = 1; blk <= blocks_in_map; ++blk) { - int pos = blk * secsize; + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { + put_dev_sector(sect); + return 0; + } + printk(" [mac]"); + for (slot = 1; slot <= blocks_in_map; ++slot) { + int pos = slot * secsize; put_dev_sector(sect); data = read_part_sector(state, pos/512, §); if (!data) @@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) } if (goodness > found_root_goodness) { - found_root = blk; + found_root = slot; found_root_goodness = goodness; } } #endif /* CONFIG_PPC_PMAC */ - - ++slot; } #ifdef CONFIG_PPC_PMAC if (found_root_goodness) diff --git a/fs/proc/array.c b/fs/proc/array.c index fff6572..9e5f430 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, task_cap(m, task); task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); -#if defined(CONFIG_S390) - task_show_regs(m, task); -#endif task_context_switch_counts(m, task); return 0; } @@ -492,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, vsize, mm ? get_mm_rss(mm) : 0, rsslim, - mm ? mm->start_code : 0, - mm ? mm->end_code : 0, + mm ? (permitted ? mm->start_code : 1) : 0, + mm ? (permitted ? mm->end_code : 1) : 0, (permitted && mm) ? 
mm->start_stack : 0, esp, eip, diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 271afc4..0dfd815 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -248,8 +248,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) const char *name = arch_vma_name(vma); if (!name) { if (mm) { - if (vma->vm_start <= mm->start_brk && - vma->vm_end >= mm->brk) { + if (vma->vm_start <= mm->brk && + vma->vm_end >= mm->start_brk) { name = "[heap]"; } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 7c9d28d..3667812 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -1021,7 +1021,7 @@ struct drm_device { struct pci_controller *hose; #endif struct drm_sg_mem *sg; /**< Scatter gather memory */ - int num_crtcs; /**< Number of CRTCs on this device */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ void *dev_private; /**< device private data */ void *mm_private; struct address_space *dev_mapping; diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 883c1d4..40b1f0e 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -28,7 +28,6 @@ {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ - {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index 5cb86c3..fc48754 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h @@ -99,7 +99,6 @@ struct rxrpc_key_token { * structure of raw payloads passed to add_key() or instantiate key */ struct rxrpc_key_data_v1 { - u32 kif_version; /* 1 */ u16 security_index; u16 ticket_length; u32 expiry; /* time_t */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 41e4633..59c3939 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -423,6 +423,7 @@ extern void unregister_ftrace_graph(void); extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); static inline int task_curr_ret_stack(struct task_struct *t) { @@ -446,6 +447,7 @@ static inline void unpause_graph_tracing(void) static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } +static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc) diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d5b3876..ff43e92 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -64,6 +64,8 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) + #ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) @@ -82,10 +84,13 @@ /* * Are we doing bottom half or hardware interrupt processing? * Are we in a softirq context? 
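The SOFTIRQ_DISABLE_OFFSET definition above, together with the in_serving_softirq() helper that follows, lets the kernel tell "bh disabled" apart from "currently running softirq": local_bh_disable() now bumps the softirq part of preempt_count by two units and __do_softirq() by one, so the low bit of that field means a softirq is actually being served. A toy model of the arithmetic (offsets copied from the patch; the real per-task preempt_count machinery is reduced to one global):

#include <stdio.h>

#define SOFTIRQ_SHIFT           8
#define SOFTIRQ_OFFSET          (1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)
#define SOFTIRQ_MASK            (255UL << SOFTIRQ_SHIFT)

static unsigned long preempt_count;             /* toy stand-in */

#define softirq_count()         (preempt_count & SOFTIRQ_MASK)
#define in_softirq()            (softirq_count())
#define in_serving_softirq()    (softirq_count() & SOFTIRQ_OFFSET)

int main(void)
{
        preempt_count += SOFTIRQ_DISABLE_OFFSET;        /* local_bh_disable() */
        printf("bh disabled: in_softirq=%d in_serving_softirq=%d\n",
               !!in_softirq(), !!in_serving_softirq());
        preempt_count -= SOFTIRQ_DISABLE_OFFSET;        /* local_bh_enable() */

        preempt_count += SOFTIRQ_OFFSET;                /* __do_softirq() entry */
        printf("serving    : in_softirq=%d in_serving_softirq=%d\n",
               !!in_softirq(), !!in_serving_softirq());
        preempt_count -= SOFTIRQ_OFFSET;
        return 0;
}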
Interrupt context? + * in_softirq - Are we currently processing softirq or have bh disabled? + * in_serving_softirq - Are we currently processing softirq? */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) /* * Are we in NMI context? @@ -132,10 +137,12 @@ extern void synchronize_irq(unsigned int irq); struct task_struct; -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) static inline void account_system_vtime(struct task_struct *tsk) { } +#else +extern void account_system_vtime(struct task_struct *tsk); #endif #if defined(CONFIG_NO_HZ) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 97b2eae..731854c 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -959,7 +959,7 @@ struct ieee80211_ht_info { /* block-ack parameters */ #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 diff --git a/include/linux/klist.h b/include/linux/klist.h index e91a4e5..a370ce5 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -22,7 +22,7 @@ struct klist { struct list_head k_list; void (*get)(struct klist_node *); void (*put)(struct klist_node *); -} __attribute__ ((aligned (4))); +} __attribute__ ((aligned (sizeof(void *)))); #define KLIST_INIT(_name, _get, _put) \ { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 142bf18..e857c94 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2344,6 +2344,10 @@ do { \ }) #endif +#define MODULE_ALIAS_NETDEV(device) \ + MODULE_ALIAS("netdev-" device) + + #endif /* __KERNEL__ */ #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1272de4..fc531a7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -856,6 +856,7 @@ struct sched_group { * single CPU. */ unsigned int cpu_power; + unsigned int group_weight; /* * The CPUs this group covers. @@ -1076,7 +1077,7 @@ struct sched_class { struct task_struct *task); #ifdef CONFIG_FAIR_GROUP_SCHED - void (*moved_group) (struct task_struct *p, int on_rq); + void (*task_move_group) (struct task_struct *p, int on_rq); #endif }; @@ -1685,8 +1686,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * /* * Per process flags */ -#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ - /* Not implemented yet, only for 486*/ +#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */ #define PF_STARTING 0x00000002 /* being created */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ @@ -1824,6 +1824,19 @@ extern void sched_clock_idle_wakeup_event(u64 delta_ns); */ extern unsigned long long cpu_clock(int cpu); +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. 
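The ADDBA mask correction above follows the 802.11 block-ack parameter-set layout: A-MSDU bit 0, policy bit 1, TID bits 2-5, buffer size bits 6-15, hence 0xFFC0. The old 0xFFA0 dropped bit 6, so odd buffer sizes lost their low bit. A two-line check of the difference (the parameter value is made up for illustration):

#include <stdio.h>

#define BUF_SIZE_MASK_OLD       0xFFA0  /* pre-patch: bit 6 missing */
#define BUF_SIZE_MASK_NEW       0xFFC0  /* bits 6..15 of the parameter set */

int main(void)
{
        /* buffer size 33, TID 5, immediate block-ack policy */
        unsigned int params = (33u << 6) | (5u << 2) | (1u << 1);

        printf("old mask -> buf_size %u\n", (params & BUF_SIZE_MASK_OLD) >> 6); /* 32 */
        printf("new mask -> buf_size %u\n", (params & BUF_SIZE_MASK_NEW) >> 6); /* 33 */
        return 0;
}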
+ */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + extern unsigned long long task_sched_runtime(struct task_struct *task); extern unsigned long long thread_group_sched_runtime(struct task_struct *task); @@ -2363,9 +2376,9 @@ extern int __cond_resched_lock(spinlock_t *lock); extern int __cond_resched_softirq(void); -#define cond_resched_softirq() ({ \ - __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ - __cond_resched_softirq(); \ +#define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ }) /* diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 84a4c44..8ab6142 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -346,6 +346,9 @@ extern int usb_serial_handle_sysrq_char(struct tty_struct *tty, struct usb_serial_port *port, unsigned int ch); extern int usb_serial_handle_break(struct usb_serial_port *port); +extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, + unsigned int status); extern int usb_serial_bus_register(struct usb_serial_driver *device); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index d80b6db..558fa2f 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -451,7 +451,7 @@ static inline int scsi_device_qas(struct scsi_device *sdev) } static inline int scsi_device_enclosure(struct scsi_device *sdev) { - return sdev->inquiry[6] & (1<<6); + return sdev->inquiry ? (sdev->inquiry[6] & (1<<6)) : 1; } static inline int scsi_device_protection(struct scsi_device *sdev) diff --git a/init/calibrate.c b/init/calibrate.c index 6eb48e5..24fe022 100644 --- a/init/calibrate.c +++ b/init/calibrate.c @@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void) pre_start = 0; read_current_timer(&start); start_jiffies = jiffies; - while (jiffies <= (start_jiffies + 1)) { + while (time_before_eq(jiffies, start_jiffies + 1)) { pre_start = start; read_current_timer(&start); } @@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void) pre_end = 0; end = post_start; - while (jiffies <= - (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) { + while (time_before_eq(jiffies, start_jiffies + 1 + + DELAY_CALIBRATION_TICKS)) { pre_end = end; read_current_timer(&end); } diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 02b9611..8c781d5 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1576,8 +1576,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, return -ENODEV; trialcs = alloc_trial_cpuset(cs); - if (!trialcs) - return -ENOMEM; + if (!trialcs) { + retval = -ENOMEM; + goto out; + } switch (cft->private) { case FILE_CPULIST: @@ -1592,6 +1594,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, } free_trial_cpuset(trialcs); +out: cgroup_unlock(); return retval; } diff --git a/kernel/cred.c b/kernel/cred.c index 60bc8b1..a8d12fe 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -256,13 +256,13 @@ struct cred *cred_alloc_blank(void) #endif atomic_set(&new->usage, 1); +#ifdef CONFIG_DEBUG_CREDENTIALS + new->magic = CRED_MAGIC; +#endif if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) goto error; -#ifdef CONFIG_DEBUG_CREDENTIALS - new->magic = CRED_MAGIC; -#endif return new; error: @@ -663,6 +663,8 @@ struct cred 
*prepare_kernel_cred(struct task_struct *daemon) validate_creds(old); *new = *old; + atomic_set(&new->usage, 1); + set_cred_subscribers(new, 0); get_uid(new->user); get_group_info(new->group_info); @@ -680,8 +682,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; - atomic_set(&new->usage, 1); - set_cred_subscribers(new, 0); put_cred(old); validate_creds(new); return new; @@ -754,7 +754,11 @@ bool creds_are_invalid(const struct cred *cred) if (cred->magic != CRED_MAGIC) return true; #ifdef CONFIG_SECURITY_SELINUX - if (selinux_is_enabled()) { + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. + */ + if (selinux_is_enabled() && cred->security) { if ((unsigned long) cred->security < PAGE_SIZE) return true; if ((*(u32 *)cred->security & 0xffffff00) == diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c3003e9..6a2281f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1093,7 +1093,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, if (retval) kfree(action); -#ifdef CONFIG_DEBUG_SHIRQ +#ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { /* * It's a shared IRQ -- the driver ought to be prepared for it diff --git a/kernel/perf_event.c b/kernel/perf_event.c index d1a6c04..fa8123e 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -58,7 +58,8 @@ static atomic_t nr_task_events __read_mostly; */ int sysctl_perf_event_paranoid __read_mostly = 1; -int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ +/* Minimum for 128 pages + 1 for the user control page */ +int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */ /* * max perf event sample rate @@ -5400,17 +5401,20 @@ __perf_event_exit_task(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { - struct perf_event *parent_event; + if (child_event->parent) { + raw_spin_lock_irq(&child_ctx->lock); + perf_group_detach(child_event); + raw_spin_unlock_irq(&child_ctx->lock); + } perf_event_remove_from_context(child_event); - parent_event = child_event->parent; /* - * It can happen that parent exits first, and has events + * It can happen that the parent exits first, and has events * that are still around due to the child reference. These - * events need to be zapped - but otherwise linger. + * events need to be zapped. 
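A few hunks back, calibrate_delay_direct() switched its loop conditions from plain jiffies comparisons to time_before_eq(). The reason is wraparound: jiffies is deliberately initialized close to its wrap point, and a direct <= test misbehaves once start_jiffies + 1 wraps, while the signed-difference idiom keeps working. A small self-contained illustration (fixed-width types so the wrap is visible on any host; the values are contrived):

#include <stdint.h>
#include <stdio.h>

/* the signed-difference idiom behind time_after_eq()/time_before_eq() */
#define time_after_eq(a, b)     ((int32_t)((a) - (b)) >= 0)
#define time_before_eq(a, b)    time_after_eq(b, a)

int main(void)
{
        uint32_t start = 0xffffffffu;   /* counter about to wrap */
        uint32_t end = start + 1;       /* wraps to 0 */
        uint32_t now = start;           /* no tick has elapsed yet */

        printf("now <= end               -> %d (claims the wait is already over)\n",
               now <= end);
        printf("time_before_eq(now, end) -> %d (still inside the window)\n",
               time_before_eq(now, end));
        return 0;
}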
*/ - if (parent_event) { + if (child_event->parent) { sync_child_event(child_event, child); free_event(child_event); } diff --git a/kernel/power/main.c b/kernel/power/main.c index b58800b..51e2643 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -266,6 +266,7 @@ static int __init pm_init(void) int error = pm_start_workqueue(); if (error) return error; + hibernate_image_size_init(); power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; diff --git a/kernel/power/power.h b/kernel/power/power.h index 006270f..54580cb 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -14,6 +14,9 @@ struct swsusp_info { } __attribute__((aligned(PAGE_SIZE))); #ifdef CONFIG_HIBERNATION +/* kernel/power/snapshot.c */ +extern void __init hibernate_image_size_init(void); + #ifdef CONFIG_ARCH_HIBERNATION_HEADER /* Maximum size of architecture specific data in a hibernation header */ #define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4) @@ -49,7 +52,11 @@ static inline char *check_image_kernel(struct swsusp_info *info) extern int hibernation_snapshot(int platform_mode); extern int hibernation_restore(int platform_mode); extern int hibernation_platform_enter(void); -#endif + +#else /* !CONFIG_HIBERNATION */ + +static inline void hibernate_image_size_init(void) {} +#endif /* !CONFIG_HIBERNATION */ extern int pfn_is_nosave(unsigned long); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 7fa52b5..4d8548d 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -46,7 +46,12 @@ static void swsusp_unset_page_forbidden(struct page *); * size will not exceed N bytes, but if that is impossible, it will * try to create the smallest image possible. */ -unsigned long image_size = 500 * 1024 * 1024; +unsigned long image_size; + +void __init hibernate_image_size_init(void) +{ + image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; +} /* List of PBEs needed for restoring the pages that were allocated before * the suspend and included in the suspend image, but have also been @@ -1318,12 +1323,14 @@ int hibernate_preallocate_memory(void) /* Compute the maximum number of saveable pages to leave in memory. */ max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; + /* Compute the desired number of image pages specified by image_size. */ size = DIV_ROUND_UP(image_size, PAGE_SIZE); if (size > max_size) size = max_size; /* - * If the maximum is not less than the current number of saveable pages - * in memory, allocate page frames for the image and we're done. + * If the desired number of image pages is at least as large as the + * current number of saveable pages in memory, allocate page frames for + * the image and we're done. 
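The new default computed here matches the documentation hunk at the top of this patch: image_size starts out at 2/5 of RAM instead of a fixed 500 MB. The same figure can be derived from user space when pre-seeding /sys/power/image_size (which takes bytes); a rough sketch using sysinfo(), ignoring the small rounding difference against the in-kernel page-based calculation:

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
        struct sysinfo si;
        unsigned long long bytes;

        if (sysinfo(&si))
                return 1;

        bytes = (unsigned long long)si.totalram * si.mem_unit;
        printf("suggested /sys/power/image_size: %llu\n", bytes / 5 * 2);
        return 0;
}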
*/ if (size >= saveable) { pages = preallocate_image_highmem(save_highmem); @@ -1512,11 +1519,8 @@ static int swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, unsigned int nr_pages, unsigned int nr_highmem) { - int error = 0; - if (nr_highmem > 0) { - error = get_highmem_buffer(PG_ANY); - if (error) + if (get_highmem_buffer(PG_ANY)) goto err_out; if (nr_highmem > alloc_highmem) { nr_highmem -= alloc_highmem; @@ -1539,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, err_out: swsusp_free(); - return error; + return -ENOMEM; } asmlinkage int swsusp_save(void) diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 74a3d69..752321e 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data) child->exit_code = data; dead = __ptrace_detach(current, child); if (!child->exit_state) - wake_up_process(child); + wake_up_state(child, TASK_TRACED | TASK_STOPPED); } write_unlock_irq(&tasklist_lock); diff --git a/kernel/sched.c b/kernel/sched.c index d4f71b0..d40d662 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -491,6 +491,7 @@ struct rq { struct mm_struct *prev_mm; u64 clock; + u64 clock_task; atomic_t nr_iowait; @@ -518,6 +519,10 @@ struct rq { u64 avg_idle; #endif +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + u64 prev_irq_time; +#endif + /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; @@ -641,10 +646,21 @@ static inline struct task_group *task_group(struct task_struct *p) #endif /* CONFIG_CGROUP_SCHED */ +static u64 irq_time_cpu(int cpu); +static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); + inline void update_rq_clock(struct rq *rq) { + int cpu = cpu_of(rq); + u64 irq_time; + if (!rq->skip_clock_update) rq->clock = sched_clock_cpu(cpu_of(rq)); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -1270,6 +1286,10 @@ static void resched_task(struct task_struct *p) static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } + +static void sched_avg_update(struct rq *rq) +{ +} #endif /* CONFIG_SMP */ #if BITS_PER_LONG == 32 @@ -1815,6 +1835,94 @@ static const struct sched_class rt_sched_class; #define for_each_class(class) \ for (class = sched_class_highest; class; class = class->next) +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + +/* + * There are no locks covering percpu hardirq/softirq time. + * They are only modified in account_system_vtime, on corresponding CPU + * with interrupts disabled. So, writes are safe. + * They are read and saved off onto struct rq in update_rq_clock(). + * This may result in other CPU reading this CPU's irq time and can + * race with irq/account_system_vtime on this CPU. We would either get old + * or new value (or semi updated value on 32 bit) with a side effect of + * accounting a slice of irq time to wrong task when irq is in progress + * while we read rq->clock. That is a worthy compromise in place of having + * locks on each irq in account_system_time. 
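The irq and softirq columns of /proc/stat give a coarse, tick-based view of the same interrupt activity that this finer-grained per-CPU accounting tracks (assuming CONFIG_IRQ_TIME_ACCOUNTING is enabled and not switched off at boot with the new "noirqtime" parameter). A quick way to watch those counters from user space, with the usual cpu-line field order user nice system idle iowait irq softirq:

#include <stdio.h>

int main(void)
{
        unsigned long long user, nice, system, idle, iowait, irq, softirq;
        FILE *f = fopen("/proc/stat", "r");

        if (!f)
                return 1;
        if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu",
                   &user, &nice, &system, &idle, &iowait, &irq, &softirq) == 7)
                printf("irq=%llu softirq=%llu (USER_HZ ticks)\n", irq, softirq);
        fclose(f);
        return 0;
}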
+ */ +static DEFINE_PER_CPU(u64, cpu_hardirq_time); +static DEFINE_PER_CPU(u64, cpu_softirq_time); + +static DEFINE_PER_CPU(u64, irq_start_time); +static int sched_clock_irqtime; + +void enable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 1; +} + +void disable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 0; +} + +static u64 irq_time_cpu(int cpu) +{ + if (!sched_clock_irqtime) + return 0; + + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); +} + +void account_system_vtime(struct task_struct *curr) +{ + unsigned long flags; + int cpu; + u64 now, delta; + + if (!sched_clock_irqtime) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + now = sched_clock_cpu(cpu); + delta = now - per_cpu(irq_start_time, cpu); + per_cpu(irq_start_time, cpu) = now; + /* + * We do not account for softirq time from ksoftirqd here. + * We want to continue accounting softirq time to ksoftirqd thread + * in that case, so as not to confuse scheduler with a special task + * that do not consume any time, but still wants to run. + */ + if (hardirq_count()) + per_cpu(cpu_hardirq_time, cpu) += delta; + else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) + per_cpu(cpu_softirq_time, cpu) += delta; + + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(account_system_vtime); + +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) +{ + if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { + u64 delta_irq = curr_irq_time - rq->prev_irq_time; + rq->prev_irq_time = curr_irq_time; + sched_rt_avg_update(rq, delta_irq); + } +} + +#else + +static u64 irq_time_cpu(int cpu) +{ + return 0; +} + +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } + +#endif + #include "sched_stats.h" static void inc_nr_running(struct rq *rq) @@ -1968,6 +2076,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) if (p->sched_class != &fair_sched_class) return 0; + if (unlikely(p->policy == SCHED_IDLE)) + return 0; + /* * Buddy candidates are cache hot: */ @@ -3152,6 +3263,8 @@ static void update_cpu_load(struct rq *this_rq) } calc_load_account_active(this_rq); + + sched_avg_update(this_rq); } #ifdef CONFIG_SMP @@ -3205,7 +3318,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) if (task_current(rq, p)) { update_rq_clock(rq); - ns = rq->clock - p->se.exec_start; + ns = rq->clock_task - p->se.exec_start; if ((s64)ns < 0) ns = 0; } @@ -3354,7 +3467,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); - else if (softirq_count()) + else if (in_serving_softirq()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else cpustat->system = cputime64_add(cpustat->system, tmp); @@ -5315,7 +5428,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) * The idle tasks have their own, simple scheduling class: */ idle->sched_class = &idle_sched_class; - ftrace_graph_init_task(idle); + ftrace_graph_init_idle_task(idle, cpu); } /* @@ -6726,6 +6839,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) if (cpu != group_first_cpu(sd->groups)) return; + sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); + child = sd->child; sd->groups->cpu_power = 0; @@ -8219,12 +8334,12 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); - set_task_rq(tsk, task_cpu(tsk)); - #ifdef 
CONFIG_FAIR_GROUP_SCHED - if (tsk->sched_class->moved_group) - tsk->sched_class->moved_group(tsk, on_rq); + if (tsk->sched_class->task_move_group) + tsk->sched_class->task_move_group(tsk, on_rq); + else #endif + set_task_rq(tsk, task_cpu(tsk)); if (unlikely(running)) tsk->sched_class->set_curr_task(rq); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index a878b53..09a9811 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -519,7 +519,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; - u64 now = rq_of(cfs_rq)->clock; + u64 now = rq_of(cfs_rq)->clock_task; unsigned long delta_exec; if (unlikely(!curr)) @@ -602,7 +602,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) /* * We are starting a new run period: */ - se->exec_start = rq_of(cfs_rq)->clock; + se->exec_start = rq_of(cfs_rq)->clock_task; } /************************************************** @@ -1765,6 +1765,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, set_task_cpu(p, this_cpu); activate_task(this_rq, p, 0); check_preempt_curr(this_rq, p, 0); + + /* re-arm NEWIDLE balancing when moving tasks */ + src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost; + this_rq->idle_stamp = 0; } /* @@ -1799,7 +1803,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, * 2) too many balance attempts have failed. */ - tsk_cache_hot = task_hot(p, rq->clock, sd); + tsk_cache_hot = task_hot(p, rq->clock_task, sd); if (!tsk_cache_hot || sd->nr_balance_failed > sd->cache_nice_tries) { #ifdef CONFIG_SCHEDSTATS @@ -2031,12 +2035,17 @@ struct sd_lb_stats { unsigned long this_load; unsigned long this_load_per_task; unsigned long this_nr_running; + unsigned long this_has_capacity; + unsigned int this_idle_cpus; /* Statistics of the busiest group */ + unsigned int busiest_idle_cpus; unsigned long max_load; unsigned long busiest_load_per_task; unsigned long busiest_nr_running; unsigned long busiest_group_capacity; + unsigned long busiest_has_capacity; + unsigned int busiest_group_weight; int group_imb; /* Is there imbalance in this sd */ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -2058,7 +2067,10 @@ struct sg_lb_stats { unsigned long sum_nr_running; /* Nr tasks running in the group */ unsigned long sum_weighted_load; /* Weighted load of group's tasks */ unsigned long group_capacity; + unsigned long idle_cpus; + unsigned long group_weight; int group_imb; /* Is there an imbalance in the group ? */ + int group_has_capacity; /* Is there extra capacity in the group? 
*/ }; /** @@ -2268,11 +2280,14 @@ unsigned long scale_rt_power(int cpu) struct rq *rq = cpu_rq(cpu); u64 total, available; - sched_avg_update(rq); - total = sched_avg_period() + (rq->clock - rq->age_stamp); - available = total - rq->rt_avg; + if (unlikely(total < rq->rt_avg)) { + /* Ensures that power won't end up being negative */ + available = 0; + } else { + available = total - rq->rt_avg; + } if (unlikely((s64)total < SCHED_LOAD_SCALE)) total = SCHED_LOAD_SCALE; @@ -2354,7 +2369,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, int local_group, const struct cpumask *cpus, int *balance, struct sg_lb_stats *sgs) { - unsigned long load, max_cpu_load, min_cpu_load; + unsigned long load, max_cpu_load, min_cpu_load, max_nr_running; int i; unsigned int balance_cpu = -1, first_idle_cpu = 0; unsigned long avg_load_per_task = 0; @@ -2365,6 +2380,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, /* Tally up the load of all CPUs in the group */ max_cpu_load = 0; min_cpu_load = ~0UL; + max_nr_running = 0; for_each_cpu_and(i, sched_group_cpus(group), cpus) { struct rq *rq = cpu_rq(i); @@ -2382,8 +2398,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, load = target_load(i, load_idx); } else { load = source_load(i, load_idx); - if (load > max_cpu_load) + if (load > max_cpu_load) { max_cpu_load = load; + max_nr_running = rq->nr_running; + } if (min_cpu_load > load) min_cpu_load = load; } @@ -2391,7 +2409,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, sgs->group_load += load; sgs->sum_nr_running += rq->nr_running; sgs->sum_weighted_load += weighted_cpuload(i); - + if (idle_cpu(i)) + sgs->idle_cpus++; } /* @@ -2423,11 +2442,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, if (sgs->sum_nr_running) avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; - if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) + if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1) sgs->group_imb = 1; - sgs->group_capacity = - DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); + sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); + sgs->group_weight = group->group_weight; + + if (sgs->group_capacity > sgs->sum_nr_running) + sgs->group_has_capacity = 1; } /** @@ -2474,9 +2496,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, /* * In case the child domain prefers tasks go to siblings * first, lower the group capacity to one so that we'll try - * and move all the excess tasks away. + * and move all the excess tasks away. We lower the capacity + * of a group only if the local group has the capacity to fit + * these excess tasks, i.e. nr_running < group_capacity. The + * extra check prevents the case where you always pull from the + * heaviest group when it is already under-utilized (possible + * with a large weight task outweighs the tasks on the system). 
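The explicit clamp added to scale_rt_power() above guards an unsigned subtraction: total and rq->rt_avg are u64, so if rt_avg ever exceeds the sampled period the old total - rq->rt_avg wrapped to a huge value and inflated cpu_power instead of going negative. The failure mode in miniature:

#include <stdio.h>

int main(void)
{
        unsigned long long total = 100, rt_avg = 130;

        printf("unclamped: total - rt_avg = %llu\n", total - rt_avg);   /* wraps */
        printf("clamped  : available     = %llu\n",
               total < rt_avg ? 0ULL : total - rt_avg);
        return 0;
}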
*/ - if (prefer_sibling) + if (prefer_sibling && !local_group && sds->this_has_capacity) sgs.group_capacity = min(sgs.group_capacity, 1UL); if (local_group) { @@ -2484,14 +2511,19 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, sds->this = group; sds->this_nr_running = sgs.sum_nr_running; sds->this_load_per_task = sgs.sum_weighted_load; + sds->this_has_capacity = sgs.group_has_capacity; + sds->this_idle_cpus = sgs.idle_cpus; } else if (sgs.avg_load > sds->max_load && (sgs.sum_nr_running > sgs.group_capacity || sgs.group_imb)) { sds->max_load = sgs.avg_load; sds->busiest = group; sds->busiest_nr_running = sgs.sum_nr_running; + sds->busiest_idle_cpus = sgs.idle_cpus; sds->busiest_group_capacity = sgs.group_capacity; + sds->busiest_group_weight = sgs.group_weight; sds->busiest_load_per_task = sgs.sum_weighted_load; + sds->busiest_has_capacity = sgs.group_has_capacity; sds->group_imb = sgs.group_imb; } @@ -2637,6 +2669,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, return fix_small_imbalance(sds, this_cpu, imbalance); } + /******* find_busiest_group() helpers end here *********************/ /** @@ -2688,6 +2721,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, * 4) This group is more busy than the avg busieness at this * sched_domain. * 5) The imbalance is within the specified limit. + * + * Note: when doing newidle balance, if the local group has excess + * capacity (i.e. nr_running < group_capacity) and the busiest group + * does not have any capacity, we force a load balance to pull tasks + * to the local group. In this case, we skip past checks 3, 4 and 5. */ if (!(*balance)) goto ret; @@ -2695,6 +2733,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (!sds.busiest || sds.busiest_nr_running == 0) goto out_balanced; + /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ + if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity && + !sds.busiest_has_capacity) + goto force_balance; + if (sds.this_load >= sds.max_load) goto out_balanced; @@ -2703,9 +2746,28 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (sds.this_load >= sds.avg_load) goto out_balanced; - if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) - goto out_balanced; + /* + * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative. + * And to check for busy balance use !idle_cpu instead of + * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE + * even when they are idle. + */ + if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) { + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) + goto out_balanced; + } else { + /* + * This cpu is idle. If the busiest group load doesn't + * have more tasks than the number of available cpu's and + * there is no imbalance between this and busiest group + * wrt to idle cpu's, it is balanced. + */ + if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) && + sds.busiest_nr_running <= sds.busiest_group_weight) + goto out_balanced; + } +force_balance: /* Looks like there is an imbalance. Compute it */ calculate_imbalance(&sds, this_cpu, imbalance); return sds.busiest; @@ -2896,7 +2958,14 @@ redo: if (!ld_moved) { schedstat_inc(sd, lb_failed[idle]); - sd->nr_balance_failed++; + /* + * Increment the failure counter only on periodic balance. + * We do not want newidle balance, which can be very + * frequent, pollute the failure counter causing + * excessive cache_hot migrations and active balances. 
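The force_balance path added to find_busiest_group() boils down to one extra predicate evaluated before the old checks 3-5. A stripped-down paraphrase with the relevant sd_lb_stats fields passed in as plain flags (a reading aid only, not the kernel code):

#include <stdio.h>

static int force_newidle_balance(int newly_idle, int this_has_capacity,
                                 int busiest_has_capacity)
{
        /* SD_BALANCE_NEWIDLE trumps SMP nice: pull when the local group
         * still has room and the busiest group is saturated */
        return newly_idle && this_has_capacity && !busiest_has_capacity;
}

int main(void)
{
        printf("%d\n", force_newidle_balance(1, 1, 0)); /* 1: skip to force_balance */
        printf("%d\n", force_newidle_balance(1, 1, 1)); /* 0: fall through to the usual checks */
        return 0;
}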
+ */ + if (idle != CPU_NEWLY_IDLE) + sd->nr_balance_failed++; if (need_active_balance(sd, sd_idle, idle)) { raw_spin_lock_irqsave(&busiest->lock, flags); @@ -3017,10 +3086,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq) interval = msecs_to_jiffies(sd->balance_interval); if (time_after(next_balance, sd->last_balance + interval)) next_balance = sd->last_balance + interval; - if (pulled_task) { - this_rq->idle_stamp = 0; + if (pulled_task) break; - } } raw_spin_lock(&this_rq->lock); @@ -3542,8 +3609,11 @@ static void task_fork_fair(struct task_struct *p) raw_spin_lock_irqsave(&rq->lock, flags); - if (unlikely(task_cpu(p) != this_cpu)) + if (unlikely(task_cpu(p) != this_cpu)) { + rcu_read_lock(); __set_task_cpu(p, this_cpu); + rcu_read_unlock(); + } update_curr(cfs_rq); @@ -3615,13 +3685,26 @@ static void set_curr_task_fair(struct rq *rq) } #ifdef CONFIG_FAIR_GROUP_SCHED -static void moved_group_fair(struct task_struct *p, int on_rq) +static void task_move_group_fair(struct task_struct *p, int on_rq) { - struct cfs_rq *cfs_rq = task_cfs_rq(p); - - update_curr(cfs_rq); + /* + * If the task was not on the rq at the time of this cgroup movement + * it must have been asleep, sleeping tasks keep their ->vruntime + * absolute on their old rq until wakeup (needed for the fair sleeper + * bonus in place_entity()). + * + * If it was on the rq, we've just 'preempted' it, which does convert + * ->vruntime to a relative base. + * + * Make sure both cases convert their relative position when migrating + * to another cgroup's rq. This does somewhat interfere with the + * fair sleeper stuff for the first placement, but who cares. + */ + if (!on_rq) + p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; + set_task_rq(p, task_cpu(p)); if (!on_rq) - place_entity(cfs_rq, &p->se, 1); + p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; } #endif @@ -3673,7 +3756,7 @@ static const struct sched_class fair_sched_class = { .get_rr_interval = get_rr_interval_fair, #ifdef CONFIG_FAIR_GROUP_SCHED - .moved_group = moved_group_fair, + .task_move_group = task_move_group_fair, #endif }; diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 83c66e8..185f920 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1) * release the lock. Decreases scheduling overhead. */ SCHED_FEAT(OWNER_SPIN, 1) + +/* + * Decrement CPU power based on irq activity + */ +SCHED_FEAT(NONIRQ_POWER, 1) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 8afb953..a851cc0 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -609,7 +609,7 @@ static void update_curr_rt(struct rq *rq) if (!task_has_rt_policy(curr)) return; - delta_exec = rq->clock - curr->se.exec_start; + delta_exec = rq->clock_task - curr->se.exec_start; if (unlikely((s64)delta_exec < 0)) delta_exec = 0; @@ -618,7 +618,7 @@ static void update_curr_rt(struct rq *rq) curr->se.sum_exec_runtime += delta_exec; account_group_exec_runtime(curr, delta_exec); - curr->se.exec_start = rq->clock; + curr->se.exec_start = rq->clock_task; cpuacct_charge(curr, delta_exec); sched_rt_avg_update(rq, delta_exec); @@ -960,18 +960,19 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) * runqueue. Otherwise simply start this RT task * on its current runqueue. * - * We want to avoid overloading runqueues. Even if - * the RT task is of higher priority than the current RT task. - * RT tasks behave differently than other tasks. 
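The vruntime handling in task_move_group_fair() above is a change of reference frame: a task that is not on a runqueue has its vruntime made relative to the old cfs_rq's min_vruntime and re-based on the new one, so its position relative to the pack survives the cgroup move. In numbers (values are made up, nanoseconds of virtual runtime):

#include <stdio.h>

int main(void)
{
        unsigned long long old_min = 1000000, new_min = 5000000;
        unsigned long long vruntime = 1200000;  /* 200000 beyond old_min */

        vruntime -= old_min;                    /* relative: 200000 */
        vruntime += new_min;                    /* re-based: 5200000 */

        printf("vruntime after move = %llu (offset kept at %llu)\n",
               vruntime, vruntime - new_min);
        return 0;
}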
If - * one gets preempted, we try to push it off to another queue. - * So trying to keep a preempting RT task on the same - * cache hot CPU will force the running RT task to - * a cold CPU. So we waste all the cache for the lower - * RT task in hopes of saving some of a RT task - * that is just being woken and probably will have - * cold cache anyway. + * We want to avoid overloading runqueues. If the woken + * task is a higher priority, then it will stay on this CPU + * and the lower prio task should be moved to another CPU. + * Even though this will probably make the lower prio task + * lose its cache, we do not want to bounce a higher task + * around just because it gave up its CPU, perhaps for a + * lock? + * + * For equal prio tasks, we just let the scheduler sort it out. */ if (unlikely(rt_task(rq->curr)) && + (rq->curr->rt.nr_cpus_allowed < 2 || + rq->curr->prio < p->prio) && (p->rt.nr_cpus_allowed > 1)) { int cpu = find_lowest_rq(p); @@ -1074,7 +1075,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) } while (rt_rq); p = rt_task_of(rt_se); - p->se.exec_start = rq->clock; + p->se.exec_start = rq->clock_task; return p; } @@ -1491,7 +1492,10 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && has_pushable_tasks(rq) && - p->rt.nr_cpus_allowed > 1) + p->rt.nr_cpus_allowed > 1 && + rt_task(rq->curr) && + (rq->curr->rt.nr_cpus_allowed < 2 || + rq->curr->prio < p->prio)) push_rt_tasks(rq); } @@ -1712,7 +1716,7 @@ static void set_curr_task_rt(struct rq *rq) { struct task_struct *p = rq->curr; - p->se.exec_start = rq->clock; + p->se.exec_start = rq->clock_task; /* The running task is never eligible for pushing */ dequeue_pushable_task(rq, p); diff --git a/kernel/signal.c b/kernel/signal.c index bded651..fdecae0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2410,9 +2410,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, return -EFAULT; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. */ - if (info.si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ + if (info.si_code >= 0 || info.si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info.si_code < 0); return -EPERM; + } info.si_signo = sig; /* POSIX.1b doesn't mention process groups. */ @@ -2426,9 +2430,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) return -EINVAL; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. */ - if (info->si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ + if (info->si_code >= 0 || info->si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info->si_code < 0); return -EPERM; + } info->si_signo = sig; return do_send_specific(tgid, pid, sig, info); diff --git a/kernel/smp.c b/kernel/smp.c index 75c970c..b89ef31 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -194,6 +194,24 @@ void generic_smp_call_function_interrupt(void) list_for_each_entry_rcu(data, &call_function.queue, csd.list) { int refs; + /* + * Since we walk the list without any locks, we might + * see an entry that was completed, removed from the + * list and is in the process of being reused. + * + * We must check that the cpu is in the cpumask before + * checking the refs, and both must be set before + * executing the callback on this cpu. 
+ */ + + if (!cpumask_test_cpu(cpu, data->cpumask)) + continue; + + smp_rmb(); + + if (atomic_read(&data->refs) == 0) + continue; + if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) continue; @@ -409,7 +427,7 @@ void smp_call_function_many(const struct cpumask *mask, { struct call_function_data *data; unsigned long flags; - int cpu, next_cpu, this_cpu = smp_processor_id(); + int refs, cpu, next_cpu, this_cpu = smp_processor_id(); /* * Can deadlock when called with interrupts disabled. @@ -420,7 +438,7 @@ void smp_call_function_many(const struct cpumask *mask, WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress); - /* So, what's a CPU they want? Ignoring this one. */ + /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */ cpu = cpumask_first_and(mask, cpu_online_mask); if (cpu == this_cpu) cpu = cpumask_next_and(cpu, mask, cpu_online_mask); @@ -443,11 +461,48 @@ void smp_call_function_many(const struct cpumask *mask, data = &__get_cpu_var(cfd_data); csd_lock(&data->csd); + /* This BUG_ON verifies our reuse assertions and can be removed */ + BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); + + /* + * The global call function queue list add and delete are protected + * by a lock, but the list is traversed without any lock, relying + * on the rcu list add and delete to allow safe concurrent traversal. + * We reuse the call function data without waiting for any grace + * period after some other cpu removes it from the global queue. + * This means a cpu might find our data block as it is being + * filled out. + * + * We hold off the interrupt handler on the other cpu by + * ordering our writes to the cpu mask vs our setting of the + * refs counter. We assert only the cpu owning the data block + * will set a bit in cpumask, and each bit will only be cleared + * by the subject cpu. Each cpu must first find its bit is + * set and then check that refs is set indicating the element is + * ready to be processed, otherwise it must skip the entry. + * + * On the previous iteration refs was set to 0 by another cpu. + * To avoid the use of transitivity, set the counter to 0 here + * so the wmb will pair with the rmb in the interrupt handler. + */ + atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */ + data->csd.func = func; data->csd.info = info; + + /* Ensure 0 refs is visible before mask. Also orders func and info */ + smp_wmb(); + + /* We rely on the "and" being processed before the store */ cpumask_and(data->cpumask, mask, cpu_online_mask); cpumask_clear_cpu(this_cpu, data->cpumask); - atomic_set(&data->refs, cpumask_weight(data->cpumask)); + refs = cpumask_weight(data->cpumask); + + /* Some callers race with other cpus changing the passed mask */ + if (unlikely(!refs)) { + csd_unlock(&data->csd); + return; + } raw_spin_lock_irqsave(&call_function.lock, flags); /* @@ -456,6 +511,12 @@ void smp_call_function_many(const struct cpumask *mask, * will not miss any other list entries: */ list_add_rcu(&data->csd.list, &call_function.queue); + /* + * We rely on the wmb() in list_add_rcu to complete our writes + * to the cpumask before this write to refs, which indicates + * data is on the list and is ready to be processed. 
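The ordering contract spelled out above (writer: fill the element, smp_wmb(), then set refs; reader: see its cpumask bit, smp_rmb(), then check refs) is the standard publish pattern. A self-contained user-space rendition with C11 atomics, where a release store stands in for the wmb-plus-store and an acquire load for the load-plus-rmb (illustrative, not the kernel primitives; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;             /* stands in for cpumask/func/info */
static atomic_int refs;         /* stands in for data->refs */

static void *reader(void *arg)
{
        (void)arg;
        /* acquire: once refs is seen non-zero, the payload writes are visible */
        while (atomic_load_explicit(&refs, memory_order_acquire) == 0)
                ;
        printf("payload = %d\n", payload);      /* guaranteed to be 42 */
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, reader, NULL);

        payload = 42;                                           /* fill the element */
        atomic_store_explicit(&refs, 1, memory_order_release);  /* publish */

        pthread_join(t, NULL);
        return 0;
}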
+ */ + atomic_set(&data->refs, refs); raw_spin_unlock_irqrestore(&call_function.lock, flags); /* diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b..79ee8f1 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -77,11 +77,21 @@ void wakeup_softirqd(void) } /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving + * softirq processing. + * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) + * on local_bh_disable or local_bh_enable. + * This lets us distinguish between whether we are currently processing + * softirq and whether we just have bh disabled. + */ + +/* * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: */ #ifdef CONFIG_TRACE_IRQFLAGS -static void __local_bh_disable(unsigned long ip) +static void __local_bh_disable(unsigned long ip, unsigned int cnt) { unsigned long flags; @@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned long ip) * We must manually increment preempt_count here and manually * call the trace_preempt_off later. */ - preempt_count() += SOFTIRQ_OFFSET; + preempt_count() += cnt; /* * Were softirqs turned off above: */ - if (softirq_count() == SOFTIRQ_OFFSET) + if (softirq_count() == cnt) trace_softirqs_off(ip); raw_local_irq_restore(flags); - if (preempt_count() == SOFTIRQ_OFFSET) + if (preempt_count() == cnt) trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); } #else /* !CONFIG_TRACE_IRQFLAGS */ -static inline void __local_bh_disable(unsigned long ip) +static inline void __local_bh_disable(unsigned long ip, unsigned int cnt) { - add_preempt_count(SOFTIRQ_OFFSET); + add_preempt_count(cnt); barrier(); } #endif /* CONFIG_TRACE_IRQFLAGS */ void local_bh_disable(void) { - __local_bh_disable((unsigned long)__builtin_return_address(0)); + __local_bh_disable((unsigned long)__builtin_return_address(0), + SOFTIRQ_DISABLE_OFFSET); } EXPORT_SYMBOL(local_bh_disable); +static void __local_bh_enable(unsigned int cnt) +{ + WARN_ON_ONCE(in_irq()); + WARN_ON_ONCE(!irqs_disabled()); + + if (softirq_count() == cnt) + trace_softirqs_on((unsigned long)__builtin_return_address(0)); + sub_preempt_count(cnt); +} + /* * Special-case - softirqs can safely be enabled in * cond_resched_softirq(), or by __do_softirq(), @@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable); */ void _local_bh_enable(void) { - WARN_ON_ONCE(in_irq()); - WARN_ON_ONCE(!irqs_disabled()); - - if (softirq_count() == SOFTIRQ_OFFSET) - trace_softirqs_on((unsigned long)__builtin_return_address(0)); - sub_preempt_count(SOFTIRQ_OFFSET); + __local_bh_enable(SOFTIRQ_DISABLE_OFFSET); } EXPORT_SYMBOL(_local_bh_enable); @@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip) /* * Are softirqs going to be turned on now: */ - if (softirq_count() == SOFTIRQ_OFFSET) + if (softirq_count() == SOFTIRQ_DISABLE_OFFSET) trace_softirqs_on(ip); /* * Keep preemption disabled until we are done with * softirq processing: */ - sub_preempt_count(SOFTIRQ_OFFSET - 1); + sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1); if (unlikely(!in_interrupt() && local_softirq_pending())) do_softirq(); @@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void) pending = local_softirq_pending(); account_system_vtime(current); - __local_bh_disable((unsigned long)__builtin_return_address(0)); + __local_bh_disable((unsigned long)__builtin_return_address(0), + SOFTIRQ_OFFSET); lockdep_softirq_enter(); cpu = smp_processor_id(); @@ -245,7 +262,7 @@ restart: lockdep_softirq_exit(); 
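The kernel/smp.c hunks above rely on a publish/consume ordering protocol: the sender zeroes refs, fills in func/info and its cpumask, and only then sets refs to the final count, with write barriers in between (one of them supplied by list_add_rcu()); each receiver tests its cpumask bit, issues a read barrier, and skips the element while refs is still zero. A rough userspace analogue, with C11 fences standing in for smp_wmb()/smp_rmb() and purely illustrative names (publish, consume, cpu_wants) — a sketch of the idea, not kernel code — could look like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct call_data {
	atomic_int refs;		/* 0 means "not yet ready for consumers" */
	atomic_bool cpu_wants;		/* stands in for this cpu's bit in data->cpumask */
	void (*func)(void *);
	void *info;
};

/* sender side, loosely mirroring smp_call_function_many() */
static void publish(struct call_data *d, void (*func)(void *), void *info)
{
	atomic_store_explicit(&d->refs, 0, memory_order_relaxed);	/* first-party zero */
	d->func = func;
	d->info = info;
	atomic_thread_fence(memory_order_release);	/* plays the role of smp_wmb() */
	atomic_store_explicit(&d->cpu_wants, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* list_add_rcu()'s barrier in the kernel */
	atomic_store_explicit(&d->refs, 1, memory_order_relaxed);	/* element is now live */
}

/* receiver side, loosely mirroring generic_smp_call_function_interrupt() */
static bool consume(struct call_data *d)
{
	if (!atomic_load_explicit(&d->cpu_wants, memory_order_relaxed))
		return false;				/* entry is not aimed at this cpu */
	atomic_thread_fence(memory_order_acquire);	/* plays the role of smp_rmb() */
	if (atomic_load_explicit(&d->refs, memory_order_relaxed) == 0)
		return false;				/* still being filled in: skip it */
	d->func(d->info);				/* func and info are published by now */
	return true;
}

static void say(void *msg)
{
	printf("%s\n", (char *)msg);
}

int main(void)
{
	struct call_data d = { 0 };

	publish(&d, say, "callback ran");
	return consume(&d) ? 0 : 1;
}

Run single-threaded it simply prints the callback message; the point is the order of the stores and loads, which is what keeps a sender reusing the element from exposing a half-filled block to a receiver walking the list without locks.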
account_system_vtime(current); - _local_bh_enable(); + __local_bh_enable(SOFTIRQ_OFFSET); } #ifndef __ARCH_HAS_DO_SOFTIRQ @@ -279,10 +296,16 @@ void irq_enter(void) rcu_irq_enter(); if (idle_cpu(cpu) && !in_interrupt()) { - __irq_enter(); + /* + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. + */ + local_bh_disable(); tick_check_idle(cpu); - } else - __irq_enter(); + _local_bh_enable(); + } + + __irq_enter(); } #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED @@ -696,6 +719,7 @@ static int run_ksoftirqd(void * __bind_cpu) { set_current_state(TASK_INTERRUPTIBLE); + current->flags |= PF_KSOFTIRQD; while (!kthread_should_stop()) { preempt_disable(); if (!local_softirq_pending()) { diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index b3bafd5..2030f62 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void) return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; } +/* + * Check whether the broadcast device supports oneshot. + */ +bool tick_broadcast_oneshot_available(void) +{ + struct clock_event_device *bc = tick_broadcast_device.evtdev; + + return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; +} + #endif diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index b6b898d..61e296b 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -51,7 +51,11 @@ int tick_is_oneshot_available(void) { struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; - return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); + if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) + return 0; + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) + return 1; + return tick_broadcast_oneshot_available(); } /* diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 290eefb..f65d3a7 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); extern int tick_broadcast_oneshot_active(void); extern void tick_check_oneshot_broadcast(int cpu); +bool tick_broadcast_oneshot_available(void); # else /* BROADCAST */ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { @@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { } static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } static inline int tick_broadcast_oneshot_active(void) { return 0; } static inline void tick_check_oneshot_broadcast(int cpu) { } +static inline bool tick_broadcast_oneshot_available(void) { return true; } # endif /* !BROADCAST */ #else /* !ONESHOT */ @@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) return 0; } static inline int tick_broadcast_oneshot_active(void) { return 0; } +static inline bool tick_broadcast_oneshot_available(void) { return false; } #endif /* !TICK_ONESHOT */ /* diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6b316b3..d9aaa9b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3290,7 +3290,7 @@ static int start_graph_tracing(void) /* The cpu_boot init_task->ret_stack will never be freed */ for_each_online_cpu(cpu) { if (!idle_task(cpu)->ret_stack) - ftrace_graph_init_task(idle_task(cpu)); + ftrace_graph_init_idle_task(idle_task(cpu), cpu); } do { @@ -3380,6 +3380,49 @@ void 
unregister_ftrace_graph(void) mutex_unlock(&ftrace_lock); } +static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + +static void +graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) +{ + atomic_set(&t->tracing_graph_pause, 0); + atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visable before we add the ret_stack */ + smp_wmb(); + t->ret_stack = ret_stack; +} + +/* + * Allocate a return stack for the idle task. May be the first + * time through, or it may be done by CPU hotplug online. + */ +void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) +{ + t->curr_ret_stack = -1; + /* + * The idle task has no parent, it either has its own + * stack or no stack at all. + */ + if (t->ret_stack) + WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); + + if (ftrace_graph_active) { + struct ftrace_ret_stack *ret_stack; + + ret_stack = per_cpu(idle_ret_stack, cpu); + if (!ret_stack) { + ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH + * sizeof(struct ftrace_ret_stack), + GFP_KERNEL); + if (!ret_stack) + return; + per_cpu(idle_ret_stack, cpu) = ret_stack; + } + graph_init_task(t, ret_stack); + } +} + /* Allocate a return stack for newly created task */ void ftrace_graph_init_task(struct task_struct *t) { @@ -3395,12 +3438,7 @@ void ftrace_graph_init_task(struct task_struct *t) GFP_KERNEL); if (!ret_stack) return; - atomic_set(&t->tracing_graph_pause, 0); - atomic_set(&t->trace_overrun, 0); - t->ftrace_timestamp = 0; - /* make curr_ret_stack visable before we add the ret_stack */ - smp_wmb(); - t->ret_stack = ret_stack; + graph_init_task(t, ret_stack); } } diff --git a/kernel/user.c b/kernel/user.c index 8ce395f..5c598ca 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -91,6 +91,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) * upon function exit. */ static void free_user(struct user_struct *up, unsigned long flags) + __releases(&uidhash_lock) { uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); diff --git a/mm/mremap.c b/mm/mremap.c index cde56ee..10d5f62 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -91,9 +91,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, */ mapping = vma->vm_file->f_mapping; spin_lock(&mapping->i_mmap_lock); - if (new_vma->vm_truncate_count && - new_vma->vm_truncate_count != vma->vm_truncate_count) - new_vma->vm_truncate_count = 0; + new_vma->vm_truncate_count = 0; } /* diff --git a/mm/shmem.c b/mm/shmem.c index f65f840..676eed3 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2756,5 +2756,6 @@ int shmem_zero_setup(struct vm_area_struct *vma) fput(vma->vm_file); vma->vm_file = file; vma->vm_ops = &shmem_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; return 0; } diff --git a/mm/slab.c b/mm/slab.c index e4f747f..91e691b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2289,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (ralign < align) { ralign = align; } - /* disable debug if not aligning with REDZONE_ALIGN */ - if (ralign & (__alignof__(unsigned long long) - 1)) + /* disable debug if necessary */ + if (ralign > __alignof__(unsigned long long)) flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. 
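In the mm/slab.c hunk just above, the condition for turning off SLAB_RED_ZONE/SLAB_STORE_USER debugging changes from "ralign is not a multiple of the unsigned long long alignment" to "ralign is larger than it", presumably because the follow-up hunk below lays out the red-zone words in fixed sizeof(unsigned long long) units. A small standalone comparison of the two predicates (GCC's __alignof__ is assumed, typically 8 for unsigned long long):

#include <stdio.h>

int main(void)
{
	unsigned long aligns[] = { 1, 2, 4, 8, 16, 32, 64 };
	unsigned long ull_align = __alignof__(unsigned long long);
	unsigned int i;

	for (i = 0; i < sizeof(aligns) / sizeof(aligns[0]); i++) {
		unsigned long ralign = aligns[i];
		int old_disable = (ralign & (ull_align - 1)) != 0;	/* old predicate */
		int new_disable = ralign > ull_align;			/* new predicate */

		printf("ralign=%2lu  old: %s  new: %s\n", ralign,
		       old_disable ? "disable debug" : "keep debug",
		       new_disable ? "disable debug" : "keep debug");
	}
	return 0;
}

For example, a requested alignment of 16 or 32 used to keep debugging enabled even though it cannot be honoured once the object offset grows by a single word, while small alignments such as 2 or 4 used to disable debugging unnecessarily.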
@@ -2316,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, */ if (flags & SLAB_RED_ZONE) { /* add space for red zone words */ - cachep->obj_offset += align; - size += align + sizeof(unsigned long long); + cachep->obj_offset += sizeof(unsigned long long); + size += 2 * sizeof(unsigned long long); } if (flags & SLAB_STORE_USER) { /* user store requires one word storage behind the end of diff --git a/net/core/dev.c b/net/core/dev.c index d7d352b..7c3e7c3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1121,13 +1121,20 @@ EXPORT_SYMBOL(netdev_bonding_change); void dev_load(struct net *net, const char *name) { struct net_device *dev; + int no_module; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); - - if (!dev && capable(CAP_NET_ADMIN)) - request_module("%s", name); + no_module = !dev; + if (no_module && capable(CAP_NET_ADMIN)) + no_module = request_module("netdev-%s", name); + if (no_module && capable(CAP_SYS_MODULE)) { + if (!request_module("%s", name)) + pr_err("Loading kernel module for a network device " +"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " +"instead\n", name); + } } EXPORT_SYMBOL(dev_load); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a2af24..04b3dcf 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1545,6 +1545,9 @@ replay: snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); dest_net = rtnl_link_get_net(net, tb); + if (IS_ERR(dest_net)) + return PTR_ERR(dest_net); + dev = rtnl_create_link(net, dest_net, ifname, ops, tb); if (IS_ERR(dev)) diff --git a/net/dccp/input.c b/net/dccp/input.c index 6beb6a7..ca16869 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -617,6 +617,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* Caller (dccp_v4_do_rcv) will send Reset */ dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; return 1; + } else if (sk->sk_state == DCCP_CLOSED) { + dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; + return 1; } if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { @@ -679,10 +682,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } switch (sk->sk_state) { - case DCCP_CLOSED: - dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; - return 1; - case DCCP_REQUESTING: queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); if (queued >= 0) diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index da14c49..ff6c372 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1029,6 +1029,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu) return mtu >= 68; } +static void inetdev_send_gratuitous_arp(struct net_device *dev, + struct in_device *in_dev) + +{ + struct in_ifaddr *ifa = in_dev->ifa_list; + + if (!ifa) + return; + + arp_send(ARPOP_REQUEST, ETH_P_ARP, + ifa->ifa_address, dev, + ifa->ifa_address, NULL, + dev->dev_addr, NULL); +} + /* Called only under RTNL semaphore */ static int inetdev_event(struct notifier_block *this, unsigned long event, @@ -1081,18 +1096,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, } ip_mc_up(in_dev); /* fall through */ - case NETDEV_NOTIFY_PEERS: case NETDEV_CHANGEADDR: + if (!IN_DEV_ARP_NOTIFY(in_dev)) + break; + /* fall through */ + case NETDEV_NOTIFY_PEERS: /* Send gratuitous ARP to notify of link change */ - if (IN_DEV_ARP_NOTIFY(in_dev)) { - struct in_ifaddr *ifa = in_dev->ifa_list; - - if (ifa) - arp_send(ARPOP_REQUEST, ETH_P_ARP, - ifa->ifa_address, dev, - ifa->ifa_address, NULL, - 
dev->dev_addr, NULL); - } + inetdev_send_gratuitous_arp(dev, in_dev); break; case NETDEV_DOWN: ip_mc_down(in_dev); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 32618e1..cd527c4 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1701,3 +1701,4 @@ module_exit(ipgre_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("gre"); MODULE_ALIAS_RTNL_LINK("gretap"); +MODULE_ALIAS_NETDEV("gre0"); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 7fd6367..5d8e362 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -850,3 +850,4 @@ static void __exit ipip_fini(void) module_init(ipip_init); module_exit(ipip_fini); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETDEV("tunl0"); diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index e1be7dd..1dcf5d2 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c @@ -60,12 +60,12 @@ static int checkentry(const struct xt_tgchk_param *par) if (mangle->flags & ~ARPT_MANGLE_MASK || !(mangle->flags & ARPT_MANGLE_MASK)) - return false; + return -EINVAL; if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT && mangle->target != ARPT_CONTINUE) - return false; - return true; + return -EINVAL; + return 0; } static struct xt_target arpt_mangle_reg __read_mostly = { diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 8f39893..81b7887 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -57,6 +57,7 @@ MODULE_AUTHOR("Ville Nuorvala"); MODULE_DESCRIPTION("IPv6 tunneling device"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETDEV("ip6tnl0"); #define IPV6_TLV_TEL_DST_SIZE 8 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e51e650..2020bea 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1239,4 +1239,4 @@ static int __init sit_init(void) module_init(sit_init); module_exit(sit_cleanup); MODULE_LICENSE("GPL"); -MODULE_ALIAS("sit0"); +MODULE_ALIAS_NETDEV("sit0"); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ba9360a..cd6999e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -239,6 +239,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; sta->sdata = sdata; + sta->last_rx = jiffies; if (sta_prepare_rate_control(local, sta, gfp)) { kfree(sta); diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 7df37fd..2a2a20e 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister); int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) { + if (pf >= ARRAY_SIZE(nf_loggers)) + return -EINVAL; mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); @@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(u_int8_t pf) { + if (pf >= ARRAY_SIZE(nf_loggers)) + return; mutex_lock(&nf_log_mutex); rcu_assign_pointer(nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 54d7308..eb16478 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1323,19 +1323,26 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; - if (NULL == siocb->scm) + if (NULL == siocb->scm) { siocb->scm = &scm; + memset(&scm, 0, sizeof(scm)); + } + err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; if (msg->msg_namelen) { - if (addr->nl_family != AF_NETLINK) - return -EINVAL; + if (addr->nl_family != 
AF_NETLINK) { + err = -EINVAL; + goto out; + } dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); - if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) - return -EPERM; + if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) { + err = -EPERM; + goto out; + } } else { dst_pid = nlk->dst_pid; dst_group = nlk->dst_group; @@ -1387,6 +1394,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); out: + scm_destroy(siocb->scm); + siocb->scm = NULL; return err; } diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 08be223..d49c40f 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -121,7 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, * calls by looking at the number of nested bh disable calls because * softirqs always disables bh. */ - if (softirq_count() != SOFTIRQ_OFFSET) { + if (in_serving_softirq()) { /* If there is an sk_classid we'll use that. */ if (!skb->sk) return -1; diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 8636639..ddbbf7c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -543,16 +543,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) id = ntohs(hmacs->hmac_ids[i]); /* Check the id is in the supported range */ - if (id > SCTP_AUTH_HMAC_ID_MAX) + if (id > SCTP_AUTH_HMAC_ID_MAX) { + id = 0; continue; + } /* See is we support the id. Supported IDs have name and * length fields set, so that we can allocated and use * them. We can safely just check for name, for without the * name, we can't allocate the TFM. */ - if (!sctp_hmac_list[id].hmac_name) + if (!sctp_hmac_list[id].hmac_name) { + id = 0; continue; + } break; } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 4a843b8..c55f753 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -637,14 +637,12 @@ static void __rpc_execute(struct rpc_task *task) save_callback = task->tk_callback; task->tk_callback = NULL; save_callback(task); - } - - /* - * Perform the next FSM step. - * tk_action may be NULL when the task has been killed - * by someone else. - */ - if (!RPC_IS_QUEUED(task)) { + } else { + /* + * Perform the next FSM step. + * tk_action may be NULL when the task has been killed + * by someone else. 
+ */ if (task->tk_action == NULL) break; task->tk_action(task); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7ca65c7..02fbb8d 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; + transport->srcport = 0; + write_lock_bh(&sk->sk_callback_lock); transport->inet = NULL; transport->sock = NULL; diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 93d89d6..bbaab4b 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -53,8 +53,10 @@ static bool can_beacon_sec_chan(struct wiphy *wiphy, switch (channel_type) { case NL80211_CHAN_HT40PLUS: diff = 20; + break; case NL80211_CHAN_HT40MINUS: diff = -20; + break; default: return false; } diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index b25c646..88048b6 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -392,9 +392,12 @@ void __exit x25_link_free(void) write_lock_bh(&x25_neigh_list_lock); list_for_each_safe(entry, tmp, &x25_neigh_list) { + struct net_device *dev; + nb = list_entry(entry, struct x25_neigh, node); + dev = nb->dev; __x25_remove_neigh(nb); - dev_put(nb->dev); + dev_put(dev); } write_unlock_bh(&x25_neigh_list_lock); } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 5c9f25b..7b765f0 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2573,7 +2573,10 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, sid = tsec->sid; newsid = tsec->create_sid; - if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { + if ((sbsec->flags & SE_SBINITIALIZED) && + (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) + newsid = sbsec->mntpoint_sid; + else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { rc = security_transition_sid(sid, dsec->sid, inode_mode_to_security_class(inode->i_mode), &newsid); @@ -3231,7 +3234,11 @@ static void selinux_cred_free(struct cred *cred) { struct task_security_struct *tsec = cred->security; - BUG_ON((unsigned long) cred->security < PAGE_SIZE); + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. 
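The net/x25/x25_link.c change a little above follows a common pattern for avoiding use-after-free: __x25_remove_neigh() can end up dropping the last reference to the neighbour, so the device pointer is copied out of the structure before that call and dev_put() is then done on the copy. A hypothetical, heavily simplified illustration (none of these types or helpers are the real x25 ones):

#include <stdio.h>
#include <stdlib.h>

struct device { int refcnt; };
struct neigh  { int refcnt; struct device *dev; };

static void dev_put(struct device *dev)
{
	dev->refcnt--;
}

static void neigh_put(struct neigh *nb)
{
	if (--nb->refcnt == 0)
		free(nb);			/* the node can vanish here */
}

static void remove_neigh(struct neigh *nb)
{
	/* unlink from its list, then drop the list's reference */
	neigh_put(nb);
}

int main(void)
{
	struct device dev = { .refcnt = 1 };
	struct neigh *nb = malloc(sizeof(*nb));
	struct device *d;

	if (!nb)
		return 1;
	nb->refcnt = 1;
	nb->dev = &dev;

	d = nb->dev;			/* read it while nb is still valid */
	remove_neigh(nb);		/* may free nb */
	dev_put(d);			/* the buggy ordering did dev_put(nb->dev) here */

	printf("device refcount is now %d\n", dev.refcnt);
	return 0;
}

In the buggy ordering the final dev_put(nb->dev) could read nb after it had already been freed.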
+ */ + BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE); cred->security = (void *) 0x7UL; kfree(tsec); } diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 75ec0c6..8b02b21 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -65,6 +65,8 @@ static struct nlmsg_perm nlmsg_route_perms[] = { RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, }; static struct nlmsg_perm nlmsg_firewall_perms[] = diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c index 7730575..07efa29 100644 --- a/sound/core/hrtimer.c +++ b/sound/core/hrtimer.c @@ -45,12 +45,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) { struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt); struct snd_timer *t = stime->timer; + unsigned long oruns; if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; - hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); - snd_timer_interrupt(stime->timer, t->sticks); + oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); + snd_timer_interrupt(stime->timer, t->sticks * oruns); if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; diff --git a/sound/core/init.c b/sound/core/init.c index ec4a50c..82f350e 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -848,6 +848,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file) return -ENOMEM; mfile->file = file; mfile->disconnected_f_op = NULL; + INIT_LIST_HEAD(&mfile->shutdown_list); spin_lock(&card->files_lock); if (card->shutdown) { spin_unlock(&card->files_lock); @@ -883,6 +884,9 @@ int snd_card_file_remove(struct snd_card *card, struct file *file) list_for_each_entry(mfile, &card->files_list, list) { if (mfile->file == file) { list_del(&mfile->list); + spin_lock(&shutdown_lock); + list_del(&mfile->shutdown_list); + spin_unlock(&shutdown_lock); if (mfile->disconnected_f_op) fops_put(mfile->disconnected_f_op); found = mfile; diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c index 938c48c..f4ffdff 100644 --- a/sound/oss/opl3.c +++ b/sound/oss/opl3.c @@ -849,6 +849,10 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, static void opl3_panning(int dev, int voice, int value) { + + if (voice < 0 || voice >= devc->nr_voice) + return; + devc->voc[voice].panning = value; } @@ -1066,8 +1070,15 @@ static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info static void opl3_setup_voice(int dev, int voice, int chn) { - struct channel_info *info = - &synth_devs[dev]->chn_info[chn]; + struct channel_info *info; + + if (voice < 0 || voice >= devc->nr_voice) + return; + + if (chn < 0 || chn > 15) + return; + + info = &synth_devs[dev]->chn_info[chn]; opl3_set_instr(dev, voice, info->pgm_num); diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c index b9d2f20..5439d66 100644 --- a/sound/pci/au88x0/au88x0_pcm.c +++ b/sound/pci/au88x0/au88x0_pcm.c @@ -42,11 +42,7 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_adb = { .rate_min = 5000, .rate_max = 48000, .channels_min = 1, -#ifdef CHIP_AU8830 - .channels_max = 4, -#else .channels_max = 2, -#endif .buffer_bytes_max = 0x10000, .period_bytes_min = 0x1, .period_bytes_max = 0x1000, @@ -115,6 +111,17 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_wt = { 
.periods_max = 64, }; #endif +#ifdef CHIP_AU8830 +static unsigned int au8830_channels[3] = { + 1, 2, 4, +}; + +static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = { + .count = ARRAY_SIZE(au8830_channels), + .list = au8830_channels, + .mask = 0, +}; +#endif /* open callback */ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream) { @@ -156,6 +163,15 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream) if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB || VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S) runtime->hw = snd_vortex_playback_hw_adb; +#ifdef CHIP_AU8830 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && + VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) { + runtime->hw.channels_max = 4; + snd_pcm_hw_constraint_list(runtime, 0, + SNDRV_PCM_HW_PARAM_CHANNELS, + &hw_constraints_au8830_channels); + } +#endif substream->runtime->private_data = NULL; } #ifndef CHIP_AU8810 diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index 1bff80c..b932154 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -869,7 +869,7 @@ spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm) mutex_lock(&atc->atc_mutex); dao->ops->get_spos(dao, &status); if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) { - status &= ((~IEC958_AES3_CON_FS) << 24); + status &= ~(IEC958_AES3_CON_FS << 24); status |= (iec958_con_fs << 24); dao->ops->set_spos(dao, status); dao->ops->commit_write(dao); diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c index af56eb9..47d9ea9 100644 --- a/sound/pci/ctxfi/ctdaio.c +++ b/sound/pci/ctxfi/ctdaio.c @@ -176,6 +176,7 @@ static int dao_set_left_input(struct dao *dao, struct rsc *input) if (!entry) return -ENOMEM; + dao->ops->clear_left_input(dao); /* Program master and conjugate resources */ input->ops->master(input); daio->rscl.ops->master(&daio->rscl); @@ -204,6 +205,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input) if (!entry) return -ENOMEM; + dao->ops->clear_right_input(dao); /* Program master and conjugate resources */ input->ops->master(input); daio->rscr.ops->master(&daio->rscr); diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c index 15c1e72..c3519ff 100644 --- a/sound/pci/ctxfi/ctmixer.c +++ b/sound/pci/ctxfi/ctmixer.c @@ -566,19 +566,6 @@ static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol, return 0; } -static int ct_spdif_default_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF; - - ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; - ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; - ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; - ucontrol->value.iec958.status[3] = (status >> 24) & 0xff; - - return 0; -} - static int ct_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -586,6 +573,10 @@ static int ct_spdif_get(struct snd_kcontrol *kcontrol, unsigned int status; atc->spdif_out_get_status(atc, &status); + + if (status == 0) + status = SNDRV_PCM_DEFAULT_CON_SPDIF; + ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; @@ -629,7 +620,7 @@ static struct snd_kcontrol_new iec958_default_ctl = { .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), .count = 1, .info = ct_spdif_info, - .get = ct_spdif_default_get, + .get = ct_spdif_get, .put = 
ct_spdif_put, .private_value = MIXER_IEC958_DEFAULT }; diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index d8da18a..f585200 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c @@ -381,7 +381,7 @@ static void hdmi_show_short_audio_desc(struct cea_sad *a) snd_print_pcm_rates(a->rates, buf, sizeof(buf)); if (a->format == AUDIO_CODING_TYPE_LPCM) - snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8)); + snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8); else if (a->max_bitrate) snprintf(buf2, sizeof(buf2), ", max bitrate = %d", a->max_bitrate); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c4c9217..824e220 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2300,6 +2300,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index aeea1cd..ffba0a1 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -391,10 +391,16 @@ static int conexant_add_jack(struct hda_codec *codec, struct conexant_spec *spec; struct conexant_jack *jack; const char *name; - int err; + int i, err; spec = codec->spec; snd_array_init(&spec->jacks, sizeof(*jack), 32); + + jack = spec->jacks.list; + for (i = 0; i < spec->jacks.used; i++, jack++) + if (jack->nid == nid) + return 0 ; /* already present */ + jack = snd_array_new(&spec->jacks); name = (type == SND_JACK_HEADPHONE) ? 
"Headphone" : "Mic" ; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b6b59a5..2f06425 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1161,7 +1161,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) case 0x10ec0883: case 0x10ec0885: case 0x10ec0887: - case 0x10ec0889: + /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ alc889_coef_init(codec); break; case 0x10ec0888: diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 364848e..3532d2c 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -737,7 +737,7 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e struct sigmatel_spec *spec = codec->spec; unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id); const struct hda_input_mux *imux = spec->input_mux; - unsigned int idx, prev_idx; + unsigned int idx, prev_idx, didx; idx = ucontrol->value.enumerated.item[0]; if (idx >= imux->num_items) @@ -749,7 +749,8 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, imux->items[idx].index); - if (prev_idx >= spec->num_analog_muxes) { + if (prev_idx >= spec->num_analog_muxes && + spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) { imux = spec->dinput_mux; /* 0 = analog */ snd_hda_codec_write_cache(codec, @@ -759,9 +760,13 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e } } else { imux = spec->dinput_mux; + /* first dimux item is hardcoded to select analog imux, + * so lets skip it + */ + didx = idx - spec->num_analog_muxes + 1; snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0, AC_VERB_SET_CONNECT_SEL, - imux->items[idx - 1].index); + imux->items[didx].index); } spec->cur_mux[adc_idx] = idx; return 1; diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c index 523b7fc..b69cd4b 100644 --- a/sound/soc/blackfin/bf5xx-ac97.c +++ b/sound/soc/blackfin/bf5xx-ac97.c @@ -261,9 +261,9 @@ static int bf5xx_ac97_suspend(struct snd_soc_dai *dai) pr_debug("%s : sport %d\n", __func__, dai->id); if (!dai->active) return 0; - if (dai->capture.active) + if (dai->capture_active) sport_rx_stop(sport); - if (dai->playback.active) + if (dai->playback_active) sport_tx_stop(sport); return 0; } diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c index c018772..bdbfac7 100644 --- a/sound/soc/codecs/wm8990.c +++ b/sound/soc/codecs/wm8990.c @@ -1185,7 +1185,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec, WM8990_VMIDTOG); /* Delay to allow output caps to discharge */ - msleep(msecs_to_jiffies(300)); + msleep(300); /* Disable VMIDTOG */ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST | @@ -1197,17 +1197,17 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec, /* Enable outputs */ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00); - msleep(msecs_to_jiffies(50)); + msleep(50); /* Enable VMID at 2x50k */ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02); - msleep(msecs_to_jiffies(100)); + msleep(100); /* Enable VREF */ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03); - msleep(msecs_to_jiffies(600)); + msleep(600); /* Enable BUFIOEN */ snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST | @@ -1252,7 +1252,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec, /* Disable VMID */ snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 
0x1f01); - msleep(msecs_to_jiffies(300)); + msleep(300); /* Enable all output discharge bits */ snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE | diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 4328cad..a184e91 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -640,7 +640,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) } dev->pcm->private_data = dev; - strcpy(dev->pcm->name, dev->product_name); + strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name)); memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c index 2f218c7..a1a4708 100644 --- a/sound/usb/caiaq/midi.c +++ b/sound/usb/caiaq/midi.c @@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device) if (ret < 0) return ret; - strcpy(rmidi->name, device->product_name); + strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name)); rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = device; diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c index 6ef68e4..084e6fc 100644 --- a/sound/usb/usx2y/us122l.c +++ b/sound/usb/usx2y/us122l.c @@ -273,29 +273,26 @@ static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait) { struct us122l *us122l = hw->private_data; - struct usb_stream *s = us122l->sk.s; unsigned *polled; unsigned int mask; poll_wait(file, &us122l->sk.sleep, wait); - switch (s->state) { - case usb_stream_ready: - if (us122l->first == file) - polled = &s->periods_polled; - else - polled = &us122l->second_periods_polled; - if (*polled != s->periods_done) { - *polled = s->periods_done; - mask = POLLIN | POLLOUT | POLLWRNORM; - break; + mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR; + if (mutex_trylock(&us122l->mutex)) { + struct usb_stream *s = us122l->sk.s; + if (s && s->state == usb_stream_ready) { + if (us122l->first == file) + polled = &s->periods_polled; + else + polled = &us122l->second_periods_polled; + if (*polled != s->periods_done) { + *polled = s->periods_done; + mask = POLLIN | POLLOUT | POLLWRNORM; + } else + mask = 0; } - /* Fall through */ - mask = 0; - break; - default: - mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR; - break; + mutex_unlock(&us122l->mutex); } return mask; } @@ -381,6 +378,7 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, { struct usb_stream_config *cfg; struct us122l *us122l = hw->private_data; + struct usb_stream *s; unsigned min_period_frames; int err = 0; bool high_speed; @@ -426,18 +424,18 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, snd_power_wait(hw->card, SNDRV_CTL_POWER_D0); mutex_lock(&us122l->mutex); + s = us122l->sk.s; if (!us122l->master) us122l->master = file; else if (us122l->master != file) { - if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) { + if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) { err = -EIO; goto unlock; } us122l->slave = file; } - if (!us122l->sk.s || - memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) || - us122l->sk.s->state == usb_stream_xrun) { + if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) || + s->state == usb_stream_xrun) { us122l_stop(us122l); if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames)) err = -EIO; @@ -448,6 +446,7 @@ unlock: mutex_unlock(&us122l->mutex); free: kfree(cfg); + wake_up_all(&us122l->sk.sleep); return err; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 
c422cd6..af094a2 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -134,6 +134,7 @@ void perf_session__delete(struct perf_session *self) void perf_session__remove_thread(struct perf_session *self, struct thread *th) { + self->last_match = NULL; rb_erase(&th->rb_node, &self->threads); /* * We may have references to this thread, for instance in some hist_entry
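The tools/perf hunk above clears self->last_match before the thread is erased from the rb-tree, because last_match is a lookup cache that points into that tree and would otherwise go stale. A minimal standalone sketch of the same caching pattern, with hypothetical types (a plain linked list stands in for perf's rb-tree):

#include <stdio.h>
#include <stdlib.h>

struct thread {
	int tid;
	struct thread *next;		/* stand-in for the rb-tree linkage */
};

struct session {
	struct thread *threads;
	struct thread *last_match;	/* cache of the most recent lookup */
};

static struct thread *session_find_thread(struct session *s, int tid)
{
	struct thread *th;

	if (s->last_match && s->last_match->tid == tid)
		return s->last_match;
	for (th = s->threads; th; th = th->next)
		if (th->tid == tid)
			return s->last_match = th;
	return NULL;
}

static void session_remove_thread(struct session *s, struct thread *th)
{
	struct thread **pp;

	s->last_match = NULL;		/* invalidate the cache before unlinking */
	for (pp = &s->threads; *pp; pp = &(*pp)->next) {
		if (*pp == th) {
			*pp = th->next;
			break;
		}
	}
	free(th);
}

int main(void)
{
	struct session s = { NULL, NULL };
	struct thread *a = malloc(sizeof(*a));

	if (!a)
		return 1;
	a->tid = 42;
	a->next = s.threads;
	s.threads = a;

	session_find_thread(&s, 42);			/* populates last_match */
	session_remove_thread(&s, a);			/* must clear it first */
	printf("lookup after removal: %p\n", (void *)session_find_thread(&s, 42));
	return 0;
}

After removal the lookup falls back to a full walk and returns NULL instead of dereferencing freed memory.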