diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices index e894902..ed1779c 100644 --- a/Documentation/i2c/instantiating-devices +++ b/Documentation/i2c/instantiating-devices @@ -100,7 +100,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) (...) i2c_adap = i2c_get_adapter(2); memset(&i2c_info, 0, sizeof(struct i2c_board_info)); - strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); + strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, normal_i2c); i2c_put_adapter(i2c_adap); diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 2b2407d..713c589 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -917,6 +917,7 @@ and is between 256 and 4096 characters. It is defined in the file controller i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX controllers + i8042.notimeout [HW] Ignore timeout condition signalled by controller i8042.panicblink= [HW] Frequency with which keyboard LEDs should blink when kernel panics (default is 0.5 sec) @@ -2671,6 +2672,10 @@ and is between 256 and 4096 characters. It is defined in the file disables clocksource verification at runtime. Used to enable high-resolution timer mode on older hardware, and in virtualized environment. + [x86] noirqtime: Do not use TSC to do irq accounting. + Used to disable IRQ_TIME_ACCOUNTING at run time on any + platform where RDTSC is slow and this accounting + can add overhead. turbografx.map[2|3]= [HW,JOY] TurboGraFX parallel port interface diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt index e67211f..c537834 100644 --- a/Documentation/power/interface.txt +++ b/Documentation/power/interface.txt @@ -57,7 +57,7 @@ smallest image possible. In particular, if "0" is written to this file, the suspend image will be as small as possible. Reading from this file will display the current image size limit, which -is set to 500 MB by default. +is set to 2/5 of available RAM by default.
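For context on the instantiating-devices hunk at the top of this patch: i2c_new_probed_device() matches a driver against i2c_board_info.type, which is why the documentation example had to switch from .name to .type. A minimal sketch of the same call sequence follows, mirroring the call shape shown in that hunk; the address list values and the example_instantiate name are illustrative, not taken from the real driver.

#include <linux/i2c.h>
#include <linux/string.h>

/* Address list to probe; the values here are illustrative only. */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };

static struct i2c_client *isp1301_i2c_client;

static int example_instantiate(void)
{
	struct i2c_adapter *i2c_adap;
	struct i2c_board_info i2c_info;

	i2c_adap = i2c_get_adapter(2);		/* bus number as in the hunk above */
	if (!i2c_adap)
		return -ENODEV;

	memset(&i2c_info, 0, sizeof(struct i2c_board_info));
	strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);	/* .type, not .name */

	isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, normal_i2c);
	i2c_put_adapter(i2c_adap);

	return isp1301_i2c_client ? 0 : -ENODEV;
}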
/sys/power/pm_trace controls the code which saves the last PM event point in the RTC across reboots, so that you can debug a machine that just hangs diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 55b859b..506d9d9 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -336,8 +336,8 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: zero) bool pm_runtime_suspended(struct device *dev); - - return true if the device's runtime PM status is 'suspended', or false - otherwise + - return true if the device's runtime PM status is 'suspended' and its + 'power.disable_depth' field is equal to zero, or false otherwise void pm_runtime_allow(struct device *dev); - set the power.runtime_auto flag for the device and decrease its usage diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt index 1d38b0d..50faa7e 100644 --- a/Documentation/sound/alsa/HD-Audio-Models.txt +++ b/Documentation/sound/alsa/HD-Audio-Models.txt @@ -282,12 +282,14 @@ Conexant 5051 hp HP Spartan laptop hp-dv6736 HP dv6736 hp-f700 HP Compaq Presario F700 + ideapad Lenovo IdeaPad laptop lenovo-x200 Lenovo X200 laptop toshiba Toshiba Satellite M300 Conexant 5066 ============= laptop Basic Laptop config (default) + hp-laptop HP laptops, e g G60 dell-laptop Dell laptops olpc-xo-1_5 OLPC XO 1.5 ideapad Lenovo IdeaPad U150 diff --git a/MAINTAINERS b/MAINTAINERS index 02f75fc..696e7d2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5442,7 +5442,6 @@ F: arch/alpha/kernel/srm_env.c STABLE BRANCH M: Greg Kroah-Hartman -M: Chris Wright L: stable@kernel.org S: Maintained diff --git a/Makefile b/Makefile index 141da26..5ade818 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 35 -EXTRAVERSION = -NAME = Sheep on Meth +EXTRAVERSION = .13 +NAME = Yokohama # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c index 52a79df..5c905aa 100644 --- a/arch/alpha/kernel/err_marvel.c +++ b/arch/alpha/kernel/err_marvel.c @@ -109,7 +109,7 @@ marvel_print_err_cyc(u64 err_cyc) #define IO7__ERR_CYC__CYCLE__M (0x7) printk("%s Packet In Error: %s\n" - "%s Error in %s, cycle %ld%s%s\n", + "%s Error in %s, cycle %lld%s%s\n", err_print_prefix, packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)], err_print_prefix, @@ -313,7 +313,7 @@ marvel_print_po7_ugbge_sym(u64 ugbge_sym) } printk("%s Up Hose Garbage Symptom:\n" - "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n", + "%s Source Port: %lld - Dest PID: %lld - OpCode: %s\n", err_print_prefix, err_print_prefix, EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT), @@ -552,7 +552,7 @@ marvel_print_pox_spl_cmplt(u64 spl_cmplt) #define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff) printk("%s Split Completion Error:\n" - "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n", + "%s Source (Bus:Dev:Func): %lld:%lld:%lld\n", err_print_prefix, err_print_prefix, EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS), diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h index 3d2627e..d3e52d3 100644 --- a/arch/alpha/kernel/proto.h +++ b/arch/alpha/kernel/proto.h @@ -156,9 +156,6 @@ extern void SMC669_Init(int); /* es1888.c */ extern void es1888_init(void); -/* ns87312.c */ -extern void ns87312_enable_ide(long ide_base); - /* ../lib/fpreg.c */ extern void alpha_write_fp_reg (unsigned long reg, unsigned long val); extern unsigned long alpha_read_fp_reg (unsigned 
long reg); diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index affd0f3..14c8898 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c @@ -33,7 +33,7 @@ #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" - +#include "pc873xx.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask = ~0UL; @@ -236,17 +236,30 @@ cabriolet_map_irq(struct pci_dev *dev, u8 slot, u8 pin) } static inline void __init +cabriolet_enable_ide(void) +{ + if (pc873xx_probe() == -1) { + printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); + } else { + printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", + pc873xx_get_model(), pc873xx_get_base()); + + pc873xx_enable_ide(); + } +} + +static inline void __init cabriolet_init_pci(void) { common_init_pci(); - ns87312_enable_ide(0x398); + cabriolet_enable_ide(); } static inline void __init cia_cab_init_pci(void) { cia_init_pci(); - ns87312_enable_ide(0x398); + cabriolet_enable_ide(); } /* diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index 2304648..4da596b 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c @@ -29,7 +29,7 @@ #include "irq_impl.h" #include "pci_impl.h" #include "machvec_impl.h" - +#include "pc873xx.h" /* Note mask bit is true for DISABLED irqs. */ static unsigned long cached_irq_mask[2] = { -1, -1 }; @@ -264,7 +264,14 @@ takara_init_pci(void) alpha_mv.pci_map_irq = takara_map_irq_srm; cia_init_pci(); - ns87312_enable_ide(0x26e); + + if (pc873xx_probe() == -1) { + printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n"); + } else { + printk(KERN_INFO "Found %s Super IO chip at 0x%x\n", + pc873xx_get_model(), pc873xx_get_base()); + pc873xx_enable_ide(); + } } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 98922f7..831acd06 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -273,7 +273,6 @@ config ARCH_AT91 bool "Atmel AT91" select ARCH_REQUIRE_GPIOLIB select HAVE_CLK - select ARCH_USES_GETTIMEOFFSET help This enables support for systems based on the Atmel AT91RM9200, AT91SAM9 and AT91CAP9 processors. @@ -1027,6 +1026,18 @@ config PL310_ERRATA_588369 is not correctly implemented in PL310 as clean lines are not invalidated as a result of these operations. Note that this errata uses Texas Instrument's secure monitor api. + +config ARM_ERRATA_720789 + bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID" + depends on CPU_V7 && SMP + help + This option enables the workaround for the 720789 Cortex-A9 (prior to + r2p0) erratum. A faulty ASID can be sent to the other CPUs for the + broadcasted CP15 TLB maintenance operations TLBIASIDIS and TLBIMVAIS. + As a consequence of this erratum, some TLB entries which should be + invalidated are not, resulting in an incoherency in the system page + tables. The workaround changes the TLB flushing routines to invalidate + entries regardless of the ASID. 
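The workaround described in the ARM_ERRATA_720789 help text above appears later in this patch as the tlbflush.h changes: when the option is enabled, the inner-shareable by-ASID (and by-MVA) invalidates are replaced with variants that ignore the ASID. A condensed sketch of that pattern, with zero and asid as defined in the surrounding tlbflush.h code further down:

#ifdef CONFIG_ARM_ERRATA_720789
	/* TLBIALLIS: invalidate the entire unified TLB, Inner Shareable (ASID ignored) */
	asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
#else
	/* TLBIASIDIS: invalidate unified TLB by ASID, Inner Shareable */
	asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
#endif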
endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 6e8f05c..d757555 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -215,7 +215,7 @@ @ Slightly optimised to avoid incrementing the pointer twice usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort .if \rept == 2 - usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort + usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort .endif add\cond \ptr, #\rept * \inc diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 9dcb11e..bf62c44 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -158,15 +158,24 @@ struct pt_regs { */ static inline int valid_user_regs(struct pt_regs *regs) { - if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) { - regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); - return 1; + unsigned long mode = regs->ARM_cpsr & MODE_MASK; + + /* + * Always clear the F (FIQ) and A (delayed abort) bits + */ + regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); + + if ((regs->ARM_cpsr & PSR_I_BIT) == 0) { + if (mode == USR_MODE) + return 1; + if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE) + return 1; } /* * Force CPSR to something logical... */ - regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT; + regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT; if (!(elf_hwcap & HWCAP_26BIT)) regs->ARM_cpsr |= USR_MODE; diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index bd863d8..33b546a 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h @@ -378,7 +378,11 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) if (tlb_flag(TLB_V6_I_ASID)) asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc"); if (tlb_flag(TLB_V7_UIS_ASID)) +#ifdef CONFIG_ARM_ERRATA_720789 + asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); +#else asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); +#endif if (tlb_flag(TLB_BTB)) { /* flush the branch target cache */ @@ -424,7 +428,11 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) if (tlb_flag(TLB_V6_I_PAGE)) asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); if (tlb_flag(TLB_V7_UIS_PAGE)) +#ifdef CONFIG_ARM_ERRATA_720789 + asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); +#else asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); +#endif if (tlb_flag(TLB_BTB)) { /* flush the branch target cache */ diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 2c1db77..a6c66f5 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -382,11 +382,13 @@ ENDPROC(sys_clone_wrapper) sys_sigreturn_wrapper: add r0, sp, #S_OFF + mov why, #0 @ prevent syscall restart handling b sys_sigreturn ENDPROC(sys_sigreturn_wrapper) sys_rt_sigreturn_wrapper: add r0, sp, #S_OFF + mov why, #0 @ prevent syscall restart handling b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a6..abaf844 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, unsigned long handler = (unsigned long)ka->sa.sa_handler; unsigned long retcode; int thumb = 0; - unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; + unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); + + cpsr |= PSR_ENDSTATE; /* * Maybe we need to deliver 
a 32-bit signal to a 26-bit task. diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S index 1e4cbd4..64f6bc1 100644 --- a/arch/arm/lib/findbit.S +++ b/arch/arm/lib/findbit.S @@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be) */ .L_found: #if __LINUX_ARM_ARCH__ >= 5 - rsb r1, r3, #0 - and r3, r3, r1 + rsb r0, r3, #0 + and r3, r3, r0 clz r3, r3 rsb r3, r3, #31 add r0, r2, r3 @@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be) addeq r2, r2, #1 mov r0, r2 #endif + cmp r1, r0 @ Clamp to maxbit + movlo r0, r1 mov pc, lr diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 809114d..501ac6f 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c @@ -46,7 +46,7 @@ static struct resource hdmac_resources[] = { .end = AT91_BASE_SYS + AT91_DMA + SZ_512 - 1, .flags = IORESOURCE_MEM, }, - [2] = { + [1] = { .start = AT91SAM9G45_ID_DMA, .end = AT91SAM9G45_ID_DMA, .flags = IORESOURCE_IRQ, diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h index 57f8ee1..27ac6f5 100644 --- a/arch/arm/mach-at91/include/mach/at91_mci.h +++ b/arch/arm/mach-at91/include/mach/at91_mci.h @@ -74,6 +74,8 @@ #define AT91_MCI_TRTYP_BLOCK (0 << 19) #define AT91_MCI_TRTYP_MULTIPLE (1 << 19) #define AT91_MCI_TRTYP_STREAM (2 << 19) +#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19) +#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19) #define AT91_MCI_BLKR 0x18 /* Block Register */ #define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */ diff --git a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S index d16ce7e..9b50442 100644 --- a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S +++ b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S @@ -10,7 +10,7 @@ * published by the Free Software Foundation. */ - .macro addruart,rx + .macro addruart,rx,rtmp mrc p15, 0, \rx, c1, c0 tst \rx, #1 @ MMU enabled? 
moveq \rx, #0x10000000 diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index 827cbc4..ea9ee4e 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c @@ -100,6 +100,7 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) static struct platform_nand_data ixdp425_flash_nand_data = { .chip = { + .nr_chips = 1, .chip_delay = 30, .options = NAND_NO_AUTOINCR, #ifdef CONFIG_MTD_PARTITIONS diff --git a/arch/arm/mach-mx3/mach-qong.c b/arch/arm/mach-mx3/mach-qong.c index e5b5b83..1f9363f 100644 --- a/arch/arm/mach-mx3/mach-qong.c +++ b/arch/arm/mach-mx3/mach-qong.c @@ -169,6 +169,7 @@ static void qong_nand_select_chip(struct mtd_info *mtd, int chip) static struct platform_nand_data qong_nand_data = { .chip = { + .nr_chips = 1, .chip_delay = 20, .options = 0, }, diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c index 5041d1b..696b1a9 100644 --- a/arch/arm/mach-orion5x/ts78xx-setup.c +++ b/arch/arm/mach-orion5x/ts78xx-setup.c @@ -216,6 +216,7 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = { static struct platform_nand_data ts78xx_ts_nand_data = { .chip = { + .nr_chips = 1, .part_probe_types = ts_nand_part_probes, .partitions = ts78xx_ts_nand_parts, .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts), diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index fdda6be..d717b49 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -745,9 +745,10 @@ static void __init cm_x300_init(void) { cm_x300_init_mfp(); - pxa_set_ffuart_info(NULL); pxa_set_btuart_info(NULL); pxa_set_stuart_info(NULL); + if (cpu_is_pxa300()) + pxa_set_ffuart_info(NULL); cm_x300_init_da9030(); cm_x300_init_dm9000(); diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 6353459..d6dd6f6 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c @@ -225,7 +225,7 @@ static void ct_ca9x4_init(void) int i; #ifdef CONFIG_CACHE_L2X0 - l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00000000, 0xfe0fffff); + l2x0_init(MMIO_P2V(CT_CA9X4_L2CC), 0x00400000, 0xfe0fffff); #endif clkdev_add_table(lookups, ARRAY_SIZE(lookups)); diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 86aa689..47010d8 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -196,6 +196,10 @@ ENTRY(v6_flush_kern_dcache_area) * - end - virtual end address of region */ v6_dma_inv_range: +#ifdef CONFIG_DMA_CACHE_RWFO + ldrb r2, [r0] @ read for ownership + strb r2, [r0] @ write for ownership +#endif tst r0, #D_CACHE_LINE_SIZE - 1 bic r0, r0, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE @@ -204,6 +208,10 @@ v6_dma_inv_range: mcrne p15, 0, r0, c7, c11, 1 @ clean unified line #endif tst r1, #D_CACHE_LINE_SIZE - 1 +#ifdef CONFIG_DMA_CACHE_RWFO + ldrneb r2, [r1, #-1] @ read for ownership + strneb r2, [r1, #-1] @ write for ownership +#endif bic r1, r1, #D_CACHE_LINE_SIZE - 1 #ifdef HARVARD_CACHE mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line @@ -211,10 +219,6 @@ v6_dma_inv_range: mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line #endif 1: -#ifdef CONFIG_DMA_CACHE_RWFO - ldr r2, [r0] @ read for ownership - str r2, [r0] @ write for ownership -#endif #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c6, 1 @ invalidate D line #else @@ -222,6 +226,10 @@ v6_dma_inv_range: #endif add r0, r0, #D_CACHE_LINE_SIZE cmp r0, r1 +#ifdef CONFIG_DMA_CACHE_RWFO + ldrlo r2, [r0] @ read for ownership + strlo r2, [r0] @ write 
for ownership +#endif blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer @@ -256,12 +264,12 @@ v6_dma_clean_range: * - end - virtual end address of region */ ENTRY(v6_dma_flush_range) - bic r0, r0, #D_CACHE_LINE_SIZE - 1 -1: #ifdef CONFIG_DMA_CACHE_RWFO - ldr r2, [r0] @ read for ownership - str r2, [r0] @ write for ownership + ldrb r2, [r0] @ read for ownership + strb r2, [r0] @ write for ownership #endif + bic r0, r0, #D_CACHE_LINE_SIZE - 1 +1: #ifdef HARVARD_CACHE mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line #else @@ -269,6 +277,10 @@ ENTRY(v6_dma_flush_range) #endif add r0, r0, #D_CACHE_LINE_SIZE cmp r0, r1 +#ifdef CONFIG_DMA_CACHE_RWFO + ldrlob r2, [r0] @ read for ownership + strlob r2, [r0] @ write for ownership +#endif blo 1b mov r0, #0 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 9b906de..56036ff 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -65,6 +65,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, return ret; } +#if USE_SPLIT_PTLOCKS +/* + * If we are using split PTE locks, then we need to take the page + * lock here. Otherwise we are using shared mm->page_table_lock + * which is already locked, thus cannot take it. + */ +static inline void do_pte_lock(spinlock_t *ptl) +{ + /* + * Use nested version here to indicate that we are already + * holding one similar spinlock. + */ + spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); +} + +static inline void do_pte_unlock(spinlock_t *ptl) +{ + spin_unlock(ptl); +} +#else /* !USE_SPLIT_PTLOCKS */ +static inline void do_pte_lock(spinlock_t *ptl) {} +static inline void do_pte_unlock(spinlock_t *ptl) {} +#endif /* USE_SPLIT_PTLOCKS */ + static int adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn) { @@ -89,11 +113,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, */ ptl = pte_lockptr(vma->vm_mm, pmd); pte = pte_offset_map_nested(pmd, address); - spin_lock(ptl); + do_pte_lock(ptl); ret = do_adjust_pte(vma, address, pfn, pte); - spin_unlock(ptl); + do_pte_unlock(ptl); pte_unmap_nested(pte); return ret; diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c index 0691176..72e09eb 100644 --- a/arch/arm/oprofile/common.c +++ b/arch/arm/oprofile/common.c @@ -102,6 +102,7 @@ static int op_create_counter(int cpu, int event) if (IS_ERR(pevent)) { ret = PTR_ERR(pevent); } else if (pevent->state != PERF_EVENT_STATE_ACTIVE) { + perf_event_release_kernel(pevent); pr_warning("oprofile: failed to enable event %d " "on CPU %d\n", event, cpu); ret = -EBUSY; @@ -365,6 +366,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ret = init_driverfs(); if (ret) { kfree(counter_config); + counter_config = NULL; return ret; } @@ -402,7 +404,6 @@ void oprofile_arch_exit(void) struct perf_event *event; if (*perf_events) { - exit_driverfs(); for_each_possible_cpu(cpu) { for (id = 0; id < perf_num_counters; ++id) { event = perf_events[cpu][id]; @@ -413,8 +414,10 @@ void oprofile_arch_exit(void) } } - if (counter_config) + if (counter_config) { kfree(counter_config); + exit_driverfs(); + } } #else int __init oprofile_arch_init(struct oprofile_operations *ops) diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c index 71437c6..9ebbd31 100644 --- a/arch/arm/plat-mxc/gpio.c +++ b/arch/arm/plat-mxc/gpio.c @@ -214,13 +214,16 @@ static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset, struct mxc_gpio_port *port = 
container_of(chip, struct mxc_gpio_port, chip); u32 l; + unsigned long flags; + spin_lock_irqsave(&port->lock, flags); l = __raw_readl(port->base + GPIO_GDIR); if (dir) l |= 1 << offset; else l &= ~(1 << offset); __raw_writel(l, port->base + GPIO_GDIR); + spin_unlock_irqrestore(&port->lock, flags); } static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value) @@ -229,9 +232,12 @@ static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value) container_of(chip, struct mxc_gpio_port, chip); void __iomem *reg = port->base + GPIO_DR; u32 l; + unsigned long flags; + spin_lock_irqsave(&port->lock, flags); l = (__raw_readl(reg) & (~(1 << offset))) | (value << offset); __raw_writel(l, reg); + spin_unlock_irqrestore(&port->lock, flags); } static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset) @@ -285,6 +291,8 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt) port[i].chip.base = i * 32; port[i].chip.ngpio = 32; + spin_lock_init(&port[i].lock); + /* its a serious configuration bug when it fails */ BUG_ON( gpiochip_add(&port[i].chip) < 0 ); diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h index 894d2f8..7a0dc5a 100644 --- a/arch/arm/plat-mxc/include/mach/gpio.h +++ b/arch/arm/plat-mxc/include/mach/gpio.h @@ -19,6 +19,7 @@ #ifndef __ASM_ARCH_MXC_GPIO_H__ #define __ASM_ARCH_MXC_GPIO_H__ +#include #include #include @@ -36,6 +37,7 @@ struct mxc_gpio_port { int virtual_irq_start; struct gpio_chip chip; u32 both_edges; + spinlock_t lock; }; int mxc_gpio_init(struct mxc_gpio_port*, int); diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index f7f571e..087e81a 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -993,11 +994,17 @@ void omap_start_dma(int lch) l = dma_read(CCR(lch)); /* - * Errata: On ES2.0 BUFFERING disable must be set. - * This will always fail on ES1.0 + * Errata: Inter Frame DMA buffering issue (All OMAP2420 and + * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and + * bursting is enabled. This might result in data gets stalled in + * FIFO at the end of the block. + * Workaround: DMA channels must have BUFFERING_DISABLED bit set to + * guarantee no data will stay in the DMA FIFO in case inter frame + * buffering occurs. 
*/ - if (cpu_is_omap24xx()) - l |= OMAP_DMA_CCR_EN; + if (cpu_is_omap2420() || + (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0))) + l |= OMAP_DMA_CCR_BUFFERING_DISABLE; l |= OMAP_DMA_CCR_EN; dma_write(l, CCR(lch)); @@ -1015,8 +1022,39 @@ void omap_stop_dma(int lch) dma_write(0, CICR(lch)); l = dma_read(CCR(lch)); - l &= ~OMAP_DMA_CCR_EN; - dma_write(l, CCR(lch)); + /* OMAP3 Errata i541: sDMA FIFO draining does not finish */ + if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) { + int i = 0; + u32 sys_cf; + + /* Configure No-Standby */ + l = dma_read(OCP_SYSCONFIG); + sys_cf = l; + l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK; + l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); + dma_write(l , OCP_SYSCONFIG); + + l = dma_read(CCR(lch)); + l &= ~OMAP_DMA_CCR_EN; + dma_write(l, CCR(lch)); + + /* Wait for sDMA FIFO drain */ + l = dma_read(CCR(lch)); + while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE | + OMAP_DMA_CCR_WR_ACTIVE))) { + udelay(5); + i++; + l = dma_read(CCR(lch)); + } + if (i >= 100) + printk(KERN_ERR "DMA drain did not complete on " + "lch %d\n", lch); + /* Restore OCP_SYSCONFIG */ + dma_write(sys_cf, OCP_SYSCONFIG); + } else { + l &= ~OMAP_DMA_CCR_EN; + dma_write(l, CCR(lch)); + } if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch = lch; diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h index 02232ca..6b78986 100644 --- a/arch/arm/plat-omap/include/plat/dma.h +++ b/arch/arm/plat-omap/include/plat/dma.h @@ -335,6 +335,10 @@ #define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11) #define OMAP_DMA_CCR_EN (1 << 7) +#define OMAP_DMA_CCR_RD_ACTIVE (1 << 9) +#define OMAP_DMA_CCR_WR_ACTIVE (1 << 10) +#define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24) +#define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25) #define OMAP_DMA_DATA_TYPE_S8 0x00 #define OMAP_DMA_DATA_TYPE_S16 0x01 diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c index 9eaf5b0..68a27bc 100644 --- a/arch/blackfin/mach-bf537/boards/stamp.c +++ b/arch/blackfin/mach-bf537/boards/stamp.c @@ -400,6 +400,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd) static struct platform_nand_data bfin_plat_nand_data = { .chip = { + .nr_chips = 1, .chip_delay = 30, #ifdef CONFIG_MTD_PARTITIONS .part_probe_types = part_probes, diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c index bfcfa86..35b6d12 100644 --- a/arch/blackfin/mach-bf561/boards/acvilon.c +++ b/arch/blackfin/mach-bf561/boards/acvilon.c @@ -284,6 +284,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd) static struct platform_nand_data bfin_plat_nand_data = { .chip = { + .nr_chips = 1, .chip_delay = 30, #ifdef CONFIG_MTD_PARTITIONS .part_probe_types = part_probes, diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h index f90edc8..9301a28 100644 --- a/arch/ia64/include/asm/compat.h +++ b/arch/ia64/include/asm/compat.h @@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr) } static __inline__ void __user * -compat_alloc_user_space (long len) +arch_compat_alloc_user_space (long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h index 9f342a5..dd028f2 100644 --- a/arch/ia64/include/asm/system.h +++ b/arch/ia64/include/asm/system.h @@ -272,10 +272,6 @@ void cpu_idle_wait(void); void default_idle(void); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING 
-extern void account_system_vtime(struct task_struct *); -#endif - #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 3567d54..331d42b 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -420,22 +420,31 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set ;; RSM_PSR_I(p0, r18, r19) // mask interrupt delivery - mov ar.ccv=0 andcm r14=r14,r17 // filter out SIGKILL & SIGSTOP + mov r8=EINVAL // default to EINVAL #ifdef CONFIG_SMP - mov r17=1 + // __ticket_spin_trylock(r31) + ld4 r17=[r31] ;; - cmpxchg4.acq r18=[r31],r17,ar.ccv // try to acquire the lock - mov r8=EINVAL // default to EINVAL + mov.m ar.ccv=r17 + extr.u r9=r17,17,15 + adds r19=1,r17 + extr.u r18=r17,0,15 + ;; + cmp.eq p6,p7=r9,r18 ;; +(p6) cmpxchg4.acq r9=[r31],r19,ar.ccv +(p6) dep.z r20=r19,1,15 // next serving ticket for unlock +(p7) br.cond.spnt.many .lock_contention + ;; + cmp4.eq p0,p7=r9,r17 + adds r31=2,r31 +(p7) br.cond.spnt.many .lock_contention ld8 r3=[r2] // re-read current->blocked now that we hold the lock - cmp4.ne p6,p0=r18,r0 -(p6) br.cond.spnt.many .lock_contention ;; #else ld8 r3=[r2] // re-read current->blocked now that we hold the lock - mov r8=EINVAL // default to EINVAL #endif add r18=IA64_TASK_PENDING_OFFSET+IA64_SIGPENDING_SIGNAL_OFFSET,r16 add r19=IA64_TASK_SIGNAL_OFFSET,r16 @@ -490,7 +499,9 @@ EX(.fail_efault, ld8 r14=[r33]) // r14 <- *set (p6) br.cond.spnt.few 1b // yes -> retry #ifdef CONFIG_SMP - st4.rel [r31]=r0 // release the lock + // __ticket_spin_unlock(r31) + st2.rel [r31]=r20 + mov r20=0 // i must not leak kernel bits... #endif SSM_PSR_I(p0, p9, r31) ;; @@ -512,7 +523,8 @@ EX(.fail_efault, (p15) st8 [r34]=r3) .sig_pending: #ifdef CONFIG_SMP - st4.rel [r31]=r0 // release the lock + // __ticket_spin_unlock(r31) + st2.rel [r31]=r20 // release the lock #endif SSM_PSR_I(p0, p9, r17) ;; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index a0220dc..6871c1b 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1859,7 +1859,8 @@ ia64_mca_cpu_init(void *cpu_data) data = mca_bootmem(); first_time = 0; } else - data = __get_free_pages(GFP_KERNEL, get_order(sz)); + data = (void *)__get_free_pages(GFP_KERNEL, + get_order(sz)); if (!data) panic("Could not allocate MCA memory for cpu %d\n", cpu); diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c index 6c89228..4a746ea 100644 --- a/arch/ia64/kernel/msi_ia64.c +++ b/arch/ia64/kernel/msi_ia64.c @@ -25,7 +25,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq, if (irq_prepare_move(irq, cpu)) return -1; - read_msi_msg(irq, &msg); + get_cached_msi_msg(irq, &msg); addr = msg.address_lo; addr &= MSI_ADDR_DEST_ID_MASK; diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c index ebfdd6a..0c72dd4 100644 --- a/arch/ia64/sn/kernel/msi_sn.c +++ b/arch/ia64/sn/kernel/msi_sn.c @@ -175,7 +175,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq, * Release XIO resources for the old MSI PCI address */ - read_msi_msg(irq, &msg); + get_cached_msi_msg(irq, &msg); sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; pdev = sn_pdev->pdi_linux_pcidev; provider = SN_PCIDEV_BUSPROVIDER(pdev); diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index 4d4536e..9c271be 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c @@ -509,7 +509,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) * use the GART mapped mode. 
*/ static u64 -tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags) +tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags) { u64 mapaddr; diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile index 72f6e85..45e6afc 100644 --- a/arch/microblaze/Makefile +++ b/arch/microblaze/Makefile @@ -72,12 +72,16 @@ export MMU DTB all: linux.bin -BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.% +# With make 3.82 we cannot mix normal and wildcard targets +BOOT_TARGETS1 = linux.bin linux.bin.gz +BOOT_TARGETS2 = simpleImage.% archclean: $(Q)$(MAKE) $(clean)=$(boot) -$(BOOT_TARGETS): vmlinux +$(BOOT_TARGETS1): vmlinux + $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ +$(BOOT_TARGETS2): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ define archhelp diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index e30e42a..956f946 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c @@ -28,6 +28,8 @@ #include #include +#include + static struct gpio_keys_button mtx1_gpio_button[] = { { .gpio = 207, @@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { &mtx1_mtd, }; +static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { + .phy_search_highest_addr = 1, + .phy1_search_mac0 = 1, +}; + static int __init mtx1_register_devices(void) { int rc; + au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); + rc = gpio_request(mtx1_gpio_button[0].gpio, mtx1_gpio_button[0].desc); if (rc < 0) { diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h index 613f691..dbc5106 100644 --- a/arch/mips/include/asm/compat.h +++ b/arch/mips/include/asm/compat.h @@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = (struct pt_regs *) ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h index 02b77ba..efa0b60 100644 --- a/arch/parisc/include/asm/compat.h +++ b/arch/parisc/include/asm/compat.h @@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static __inline__ void __user *compat_alloc_user_space(long len) +static __inline__ void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = ¤t->thread.regs; return (void __user *)regs->gr[30]; diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c index 4c247e0..4896ed0 100644 --- a/arch/parisc/kernel/firmware.c +++ b/arch/parisc/kernel/firmware.c @@ -1123,42 +1123,23 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096]; */ int pdc_iodc_print(const unsigned char *str, unsigned count) { - static int posx; /* for simple TAB-Simulation... */ unsigned int i; unsigned long flags; - for (i = 0; i < count && i < 79;) { + for (i = 0; i < count;) { switch(str[i]) { case '\n': iodc_dbuf[i+0] = '\r'; iodc_dbuf[i+1] = '\n'; i += 2; - posx = 0; goto print; - case '\t': - while (posx & 7) { - iodc_dbuf[i] = ' '; - i++, posx++; - } - break; - case '\b': /* BS */ - posx -= 2; default: iodc_dbuf[i] = str[i]; - i++, posx++; + i++; break; } } - /* if we're at the end of line, and not already inserting a newline, - * insert one anyway. iodc console doesn't claim to support >79 char - * lines. 
don't account for this in the return value. - */ - if (i == 79 && iodc_dbuf[i-1] != '\n') { - iodc_dbuf[i+0] = '\r'; - iodc_dbuf[i+1] = '\n'; - } - print: spin_lock_irqsave(&pdc_lock, flags); real32_call(PAGE0->mem_cons.iodc_io, diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index efbcee5..e13fc3f 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -117,7 +117,7 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest) int cpu_dest; /* timer and ipi have to always be received on all CPUs */ - if (CHECK_IRQ_PER_CPU(irq)) { + if (CHECK_IRQ_PER_CPU(irq_to_desc(irq)->status)) { /* Bad linux design decision. The mask has already * been set; we must reset it */ cpumask_setall(irq_desc[irq].affinity); diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 77cfe7a..b2e3635 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -163,9 +163,11 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ # Default to zImage, override when needed all: zImage -BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.% +# With make 3.82 we cannot mix normal and wildcard targets +BOOT_TARGETS1 := zImage zImage.initrd uImage +BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% -PHONY += $(BOOT_TARGETS) +PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2) boot := arch/$(ARCH)/boot @@ -180,10 +182,16 @@ relocs_check: arch/powerpc/relocs_check.pl vmlinux zImage: relocs_check endif -$(BOOT_TARGETS): vmlinux +$(BOOT_TARGETS1): vmlinux + $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) +$(BOOT_TARGETS2): vmlinux + $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) + + +bootwrapper_install: $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) -bootwrapper_install %.dtb: +%.dtb: $(Q)$(MAKE) ARCH=ppc64 $(build)=$(boot) $(patsubst %,$(boot)/%,$@) define archhelp diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 396d21a..a11d4ea 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h @@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = current->thread.regs; unsigned long usp = regs->gpr[1]; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index d62fdf4..1dc338c 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -878,6 +878,7 @@ #define PV_970 0x0039 #define PV_POWER5 0x003A #define PV_POWER5p 0x003B +#define PV_POWER7 0x003F #define PV_970FX 0x003C #define PV_630 0x0040 #define PV_630p 0x0041 diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index a6297c6..880fb57 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h @@ -545,10 +545,6 @@ extern void reloc_got2(unsigned long); #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void account_system_vtime(struct task_struct *); -#endif - extern struct dentry *powerpc_debugfs_root; #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 55cba4a..f8cd9fb 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S @@ -18,7 +18,7 @@ #include 
_GLOBAL(__setup_cpu_603) - mflr r4 + mflr r5 BEGIN_MMU_FTR_SECTION li r10,0 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ @@ -27,60 +27,60 @@ BEGIN_FTR_SECTION bl __init_fpu_registers END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) bl setup_common_caches - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_604) - mflr r4 + mflr r5 bl setup_common_caches bl setup_604_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750cx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750cx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_750fx) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_common_caches bl setup_750_7400_hid0 bl setup_750fx - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7400) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_7400_workarounds bl setup_common_caches bl setup_750_7400_hid0 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_7410) - mflr r4 + mflr r5 bl __init_fpu_registers bl setup_7410_workarounds bl setup_common_caches bl setup_750_7400_hid0 li r3,0 mtspr SPRN_L2CR2,r3 - mtlr r4 + mtlr r5 blr _GLOBAL(__setup_cpu_745x) - mflr r4 + mflr r5 bl setup_common_caches bl setup_745x_specifics - mtlr r4 + mtlr r5 blr /* Enable caches for 603's, 604, 750 & 7400 */ @@ -194,10 +194,10 @@ setup_750cx: cror 4*cr0+eq,4*cr0+eq,4*cr1+eq cror 4*cr0+eq,4*cr0+eq,4*cr2+eq bnelr - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) blr /* 750fx specific @@ -225,12 +225,12 @@ BEGIN_FTR_SECTION andis. r11,r11,L3CR_L3E@h beq 1f END_FTR_SECTION_IFSET(CPU_FTR_L3CR) - lwz r6,CPU_SPEC_FEATURES(r5) + lwz r6,CPU_SPEC_FEATURES(r4) andi. r0,r6,CPU_FTR_L3_DISABLE_NAP beq 1f li r7,CPU_FTR_CAN_NAP andc r6,r6,r7 - stw r6,CPU_SPEC_FEATURES(r5) + stw r6,CPU_SPEC_FEATURES(r4) 1: mfspr r11,SPRN_HID0 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 844a44b..c571cd3 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -572,15 +572,21 @@ __secondary_start: /* Set thread priority to MEDIUM */ HMT_MEDIUM - /* Do early setup for that CPU (stab, slb, hash table pointer) */ - bl .early_setup_secondary - /* Initialize the kernel stack. Just a repeat for iSeries. */ LOAD_REG_ADDR(r3, current_set) sldi r28,r24,3 /* get current_set[cpu#] */ - ldx r1,r3,r28 - addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD - std r1,PACAKSAVE(r13) + ldx r14,r3,r28 + addi r14,r14,THREAD_SIZE-STACK_FRAME_OVERHEAD + std r14,PACAKSAVE(r13) + + /* Do early setup for that CPU (stab, slb, hash table pointer) */ + bl .early_setup_secondary + + /* + * setup the new stack pointer, but *don't* use this until + * translation is on. + */ + mr r1, r14 /* Clear backchain so we get nice backtraces */ li r7,0 diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index ed31a29..060c6d3 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -199,9 +200,32 @@ static void kexec_prepare_cpus_wait(int wait_state) mb(); } -static void kexec_prepare_cpus(void) +/* + * We need to make sure each present CPU is online. The next kernel will scan + * the device tree and assume primary threads are online and query secondary + * threads via RTAS to online them if required. 
If we don't online primary + * threads, they will be stuck. However, we also online secondary threads as we + * may be using 'cede offline'. In this case RTAS doesn't see the secondary + * threads as offline -- and again, these CPUs will be stuck. + * + * So, we online all CPUs that should be running, including secondary threads. + */ +static void wake_offline_cpus(void) { + int cpu = 0; + + for_each_present_cpu(cpu) { + if (!cpu_online(cpu)) { + printk(KERN_INFO "kexec: Waking offline cpu %d.\n", + cpu); + cpu_up(cpu); + } + } +} +static void kexec_prepare_cpus(void) +{ + wake_offline_cpus(); smp_call_function(kexec_smp_down, NULL, /* wait */0); local_irq_disable(); mb(); /* make sure IRQs are disabled before we say they are */ diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index 5c14ffe..59f2467 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c @@ -1230,6 +1230,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1277,7 +1299,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } } diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index babccee..4339d20 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c @@ -569,6 +569,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct perf_sample_data data; perf_sample_data_init(&data, 0); + data.period = event->hw.last_period; if (perf_event_overflow(event, nmi, &data, regs)) { /* diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 8eff48e..3fee685 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c @@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event) switch (unit) { case PM_VPU: mask = 0x4c; /* byte 0 bits 2,3,6 */ + break; case PM_LSU0: /* byte 2 bits 0,2,3,4,6; all of byte 1 */ mask = 0x085dff00; + break; case PM_LSU1L: mask = 0x50 << 24; /* byte 3 bits 4,6 */ break; diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 0441bbd..b119497 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -616,20 +616,11 @@ void timer_interrupt(struct pt_regs * regs) * some CPUs will continuue to take decrementer exceptions */ set_dec(DECREMENTER_MAX); -#ifdef CONFIG_PPC32 +#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); #endif - now = get_tb_or_rtc(); - if (now < decrementer->next_tb) { - /* not time for this event yet */ - now = decrementer->next_tb - now; - if (now <= DECREMENTER_MAX) - set_dec((int)now); - 
trace_timer_interrupt_exit(regs); - return; - } old_regs = set_irq_regs(regs); irq_enter(); @@ -645,8 +636,16 @@ void timer_interrupt(struct pt_regs * regs) get_lppaca()->int_dword.fields.decr_int = 0; #endif - if (evt->event_handler) - evt->event_handler(evt); + now = get_tb_or_rtc(); + if (now >= decrementer->next_tb) { + decrementer->next_tb = ~(u64)0; + if (evt->event_handler) + evt->event_handler(evt); + } else { + now = decrementer->next_tb - now; + if (now <= DECREMENTER_MAX) + set_dec((int)now); + } #ifdef CONFIG_PPC_ISERIES if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending()) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 09dffe6..1eb64ba 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -1122,7 +1122,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, else #endif /* CONFIG_PPC_HAS_HASH_64K */ rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, - subpage_protection(pgdir, ea)); + subpage_protection(mm, ea)); /* Dump some info in case of hash insertion failure, they should * never happen so it is really useful to know if/when they do diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 30e1626..494a63c 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c @@ -952,7 +952,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance) if (dsr & DOORBELL_DSR_QFI) { pr_info("RIO: doorbell queue full\n"); out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); - goto out; } /* XXX Need to check/dispatch until queue empty */ diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h index 104f200..a875c2f 100644 --- a/arch/s390/include/asm/compat.h +++ b/arch/s390/include/asm/compat.h @@ -181,7 +181,7 @@ static inline int is_compat_task(void) #endif -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { unsigned long stack; diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 73e2598..f73b50a 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -150,11 +150,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); */ extern unsigned long thread_saved_pc(struct task_struct *t); -/* - * Print register of task into buffer. Used in fs/proc/array.c. 
- */ -extern void task_show_regs(struct seq_file *m, struct task_struct *task); - extern void show_code(struct pt_regs *regs); unsigned long get_wchan(struct task_struct *p); diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h index cef6621..38ddd8a 100644 --- a/arch/s390/include/asm/system.h +++ b/arch/s390/include/asm/system.h @@ -97,7 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs) extern void account_vtime(struct task_struct *, struct task_struct *); extern void account_tick_vtime(struct task_struct *); -extern void account_system_vtime(struct task_struct *); #ifdef CONFIG_PFAULT extern void pfault_irq_init(void); diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index ac15139..1995c17 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck); static int notrace s390_revalidate_registers(struct mci *mci) { int kill_task; - u64 tmpclock; u64 zero; void *fpt_save_area, *fpt_creg_save_area; @@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci) : "0", "cc"); #endif /* Revalidate clock comparator register */ - asm volatile( - " stck 0(%1)\n" - " sckc 0(%1)" - : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); - + if (S390_lowcore.clock_comparator == -1) + set_clock_comparator(S390_lowcore.mcck_clock); + else + set_clock_comparator(S390_lowcore.clock_comparator); /* Check if old PSW is valid */ if (!mci->wp) /* diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 5d8f0f3..657cb17 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs) show_last_breaking_event(regs); } -/* This is called from fs/proc/array.c */ -void task_show_regs(struct seq_file *m, struct task_struct *task) -{ - struct pt_regs *regs; - - regs = task_pt_regs(task); - seq_printf(m, "task: %p, ksp: %p\n", - task, (void *)task->thread.ksp); - seq_printf(m, "User PSW : %p %p\n", - (void *) regs->psw.mask, (void *)regs->psw.addr); - - seq_printf(m, "User GPRS: " FOURLONG, - regs->gprs[0], regs->gprs[1], - regs->gprs[2], regs->gprs[3]); - seq_printf(m, " " FOURLONG, - regs->gprs[4], regs->gprs[5], - regs->gprs[6], regs->gprs[7]); - seq_printf(m, " " FOURLONG, - regs->gprs[8], regs->gprs[9], - regs->gprs[10], regs->gprs[11]); - seq_printf(m, " " FOURLONG, - regs->gprs[12], regs->gprs[13], - regs->gprs[14], regs->gprs[15]); - seq_printf(m, "User ACRS: %08x %08x %08x %08x\n", - task->thread.acrs[0], task->thread.acrs[1], - task->thread.acrs[2], task->thread.acrs[3]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[4], task->thread.acrs[5], - task->thread.acrs[6], task->thread.acrs[7]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[8], task->thread.acrs[9], - task->thread.acrs[10], task->thread.acrs[11]); - seq_printf(m, " %08x %08x %08x %08x\n", - task->thread.acrs[12], task->thread.acrs[13], - task->thread.acrs[14], task->thread.acrs[15]); -} - static DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 3479f1b..c1e326c 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -565,6 +566,23 @@ void init_cpu_vtimer(void) __ctl_set_bit(0,10); } +static int __cpuinit s390_nohz_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + struct s390_idle_data 
*idle; + long cpu = (long) hcpu; + + idle = &per_cpu(s390_idle, cpu); + switch (action) { + case CPU_DYING: + case CPU_DYING_FROZEN: + idle->nohz_delay = 0; + default: + break; + } + return NOTIFY_OK; +} + void __init vtime_init(void) { /* request the cpu timer external interrupt */ @@ -573,5 +591,6 @@ void __init vtime_init(void) /* Enable cpu timer interrupts on the boot cpu. */ init_cpu_vtimer(); + cpu_notifier(s390_nohz_notify, 0); } diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 752b362..7c37ec3 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c @@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs) { unsigned long mask, cr0, cr0_saved; u64 clock_saved; + u64 end; + mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; + end = get_clock() + (usecs << 12); clock_saved = local_tick_disable(); - set_clock_comparator(get_clock() + (usecs << 12)); __ctl_store(cr0_saved, 0, 0); cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; __ctl_load(cr0 , 0, 0); - mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; lockdep_off(); - trace_hardirqs_on(); - __load_psw_mask(mask); - local_irq_disable(); + do { + set_clock_comparator(end); + trace_hardirqs_on(); + __load_psw_mask(mask); + local_irq_disable(); + } while (get_clock() < end); lockdep_on(); __ctl_load(cr0_saved, 0, 0); local_tick_enable(clock_saved); diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h index 2050ca0..bdb2ff8 100644 --- a/arch/sparc/include/asm/atomic_64.h +++ b/arch/sparc/include/asm/atomic_64.h @@ -20,14 +20,14 @@ #define atomic64_set(v, i) (((v)->counter) = i) extern void atomic_add(int, atomic_t *); -extern void atomic64_add(int, atomic64_t *); +extern void atomic64_add(long, atomic64_t *); extern void atomic_sub(int, atomic_t *); -extern void atomic64_sub(int, atomic64_t *); +extern void atomic64_sub(long, atomic64_t *); extern int atomic_add_ret(int, atomic_t *); -extern int atomic64_add_ret(int, atomic64_t *); +extern long atomic64_add_ret(long, atomic64_t *); extern int atomic_sub_ret(int, atomic_t *); -extern int atomic64_sub_ret(int, atomic64_t *); +extern long atomic64_sub_ret(long, atomic64_t *); #define atomic_dec_return(v) atomic_sub_ret(1, v) #define atomic64_dec_return(v) atomic64_sub_ret(1, v) @@ -91,7 +91,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) -static inline int atomic64_add_unless(atomic64_t *v, long a, long u) +static inline long atomic64_add_unless(atomic64_t *v, long a, long u) { long c, old; c = atomic64_read(v); diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h index 5016f76..6f57325 100644 --- a/arch/sparc/include/asm/compat.h +++ b/arch/sparc/include/asm/compat.h @@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = current_thread_info()->kregs; unsigned long usp = regs->u_regs[UREG_I6]; diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h index e834880..2173432 100644 --- a/arch/sparc/include/asm/fb.h +++ b/arch/sparc/include/asm/fb.h @@ -1,5 +1,6 @@ #ifndef _SPARC_FB_H_ #define _SPARC_FB_H_ +#include #include #include #include @@ -18,6 +19,9 @@ static inline int fb_is_primary_device(struct fb_info *info) struct 
device *dev = info->device; struct device_node *node; + if (console_set_on_cmdline) + return 0; + node = dev->of_node; if (node && node == of_console_device) diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h index 963e1a4..f61a501 100644 --- a/arch/sparc/include/asm/openprom.h +++ b/arch/sparc/include/asm/openprom.h @@ -37,7 +37,7 @@ struct linux_dev_v2_funcs { int (*v2_dev_open)(char *devpath); void (*v2_dev_close)(int d); int (*v2_dev_read)(int d, char *buf, int nbytes); - int (*v2_dev_write)(int d, char *buf, int nbytes); + int (*v2_dev_write)(int d, const char *buf, int nbytes); int (*v2_dev_seek)(int d, int hi, int lo); /* Never issued (multistage load support) */ diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h index 33e31ce..618a5bd 100644 --- a/arch/sparc/include/asm/oplib_32.h +++ b/arch/sparc/include/asm/oplib_32.h @@ -60,25 +60,6 @@ extern char *prom_getbootargs(void); extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes); extern void prom_unmapio(char *virt_addr, unsigned int num_bytes); -/* Device operations. */ - -/* Open the device described by the passed string. Note, that the format - * of the string is different on V0 vs. V2->higher proms. The caller must - * know what he/she is doing! Returns the device descriptor, an int. - */ -extern int prom_devopen(char *device_string); - -/* Close a previously opened device described by the passed integer - * descriptor. - */ -extern int prom_devclose(int device_handle); - -/* Do a seek operation on the device described by the passed integer - * descriptor. - */ -extern void prom_seek(int device_handle, unsigned int seek_hival, - unsigned int seek_lowval); - /* Miscellaneous routines, don't really fit in any category per se. */ /* Reboot the machine with the command line passed. */ @@ -121,19 +102,8 @@ extern int prom_getrev(void); /* Get the prom firmware revision. */ extern int prom_getprev(void); -/* Character operations to/from the console.... */ - -/* Non-blocking get character from console. */ -extern int prom_nbgetchar(void); - -/* Non-blocking put character to console. */ -extern int prom_nbputchar(char character); - -/* Blocking get character from console. */ -extern char prom_getchar(void); - -/* Blocking put character to console. */ -extern void prom_putchar(char character); +/* Write a buffer of characters to the console. */ +extern void prom_console_write_buf(const char *buf, int len); /* Prom's internal routines, don't use in kernel/boot code. */ extern void prom_printf(const char *fmt, ...); @@ -238,7 +208,6 @@ extern int prom_node_has_property(int node, char *property); extern int prom_setprop(int node, const char *prop_name, char *prop_value, int value_size); -extern int prom_pathtoinode(char *path); extern int prom_inst2pkg(int); /* Dorking with Bus ranges... */ diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h index a5db031..209463d 100644 --- a/arch/sparc/include/asm/oplib_64.h +++ b/arch/sparc/include/asm/oplib_64.h @@ -67,27 +67,6 @@ extern void prom_init(void *cif_handler, void *cif_stack); /* Boot argument acquisition, returns the boot command line string. */ extern char *prom_getbootargs(void); -/* Device utilities. */ - -/* Device operations. */ - -/* Open the device described by the passed string. Note, that the format - * of the string is different on V0 vs. V2->higher proms. The caller must - * know what he/she is doing! 
Returns the device descriptor, an int. - */ -extern int prom_devopen(const char *device_string); - -/* Close a previously opened device described by the passed integer - * descriptor. - */ -extern int prom_devclose(int device_handle); - -/* Do a seek operation on the device described by the passed integer - * descriptor. - */ -extern void prom_seek(int device_handle, unsigned int seek_hival, - unsigned int seek_lowval); - /* Miscellaneous routines, don't really fit in any category per se. */ /* Reboot the machine with the command line passed. */ @@ -109,33 +88,14 @@ extern void prom_halt(void) __attribute__ ((noreturn)); /* Halt and power-off the machine. */ extern void prom_halt_power_off(void) __attribute__ ((noreturn)); -/* Set the PROM 'sync' callback function to the passed function pointer. - * When the user gives the 'sync' command at the prom prompt while the - * kernel is still active, the prom will call this routine. - * - */ -typedef int (*callback_func_t)(long *cmd); -extern void prom_setcallback(callback_func_t func_ptr); - /* Acquire the IDPROM of the root node in the prom device tree. This * gets passed a buffer where you would like it stuffed. The return value * is the format type of this idprom or 0xff on error. */ extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); -/* Character operations to/from the console.... */ - -/* Non-blocking get character from console. */ -extern int prom_nbgetchar(void); - -/* Non-blocking put character to console. */ -extern int prom_nbputchar(char character); - -/* Blocking get character from console. */ -extern char prom_getchar(void); - -/* Blocking put character to console. */ -extern void prom_putchar(char character); +/* Write a buffer of characters to the console. */ +extern void prom_console_write_buf(const char *buf, int len); /* Prom's internal routines, don't use in kernel/boot code. */ extern void prom_printf(const char *fmt, ...); @@ -185,9 +145,8 @@ extern int prom_getunumber(int syndrome_code, char *buf, int buflen); /* Retain physical memory to the caller across soft resets. */ -extern unsigned long prom_retain(const char *name, - unsigned long pa_low, unsigned long pa_high, - long size, long align); +extern int prom_retain(const char *name, unsigned long size, + unsigned long align, unsigned long *paddr); /* Load explicit I/D TLB entries into the calling processor. */ extern long prom_itlb_load(unsigned long index, @@ -279,34 +238,12 @@ extern int prom_finddevice(const char *name); extern int prom_setprop(int node, const char *prop_name, char *prop_value, int value_size); -extern int prom_pathtoinode(const char *path); extern int prom_inst2pkg(int); -extern int prom_service_exists(const char *service_name); extern void prom_sun4v_guest_soft_state(void); extern int prom_ihandle2path(int handle, char *buffer, int bufsize); /* Client interface level routines. 
*/ -extern long p1275_cmd(const char *, long, ...); - -#if 0 -#define P1275_SIZE(x) ((((long)((x) / 32)) << 32) | (x)) -#else -#define P1275_SIZE(x) x -#endif - -/* We support at most 16 input and 1 output argument */ -#define P1275_ARG_NUMBER 0 -#define P1275_ARG_IN_STRING 1 -#define P1275_ARG_OUT_BUF 2 -#define P1275_ARG_OUT_32B 3 -#define P1275_ARG_IN_FUNCTION 4 -#define P1275_ARG_IN_BUF 5 -#define P1275_ARG_IN_64B 6 - -#define P1275_IN(x) ((x) & 0xf) -#define P1275_OUT(x) (((x) << 4) & 0xf0) -#define P1275_INOUT(i,o) (P1275_IN(i)|P1275_OUT(o)) -#define P1275_ARG(n,x) ((x) << ((n)*3 + 8)) +extern void p1275_cmd_direct(unsigned long *); #endif /* !(__SPARC64_OPLIB_H) */ diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index c333b8d..d21ad50 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -228,6 +228,10 @@ static const struct of_device_id ecpp_match[] = { .name = "parallel", .compatible = "ns87317-ecpp", }, + { + .name = "parallel", + .compatible = "pnpALI,1533,3", + }, {}, }; diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h index a303c9d..e4c61a1 100644 --- a/arch/sparc/include/asm/rwsem-const.h +++ b/arch/sparc/include/asm/rwsem-const.h @@ -5,7 +5,7 @@ #define RWSEM_UNLOCKED_VALUE 0x00000000 #define RWSEM_ACTIVE_BIAS 0x00000001 #define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS 0xffff0000 +#define RWSEM_WAITING_BIAS (-0x00010000) #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 6a7b4db..dcefd22 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -114,7 +114,7 @@ void __init leon_init_timers(irq_handler_t counter_fn) if (leon3_gptimer_regs && leon3_irqctrl_regs) { LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, - (((1000000 / 100) - 1))); + (((1000000 / HZ) - 1))); LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); #ifdef CONFIG_SMP @@ -128,7 +128,7 @@ void __init leon_init_timers(irq_handler_t counter_fn) } LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1))); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/HZ) - 1))); LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0); # endif diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile index 1b8c073..816c0fa 100644 --- a/arch/sparc/prom/Makefile +++ b/arch/sparc/prom/Makefile @@ -6,7 +6,6 @@ ccflags := -Werror lib-y := bootstr_$(BITS).o lib-$(CONFIG_SPARC32) += devmap.o -lib-y += devops_$(BITS).o lib-y += init_$(BITS).o lib-$(CONFIG_SPARC32) += memory.o lib-y += misc_$(BITS).o diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S index 5f27ad7..9c86b4b 100644 --- a/arch/sparc/prom/cif.S +++ b/arch/sparc/prom/cif.S @@ -9,18 +9,18 @@ #include .text - .globl prom_cif_interface -prom_cif_interface: - sethi %hi(p1275buf), %o0 - or %o0, %lo(p1275buf), %o0 - ldx [%o0 + 0x010], %o1 ! prom_cif_stack - save %o1, -192, %sp - ldx [%i0 + 0x008], %l2 ! prom_cif_handler + .globl prom_cif_direct +prom_cif_direct: + sethi %hi(p1275buf), %o1 + or %o1, %lo(p1275buf), %o1 + ldx [%o1 + 0x0010], %o2 ! prom_cif_stack + save %o2, -192, %sp + ldx [%i1 + 0x0008], %l2 ! prom_cif_handler mov %g4, %l0 mov %g5, %l1 mov %g6, %l3 call %l2 - add %i0, 0x018, %o0 ! 
prom_args + mov %i0, %o0 ! prom_args mov %l0, %g4 mov %l1, %g5 mov %l3, %g6 diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c index 5340264..4886310 100644 --- a/arch/sparc/prom/console_32.c +++ b/arch/sparc/prom/console_32.c @@ -16,63 +16,26 @@ extern void restore_current(void); -/* Non blocking get character from console input device, returns -1 - * if no input was taken. This can be used for polling. - */ -int -prom_nbgetchar(void) -{ - static char inc; - int i = -1; - unsigned long flags; - - spin_lock_irqsave(&prom_lock, flags); - switch(prom_vers) { - case PROM_V0: - i = (*(romvec->pv_nbgetchar))(); - break; - case PROM_V2: - case PROM_V3: - if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) { - i = inc; - } else { - i = -1; - } - break; - default: - i = -1; - break; - }; - restore_current(); - spin_unlock_irqrestore(&prom_lock, flags); - return i; /* Ugh, we could spin forever on unsupported proms ;( */ -} - /* Non blocking put character to console device, returns -1 if * unsuccessful. */ -int -prom_nbputchar(char c) +static int prom_nbputchar(const char *buf) { - static char outc; unsigned long flags; int i = -1; spin_lock_irqsave(&prom_lock, flags); switch(prom_vers) { case PROM_V0: - i = (*(romvec->pv_nbputchar))(c); + i = (*(romvec->pv_nbputchar))(*buf); break; case PROM_V2: case PROM_V3: - outc = c; - if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1) + if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, + buf, 0x1) == 1) i = 0; - else - i = -1; break; default: - i = -1; break; }; restore_current(); @@ -80,18 +43,14 @@ prom_nbputchar(char c) return i; /* Ugh, we could spin forever on unsupported proms ;( */ } -/* Blocking version of get character routine above. */ -char -prom_getchar(void) +void prom_console_write_buf(const char *buf, int len) { - int character; - while((character = prom_nbgetchar()) == -1) ; - return (char) character; + while (len) { + int n = prom_nbputchar(buf); + if (n) + continue; + len--; + buf++; + } } -/* Blocking version of put character routine above. */ -void -prom_putchar(char c) -{ - while(prom_nbputchar(c) == -1) ; -} diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c index f55d58a..ed39e75 100644 --- a/arch/sparc/prom/console_64.c +++ b/arch/sparc/prom/console_64.c @@ -15,59 +15,34 @@ extern int prom_stdin, prom_stdout; -/* Non blocking get character from console input device, returns -1 - * if no input was taken. This can be used for polling. - */ -inline int -prom_nbgetchar(void) +static int __prom_console_write_buf(const char *buf, int len) { - char inc; - - if (p1275_cmd("read", P1275_ARG(1,P1275_ARG_OUT_BUF)| - P1275_INOUT(3,1), - prom_stdin, &inc, P1275_SIZE(1)) == 1) - return inc; - else - return -1; -} + unsigned long args[7]; + int ret; -/* Non blocking put character to console device, returns -1 if - * unsuccessful. - */ -inline int -prom_nbputchar(char c) -{ - char outc; - - outc = c; - if (p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| - P1275_INOUT(3,1), - prom_stdout, &outc, P1275_SIZE(1)) == 1) - return 0; - else - return -1; -} + args[0] = (unsigned long) "write"; + args[1] = 3; + args[2] = 1; + args[3] = (unsigned int) prom_stdout; + args[4] = (unsigned long) buf; + args[5] = (unsigned int) len; + args[6] = (unsigned long) -1; -/* Blocking version of get character routine above. 
*/ -char -prom_getchar(void) -{ - int character; - while((character = prom_nbgetchar()) == -1) ; - return (char) character; -} + p1275_cmd_direct(args); -/* Blocking version of put character routine above. */ -void -prom_putchar(char c) -{ - prom_nbputchar(c); + ret = (int) args[6]; + if (ret < 0) + return -1; + return ret; } -void -prom_puts(const char *s, int len) +void prom_console_write_buf(const char *buf, int len) { - p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)| - P1275_INOUT(3,1), - prom_stdout, s, P1275_SIZE(len)); + while (len) { + int n = __prom_console_write_buf(buf, len); + if (n < 0) + continue; + len -= n; + buf += len; + } } diff --git a/arch/sparc/prom/devops_32.c b/arch/sparc/prom/devops_32.c deleted file mode 100644 index 9c5d468..0000000 --- a/arch/sparc/prom/devops_32.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * devops.c: Device operations using the PROM. - * - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - */ -#include -#include -#include - -#include -#include - -extern void restore_current(void); - -/* Open the device described by the string 'dstr'. Returns the handle - * to that device used for subsequent operations on that device. - * Returns -1 on failure. - */ -int -prom_devopen(char *dstr) -{ - int handle; - unsigned long flags; - spin_lock_irqsave(&prom_lock, flags); - switch(prom_vers) { - case PROM_V0: - handle = (*(romvec->pv_v0devops.v0_devopen))(dstr); - if(handle == 0) handle = -1; - break; - case PROM_V2: - case PROM_V3: - handle = (*(romvec->pv_v2devops.v2_dev_open))(dstr); - break; - default: - handle = -1; - break; - }; - restore_current(); - spin_unlock_irqrestore(&prom_lock, flags); - - return handle; -} - -/* Close the device described by device handle 'dhandle'. */ -int -prom_devclose(int dhandle) -{ - unsigned long flags; - spin_lock_irqsave(&prom_lock, flags); - switch(prom_vers) { - case PROM_V0: - (*(romvec->pv_v0devops.v0_devclose))(dhandle); - break; - case PROM_V2: - case PROM_V3: - (*(romvec->pv_v2devops.v2_dev_close))(dhandle); - break; - default: - break; - }; - restore_current(); - spin_unlock_irqrestore(&prom_lock, flags); - return 0; -} - -/* Seek to specified location described by 'seekhi' and 'seeklo' - * for device 'dhandle'. - */ -void -prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo) -{ - unsigned long flags; - spin_lock_irqsave(&prom_lock, flags); - switch(prom_vers) { - case PROM_V0: - (*(romvec->pv_v0devops.v0_seekdev))(dhandle, seekhi, seeklo); - break; - case PROM_V2: - case PROM_V3: - (*(romvec->pv_v2devops.v2_dev_seek))(dhandle, seekhi, seeklo); - break; - default: - break; - }; - restore_current(); - spin_unlock_irqrestore(&prom_lock, flags); -} diff --git a/arch/sparc/prom/devops_64.c b/arch/sparc/prom/devops_64.c deleted file mode 100644 index 9dbd803..0000000 --- a/arch/sparc/prom/devops_64.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * devops.c: Device operations using the PROM. - * - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) - */ -#include -#include -#include - -#include -#include - -/* Open the device described by the string 'dstr'. Returns the handle - * to that device used for subsequent operations on that device. - * Returns 0 on failure. - */ -int -prom_devopen(const char *dstr) -{ - return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)| - P1275_INOUT(1,1), - dstr); -} - -/* Close the device described by device handle 'dhandle'. 
*/ -int -prom_devclose(int dhandle) -{ - p1275_cmd ("close", P1275_INOUT(1,0), dhandle); - return 0; -} - -/* Seek to specified location described by 'seekhi' and 'seeklo' - * for device 'dhandle'. - */ -void -prom_seek(int dhandle, unsigned int seekhi, unsigned int seeklo) -{ - p1275_cmd ("seek", P1275_INOUT(3,1), dhandle, seekhi, seeklo); -} diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c index 39fc6af..2fdcebf 100644 --- a/arch/sparc/prom/misc_64.c +++ b/arch/sparc/prom/misc_64.c @@ -18,12 +18,19 @@ #include #include -int prom_service_exists(const char *service_name) +static int prom_service_exists(const char *service_name) { - int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_INOUT(1, 1), service_name); + unsigned long args[5]; - if (err) + args[0] = (unsigned long) "test"; + args[1] = 1; + args[2] = 1; + args[3] = (unsigned long) service_name; + args[4] = (unsigned long) -1; + + p1275_cmd_direct(args); + + if (args[4]) return 0; return 1; } @@ -31,30 +38,47 @@ int prom_service_exists(const char *service_name) void prom_sun4v_guest_soft_state(void) { const char *svc = "SUNW,soft-state-supported"; + unsigned long args[3]; if (!prom_service_exists(svc)) return; - p1275_cmd(svc, P1275_INOUT(0, 0)); + args[0] = (unsigned long) svc; + args[1] = 0; + args[2] = 0; + p1275_cmd_direct(args); } /* Reset and reboot the machine with the command 'bcommand'. */ void prom_reboot(const char *bcommand) { + unsigned long args[4]; + #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_reboot(bcommand); #endif - p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_INOUT(1, 0), bcommand); + args[0] = (unsigned long) "boot"; + args[1] = 1; + args[2] = 0; + args[3] = (unsigned long) bcommand; + + p1275_cmd_direct(args); } /* Forth evaluate the expression contained in 'fstring'. */ void prom_feval(const char *fstring) { + unsigned long args[5]; + if (!fstring || fstring[0] == 0) return; - p1275_cmd("interpret", P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_INOUT(1, 1), fstring); + args[0] = (unsigned long) "interpret"; + args[1] = 1; + args[2] = 1; + args[3] = (unsigned long) fstring; + args[4] = (unsigned long) -1; + + p1275_cmd_direct(args); } EXPORT_SYMBOL(prom_feval); @@ -68,6 +92,7 @@ extern void smp_release(void); */ void prom_cmdline(void) { + unsigned long args[3]; unsigned long flags; local_irq_save(flags); @@ -76,7 +101,11 @@ void prom_cmdline(void) smp_capture(); #endif - p1275_cmd("enter", P1275_INOUT(0, 0)); + args[0] = (unsigned long) "enter"; + args[1] = 0; + args[2] = 0; + + p1275_cmd_direct(args); #ifdef CONFIG_SMP smp_release(); @@ -90,36 +119,37 @@ void prom_cmdline(void) */ void notrace prom_halt(void) { + unsigned long args[3]; + #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_power_off(); #endif again: - p1275_cmd("exit", P1275_INOUT(0, 0)); + args[0] = (unsigned long) "exit"; + args[1] = 0; + args[2] = 0; + p1275_cmd_direct(args); goto again; /* PROM is out to get me -DaveM */ } void prom_halt_power_off(void) { + unsigned long args[3]; + #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_power_off(); #endif - p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0)); + args[0] = (unsigned long) "SUNW,power-off"; + args[1] = 0; + args[2] = 0; + p1275_cmd_direct(args); /* if nothing else helps, we just halt */ prom_halt(); } -/* Set prom sync handler to call function 'funcp'. 
*/ -void prom_setcallback(callback_func_t funcp) -{ - if (!funcp) - return; - p1275_cmd("set-callback", P1275_ARG(0, P1275_ARG_IN_FUNCTION) | - P1275_INOUT(1, 1), funcp); -} - /* Get the idprom and stuff it into buffer 'idbuf'. Returns the * format type. 'num_bytes' is the number of bytes that your idbuf * has space for. Returns 0xff on error. @@ -173,57 +203,61 @@ static int prom_get_memory_ihandle(void) } /* Load explicit I/D TLB entries. */ +static long tlb_load(const char *type, unsigned long index, + unsigned long tte_data, unsigned long vaddr) +{ + unsigned long args[9]; + + args[0] = (unsigned long) prom_callmethod_name; + args[1] = 5; + args[2] = 1; + args[3] = (unsigned long) type; + args[4] = (unsigned int) prom_get_mmu_ihandle(); + args[5] = vaddr; + args[6] = tte_data; + args[7] = index; + args[8] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (long) args[8]; +} + long prom_itlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { - return p1275_cmd(prom_callmethod_name, - (P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_ARG(2, P1275_ARG_IN_64B) | - P1275_ARG(3, P1275_ARG_IN_64B) | - P1275_INOUT(5, 1)), - "SUNW,itlb-load", - prom_get_mmu_ihandle(), - /* And then our actual args are pushed backwards. */ - vaddr, - tte_data, - index); + return tlb_load("SUNW,itlb-load", index, tte_data, vaddr); } long prom_dtlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { - return p1275_cmd(prom_callmethod_name, - (P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_ARG(2, P1275_ARG_IN_64B) | - P1275_ARG(3, P1275_ARG_IN_64B) | - P1275_INOUT(5, 1)), - "SUNW,dtlb-load", - prom_get_mmu_ihandle(), - /* And then our actual args are pushed backwards. */ - vaddr, - tte_data, - index); + return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr); } int prom_map(int mode, unsigned long size, unsigned long vaddr, unsigned long paddr) { - int ret = p1275_cmd(prom_callmethod_name, - (P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_ARG(3, P1275_ARG_IN_64B) | - P1275_ARG(4, P1275_ARG_IN_64B) | - P1275_ARG(6, P1275_ARG_IN_64B) | - P1275_INOUT(7, 1)), - prom_map_name, - prom_get_mmu_ihandle(), - mode, - size, - vaddr, - 0, - paddr); - + unsigned long args[11]; + int ret; + + args[0] = (unsigned long) prom_callmethod_name; + args[1] = 7; + args[2] = 1; + args[3] = (unsigned long) prom_map_name; + args[4] = (unsigned int) prom_get_mmu_ihandle(); + args[5] = (unsigned int) mode; + args[6] = size; + args[7] = vaddr; + args[8] = 0; + args[9] = paddr; + args[10] = (unsigned long) -1; + + p1275_cmd_direct(args); + + ret = (int) args[10]; if (ret == 0) ret = -1; return ret; @@ -231,40 +265,51 @@ int prom_map(int mode, unsigned long size, void prom_unmap(unsigned long size, unsigned long vaddr) { - p1275_cmd(prom_callmethod_name, - (P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_ARG(2, P1275_ARG_IN_64B) | - P1275_ARG(3, P1275_ARG_IN_64B) | - P1275_INOUT(4, 0)), - prom_unmap_name, - prom_get_mmu_ihandle(), - size, - vaddr); + unsigned long args[7]; + + args[0] = (unsigned long) prom_callmethod_name; + args[1] = 4; + args[2] = 0; + args[3] = (unsigned long) prom_unmap_name; + args[4] = (unsigned int) prom_get_mmu_ihandle(); + args[5] = size; + args[6] = vaddr; + + p1275_cmd_direct(args); } /* Set aside physical memory which is not touched or modified * across soft resets. 
*/ -unsigned long prom_retain(const char *name, - unsigned long pa_low, unsigned long pa_high, - long size, long align) +int prom_retain(const char *name, unsigned long size, + unsigned long align, unsigned long *paddr) { - /* XXX I don't think we return multiple values correctly. - * XXX OBP supposedly returns pa_low/pa_high here, how does - * XXX it work? + unsigned long args[11]; + + args[0] = (unsigned long) prom_callmethod_name; + args[1] = 5; + args[2] = 3; + args[3] = (unsigned long) "SUNW,retain"; + args[4] = (unsigned int) prom_get_memory_ihandle(); + args[5] = align; + args[6] = size; + args[7] = (unsigned long) name; + args[8] = (unsigned long) -1; + args[9] = (unsigned long) -1; + args[10] = (unsigned long) -1; + + p1275_cmd_direct(args); + + if (args[8]) + return (int) args[8]; + + /* Next we get "phys_high" then "phys_low". On 64-bit + * the phys_high cell is don't care since the phys_low + * cell has the full value. */ + *paddr = args[10]; - /* If align is zero, the pa_low/pa_high args are passed, - * else they are not. - */ - if (align == 0) - return p1275_cmd("SUNW,retain", - (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(5, 2)), - name, pa_low, pa_high, size, align); - else - return p1275_cmd("SUNW,retain", - (P1275_ARG(0, P1275_ARG_IN_BUF) | P1275_INOUT(3, 2)), - name, size, align); + return 0; } /* Get "Unumber" string for the SIMM at the given @@ -277,62 +322,129 @@ int prom_getunumber(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { - return p1275_cmd(prom_callmethod_name, - (P1275_ARG(0, P1275_ARG_IN_STRING) | - P1275_ARG(3, P1275_ARG_OUT_BUF) | - P1275_ARG(6, P1275_ARG_IN_64B) | - P1275_INOUT(8, 2)), - "SUNW,get-unumber", prom_get_memory_ihandle(), - buflen, buf, P1275_SIZE(buflen), - 0, phys_addr, syndrome_code); + unsigned long args[12]; + + args[0] = (unsigned long) prom_callmethod_name; + args[1] = 7; + args[2] = 2; + args[3] = (unsigned long) "SUNW,get-unumber"; + args[4] = (unsigned int) prom_get_memory_ihandle(); + args[5] = buflen; + args[6] = (unsigned long) buf; + args[7] = 0; + args[8] = phys_addr; + args[9] = (unsigned int) syndrome_code; + args[10] = (unsigned long) -1; + args[11] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (int) args[10]; } /* Power management extensions. 
*/ void prom_sleepself(void) { - p1275_cmd("SUNW,sleep-self", P1275_INOUT(0, 0)); + unsigned long args[3]; + + args[0] = (unsigned long) "SUNW,sleep-self"; + args[1] = 0; + args[2] = 0; + p1275_cmd_direct(args); } int prom_sleepsystem(void) { - return p1275_cmd("SUNW,sleep-system", P1275_INOUT(0, 1)); + unsigned long args[4]; + + args[0] = (unsigned long) "SUNW,sleep-system"; + args[1] = 0; + args[2] = 1; + args[3] = (unsigned long) -1; + p1275_cmd_direct(args); + + return (int) args[3]; } int prom_wakeupsystem(void) { - return p1275_cmd("SUNW,wakeup-system", P1275_INOUT(0, 1)); + unsigned long args[4]; + + args[0] = (unsigned long) "SUNW,wakeup-system"; + args[1] = 0; + args[2] = 1; + args[3] = (unsigned long) -1; + p1275_cmd_direct(args); + + return (int) args[3]; } #ifdef CONFIG_SMP void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) { - p1275_cmd("SUNW,start-cpu", P1275_INOUT(3, 0), cpunode, pc, arg); + unsigned long args[6]; + + args[0] = (unsigned long) "SUNW,start-cpu"; + args[1] = 3; + args[2] = 0; + args[3] = (unsigned int) cpunode; + args[4] = pc; + args[5] = arg; + p1275_cmd_direct(args); } void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) { - p1275_cmd("SUNW,start-cpu-by-cpuid", P1275_INOUT(3, 0), - cpuid, pc, arg); + unsigned long args[6]; + + args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid"; + args[1] = 3; + args[2] = 0; + args[3] = (unsigned int) cpuid; + args[4] = pc; + args[5] = arg; + p1275_cmd_direct(args); } void prom_stopcpu_cpuid(int cpuid) { - p1275_cmd("SUNW,stop-cpu-by-cpuid", P1275_INOUT(1, 0), - cpuid); + unsigned long args[4]; + + args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid"; + args[1] = 1; + args[2] = 0; + args[3] = (unsigned int) cpuid; + p1275_cmd_direct(args); } void prom_stopself(void) { - p1275_cmd("SUNW,stop-self", P1275_INOUT(0, 0)); + unsigned long args[3]; + + args[0] = (unsigned long) "SUNW,stop-self"; + args[1] = 0; + args[2] = 0; + p1275_cmd_direct(args); } void prom_idleself(void) { - p1275_cmd("SUNW,idle-self", P1275_INOUT(0, 0)); + unsigned long args[3]; + + args[0] = (unsigned long) "SUNW,idle-self"; + args[1] = 0; + args[2] = 0; + p1275_cmd_direct(args); } void prom_resumecpu(int cpunode) { - p1275_cmd("SUNW,resume-cpu", P1275_INOUT(1, 0), cpunode); + unsigned long args[4]; + + args[0] = (unsigned long) "SUNW,resume-cpu"; + args[1] = 1; + args[2] = 0; + args[3] = (unsigned int) cpunode; + p1275_cmd_direct(args); } #endif diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c index 2d8b70d..fa6e4e2 100644 --- a/arch/sparc/prom/p1275.c +++ b/arch/sparc/prom/p1275.c @@ -22,13 +22,11 @@ struct { long prom_callback; /* 0x00 */ void (*prom_cif_handler)(long *); /* 0x08 */ unsigned long prom_cif_stack; /* 0x10 */ - unsigned long prom_args [23]; /* 0x18 */ - char prom_buffer [3000]; } p1275buf; extern void prom_world(int); -extern void prom_cif_interface(void); +extern void prom_cif_direct(unsigned long *args); extern void prom_cif_callback(void); /* @@ -36,114 +34,20 @@ extern void prom_cif_callback(void); */ DEFINE_RAW_SPINLOCK(prom_entry_lock); -long p1275_cmd(const char *service, long fmt, ...) 
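All of these sparc/prom conversions follow one fixed calling convention for the new p1275_cmd_direct() interface: cell 0 holds the service name, cell 1 the number of input cells, cell 2 the number of output cells, then the input cells, then output slots pre-seeded with -1. A minimal sketch of that convention, assuming a hypothetical wrapper; the wrapper name and the "test-method" service are illustrative only and are not taken from the patch:

static int example_call_test_method(int ihandle, const char *method)
{
	unsigned long args[6];

	args[0] = (unsigned long) "test-method";	/* cell 0: service name */
	args[1] = 2;					/* cell 1: number of input cells */
	args[2] = 1;					/* cell 2: number of output cells */
	args[3] = (unsigned int) ihandle;		/* input cells follow in order */
	args[4] = (unsigned long) method;
	args[5] = (unsigned long) -1;			/* output cells are pre-set to -1 */

	p1275_cmd_direct(args);

	return (int) args[5];				/* results come back in the output cells */
}

Because the argument marshalling now happens entirely in each caller, the varargs packing removed from p1275.c below is no longer needed.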
+void p1275_cmd_direct(unsigned long *args) { - char *p, *q; unsigned long flags; - int nargs, nrets, i; - va_list list; - long attrs, x; - - p = p1275buf.prom_buffer; raw_local_save_flags(flags); raw_local_irq_restore(PIL_NMI); raw_spin_lock(&prom_entry_lock); - p1275buf.prom_args[0] = (unsigned long)p; /* service */ - strcpy (p, service); - p = (char *)(((long)(strchr (p, 0) + 8)) & ~7); - p1275buf.prom_args[1] = nargs = (fmt & 0x0f); /* nargs */ - p1275buf.prom_args[2] = nrets = ((fmt & 0xf0) >> 4); /* nrets */ - attrs = fmt >> 8; - va_start(list, fmt); - for (i = 0; i < nargs; i++, attrs >>= 3) { - switch (attrs & 0x7) { - case P1275_ARG_NUMBER: - p1275buf.prom_args[i + 3] = - (unsigned)va_arg(list, long); - break; - case P1275_ARG_IN_64B: - p1275buf.prom_args[i + 3] = - va_arg(list, unsigned long); - break; - case P1275_ARG_IN_STRING: - strcpy (p, va_arg(list, char *)); - p1275buf.prom_args[i + 3] = (unsigned long)p; - p = (char *)(((long)(strchr (p, 0) + 8)) & ~7); - break; - case P1275_ARG_OUT_BUF: - (void) va_arg(list, char *); - p1275buf.prom_args[i + 3] = (unsigned long)p; - x = va_arg(list, long); - i++; attrs >>= 3; - p = (char *)(((long)(p + (int)x + 7)) & ~7); - p1275buf.prom_args[i + 3] = x; - break; - case P1275_ARG_IN_BUF: - q = va_arg(list, char *); - p1275buf.prom_args[i + 3] = (unsigned long)p; - x = va_arg(list, long); - i++; attrs >>= 3; - memcpy (p, q, (int)x); - p = (char *)(((long)(p + (int)x + 7)) & ~7); - p1275buf.prom_args[i + 3] = x; - break; - case P1275_ARG_OUT_32B: - (void) va_arg(list, char *); - p1275buf.prom_args[i + 3] = (unsigned long)p; - p += 32; - break; - case P1275_ARG_IN_FUNCTION: - p1275buf.prom_args[i + 3] = - (unsigned long)prom_cif_callback; - p1275buf.prom_callback = va_arg(list, long); - break; - } - } - va_end(list); - prom_world(1); - prom_cif_interface(); + prom_cif_direct(args); prom_world(0); - attrs = fmt >> 8; - va_start(list, fmt); - for (i = 0; i < nargs; i++, attrs >>= 3) { - switch (attrs & 0x7) { - case P1275_ARG_NUMBER: - (void) va_arg(list, long); - break; - case P1275_ARG_IN_STRING: - (void) va_arg(list, char *); - break; - case P1275_ARG_IN_FUNCTION: - (void) va_arg(list, long); - break; - case P1275_ARG_IN_BUF: - (void) va_arg(list, char *); - (void) va_arg(list, long); - i++; attrs >>= 3; - break; - case P1275_ARG_OUT_BUF: - p = va_arg(list, char *); - x = va_arg(list, long); - memcpy (p, (char *)(p1275buf.prom_args[i + 3]), (int)x); - i++; attrs >>= 3; - break; - case P1275_ARG_OUT_32B: - p = va_arg(list, char *); - memcpy (p, (char *)(p1275buf.prom_args[i + 3]), 32); - break; - } - } - va_end(list); - x = p1275buf.prom_args [nargs + 3]; - raw_spin_unlock(&prom_entry_lock); raw_local_irq_restore(flags); - - return x; } void prom_cif_init(void *cif_handler, void *cif_stack) diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c index ca86926..d9682f0 100644 --- a/arch/sparc/prom/printf.c +++ b/arch/sparc/prom/printf.c @@ -15,22 +15,45 @@ #include #include +#include #include #include +#define CONSOLE_WRITE_BUF_SIZE 1024 + static char ppbuf[1024]; +static char console_write_buf[CONSOLE_WRITE_BUF_SIZE]; +static DEFINE_RAW_SPINLOCK(console_write_lock); void notrace prom_write(const char *buf, unsigned int n) { - char ch; + unsigned int dest_len; + unsigned long flags; + char *dest; + + dest = console_write_buf; + raw_spin_lock_irqsave(&console_write_lock, flags); - while (n != 0) { - --n; - if ((ch = *buf++) == '\n') - prom_putchar('\r'); - prom_putchar(ch); + dest_len = 0; + while (n-- != 0) { + char ch = *buf++; + 
if (ch == '\n') { + *dest++ = '\r'; + dest_len++; + } + *dest++ = ch; + dest_len++; + if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) { + prom_console_write_buf(console_write_buf, dest_len); + dest = console_write_buf; + dest_len = 0; + } } + if (dest_len) + prom_console_write_buf(console_write_buf, dest_len); + + raw_spin_unlock_irqrestore(&console_write_lock, flags); } void notrace prom_printf(const char *fmt, ...) diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c index b21592f..71e7f08 100644 --- a/arch/sparc/prom/tree_32.c +++ b/arch/sparc/prom/tree_32.c @@ -341,18 +341,3 @@ int prom_inst2pkg(int inst) if (node == -1) return 0; return node; } - -/* Return 'node' assigned to a particular prom 'path' - * FIXME: Should work for v0 as well - */ -int prom_pathtoinode(char *path) -{ - int node, inst; - - inst = prom_devopen (path); - if (inst == -1) return 0; - node = prom_inst2pkg (inst); - prom_devclose (inst); - if (node == -1) return 0; - return node; -} diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c index 3c0d2dd..8327b1b 100644 --- a/arch/sparc/prom/tree_64.c +++ b/arch/sparc/prom/tree_64.c @@ -16,22 +16,39 @@ #include #include +static int prom_node_to_node(const char *type, int node) +{ + unsigned long args[5]; + + args[0] = (unsigned long) type; + args[1] = 1; + args[2] = 1; + args[3] = (unsigned int) node; + args[4] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (int) args[4]; +} + /* Return the child of node 'node' or zero if no this node has no * direct descendent. */ inline int __prom_getchild(int node) { - return p1275_cmd ("child", P1275_INOUT(1, 1), node); + return prom_node_to_node("child", node); } inline int prom_getchild(int node) { int cnode; - if(node == -1) return 0; + if (node == -1) + return 0; cnode = __prom_getchild(node); - if(cnode == -1) return 0; - return (int)cnode; + if (cnode == -1) + return 0; + return cnode; } EXPORT_SYMBOL(prom_getchild); @@ -39,10 +56,12 @@ inline int prom_getparent(int node) { int cnode; - if(node == -1) return 0; - cnode = p1275_cmd ("parent", P1275_INOUT(1, 1), node); - if(cnode == -1) return 0; - return (int)cnode; + if (node == -1) + return 0; + cnode = prom_node_to_node("parent", node); + if (cnode == -1) + return 0; + return cnode; } /* Return the next sibling of node 'node' or zero if no more siblings @@ -50,7 +69,7 @@ inline int prom_getparent(int node) */ inline int __prom_getsibling(int node) { - return p1275_cmd(prom_peer_name, P1275_INOUT(1, 1), node); + return prom_node_to_node(prom_peer_name, node); } inline int prom_getsibling(int node) @@ -72,11 +91,21 @@ EXPORT_SYMBOL(prom_getsibling); */ inline int prom_getproplen(int node, const char *prop) { - if((!node) || (!prop)) return -1; - return p1275_cmd ("getproplen", - P1275_ARG(1,P1275_ARG_IN_STRING)| - P1275_INOUT(2, 1), - node, prop); + unsigned long args[6]; + + if (!node || !prop) + return -1; + + args[0] = (unsigned long) "getproplen"; + args[1] = 2; + args[2] = 1; + args[3] = (unsigned int) node; + args[4] = (unsigned long) prop; + args[5] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (int) args[5]; } EXPORT_SYMBOL(prom_getproplen); @@ -87,19 +116,25 @@ EXPORT_SYMBOL(prom_getproplen); inline int prom_getproperty(int node, const char *prop, char *buffer, int bufsize) { + unsigned long args[8]; int plen; plen = prom_getproplen(node, prop); - if ((plen > bufsize) || (plen == 0) || (plen == -1)) { + if ((plen > bufsize) || (plen == 0) || (plen == -1)) return -1; - } else { - /* Ok, things seem all right. 
*/ - return p1275_cmd(prom_getprop_name, - P1275_ARG(1,P1275_ARG_IN_STRING)| - P1275_ARG(2,P1275_ARG_OUT_BUF)| - P1275_INOUT(4, 1), - node, prop, buffer, P1275_SIZE(plen)); - } + + args[0] = (unsigned long) prom_getprop_name; + args[1] = 4; + args[2] = 1; + args[3] = (unsigned int) node; + args[4] = (unsigned long) prop; + args[5] = (unsigned long) buffer; + args[6] = bufsize; + args[7] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (int) args[7]; } EXPORT_SYMBOL(prom_getproperty); @@ -110,7 +145,7 @@ inline int prom_getint(int node, const char *prop) { int intprop; - if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) + if (prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) return intprop; return -1; @@ -126,7 +161,8 @@ int prom_getintdefault(int node, const char *property, int deflt) int retval; retval = prom_getint(node, property); - if(retval == -1) return deflt; + if (retval == -1) + return deflt; return retval; } @@ -138,7 +174,8 @@ int prom_getbool(int node, const char *prop) int retval; retval = prom_getproplen(node, prop); - if(retval == -1) return 0; + if (retval == -1) + return 0; return 1; } EXPORT_SYMBOL(prom_getbool); @@ -152,7 +189,8 @@ void prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size) int len; len = prom_getproperty(node, prop, user_buf, ubuf_size); - if(len != -1) return; + if (len != -1) + return; user_buf[0] = 0; } EXPORT_SYMBOL(prom_getstring); @@ -164,7 +202,8 @@ int prom_nodematch(int node, const char *name) { char namebuf[128]; prom_getproperty(node, "name", namebuf, sizeof(namebuf)); - if(strcmp(namebuf, name) == 0) return 1; + if (strcmp(namebuf, name) == 0) + return 1; return 0; } @@ -190,16 +229,29 @@ int prom_searchsiblings(int node_start, const char *nodename) } EXPORT_SYMBOL(prom_searchsiblings); +static const char *prom_nextprop_name = "nextprop"; + /* Return the first property type for node 'node'. 
* buffer should be at least 32B in length */ inline char *prom_firstprop(int node, char *buffer) { + unsigned long args[7]; + *buffer = 0; - if(node == -1) return buffer; - p1275_cmd ("nextprop", P1275_ARG(2,P1275_ARG_OUT_32B)| - P1275_INOUT(3, 0), - node, (char *) 0x0, buffer); + if (node == -1) + return buffer; + + args[0] = (unsigned long) prom_nextprop_name; + args[1] = 3; + args[2] = 1; + args[3] = (unsigned int) node; + args[4] = 0; + args[5] = (unsigned long) buffer; + args[6] = (unsigned long) -1; + + p1275_cmd_direct(args); + return buffer; } EXPORT_SYMBOL(prom_firstprop); @@ -210,9 +262,10 @@ EXPORT_SYMBOL(prom_firstprop); */ inline char *prom_nextprop(int node, const char *oprop, char *buffer) { + unsigned long args[7]; char buf[32]; - if(node == -1) { + if (node == -1) { *buffer = 0; return buffer; } @@ -220,10 +273,17 @@ inline char *prom_nextprop(int node, const char *oprop, char *buffer) strcpy (buf, oprop); oprop = buf; } - p1275_cmd ("nextprop", P1275_ARG(1,P1275_ARG_IN_STRING)| - P1275_ARG(2,P1275_ARG_OUT_32B)| - P1275_INOUT(3, 0), - node, oprop, buffer); + + args[0] = (unsigned long) prom_nextprop_name; + args[1] = 3; + args[2] = 1; + args[3] = (unsigned int) node; + args[4] = (unsigned long) oprop; + args[5] = (unsigned long) buffer; + args[6] = (unsigned long) -1; + + p1275_cmd_direct(args); + return buffer; } EXPORT_SYMBOL(prom_nextprop); @@ -231,12 +291,19 @@ EXPORT_SYMBOL(prom_nextprop); int prom_finddevice(const char *name) { + unsigned long args[5]; + if (!name) return 0; - return p1275_cmd(prom_finddev_name, - P1275_ARG(0,P1275_ARG_IN_STRING)| - P1275_INOUT(1, 1), - name); + args[0] = (unsigned long) "finddevice"; + args[1] = 1; + args[2] = 1; + args[3] = (unsigned long) name; + args[4] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (int) args[4]; } EXPORT_SYMBOL(prom_finddevice); @@ -247,7 +314,7 @@ int prom_node_has_property(int node, const char *prop) *buf = 0; do { prom_nextprop(node, buf, buf); - if(!strcmp(buf, prop)) + if (!strcmp(buf, prop)) return 1; } while (*buf); return 0; @@ -260,6 +327,8 @@ EXPORT_SYMBOL(prom_node_has_property); int prom_setprop(int node, const char *pname, char *value, int size) { + unsigned long args[8]; + if (size == 0) return 0; if ((pname == 0) || (value == 0)) @@ -271,42 +340,53 @@ prom_setprop(int node, const char *pname, char *value, int size) return 0; } #endif - return p1275_cmd ("setprop", P1275_ARG(1,P1275_ARG_IN_STRING)| - P1275_ARG(2,P1275_ARG_IN_BUF)| - P1275_INOUT(4, 1), - node, pname, value, P1275_SIZE(size)); + args[0] = (unsigned long) "setprop"; + args[1] = 4; + args[2] = 1; + args[3] = (unsigned int) node; + args[4] = (unsigned long) pname; + args[5] = (unsigned long) value; + args[6] = size; + args[7] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (int) args[7]; } EXPORT_SYMBOL(prom_setprop); inline int prom_inst2pkg(int inst) { + unsigned long args[5]; int node; - node = p1275_cmd ("instance-to-package", P1275_INOUT(1, 1), inst); - if (node == -1) return 0; - return node; -} + args[0] = (unsigned long) "instance-to-package"; + args[1] = 1; + args[2] = 1; + args[3] = (unsigned int) inst; + args[4] = (unsigned long) -1; -/* Return 'node' assigned to a particular prom 'path' - * FIXME: Should work for v0 as well - */ -int -prom_pathtoinode(const char *path) -{ - int node, inst; + p1275_cmd_direct(args); - inst = prom_devopen (path); - if (inst == 0) return 0; - node = prom_inst2pkg (inst); - prom_devclose (inst); - if (node == -1) return 0; + node = (int) args[4]; + if (node == -1) + 
return 0; return node; } int prom_ihandle2path(int handle, char *buffer, int bufsize) { - return p1275_cmd("instance-to-path", - P1275_ARG(1,P1275_ARG_OUT_BUF)| - P1275_INOUT(3, 1), - handle, buffer, P1275_SIZE(bufsize)); + unsigned long args[7]; + + args[0] = (unsigned long) "instance-to-path"; + args[1] = 3; + args[2] = 1; + args[3] = (unsigned int) handle; + args[4] = (unsigned long) buffer; + args[5] = bufsize; + args[6] = (unsigned long) -1; + + p1275_cmd_direct(args); + + return (int) args[6]; } diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 7f7338c..1664cce 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c @@ -727,6 +727,9 @@ struct winch { static void free_winch(struct winch *winch, int free_irq_ok) { + if (free_irq_ok) + free_irq(WINCH_IRQ, winch); + list_del(&winch->list); if (winch->pid != -1) @@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok) os_close_file(winch->fd); if (winch->stack != 0) free_stack(winch->stack, 0); - if (free_irq_ok) - free_irq(WINCH_IRQ, winch); kfree(winch); } diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index da992a3..3d63b83 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -162,6 +162,7 @@ struct ubd { struct scatterlist sg[MAX_SG]; struct request *request; int start_sg, end_sg; + sector_t rq_pos; }; #define DEFAULT_COW { \ @@ -186,6 +187,7 @@ struct ubd { .request = NULL, \ .start_sg = 0, \ .end_sg = 0, \ + .rq_pos = 0, \ } /* Protected by ubd_lock */ @@ -1223,7 +1225,6 @@ static void do_ubd_request(struct request_queue *q) { struct io_thread_req *io_req; struct request *req; - sector_t sector; int n; while(1){ @@ -1234,12 +1235,12 @@ static void do_ubd_request(struct request_queue *q) return; dev->request = req; + dev->rq_pos = blk_rq_pos(req); dev->start_sg = 0; dev->end_sg = blk_rq_map_sg(q, req, dev->sg); } req = dev->request; - sector = blk_rq_pos(req); while(dev->start_sg < dev->end_sg){ struct scatterlist *sg = &dev->sg[dev->start_sg]; @@ -1251,10 +1252,9 @@ static void do_ubd_request(struct request_queue *q) return; } prepare_request(req, io_req, - (unsigned long long)sector << 9, + (unsigned long long)dev->rq_pos << 9, sg->offset, sg->length, sg_page(sg)); - sector += sg->length >> 9; n = os_write_file(thread_fd, &io_req, sizeof(struct io_thread_req *)); if(n != sizeof(struct io_thread_req *)){ @@ -1267,6 +1267,7 @@ static void do_ubd_request(struct request_queue *q) return; } + dev->rq_pos += sg->length >> 9; dev->start_sg++; } dev->end_sg = 0; diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index ec63785..9a873d7 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -22,7 +22,7 @@ SECTIONS _text = .; _stext = .; __init_begin = .; - INIT_TEXT_SECTION(PAGE_SIZE) + INIT_TEXT_SECTION(0) . 
= ALIGN(PAGE_SIZE); .text : diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c index dec5678..6e3359d 100644 --- a/arch/um/os-Linux/time.c +++ b/arch/um/os-Linux/time.c @@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv) long long disable_timer(void) { struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } }); - int remain, max = UM_NSEC_PER_SEC / UM_HZ; + long long remain, max = UM_NSEC_PER_SEC / UM_HZ; if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0) printk(UM_KERN_ERR "disable_timer - setitimer failed, " diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index dcb0593..d2b8210 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -247,6 +247,11 @@ config ARCH_HWEIGHT_CFLAGS config KTIME_SCALAR def_bool X86_32 + +config ARCH_CPU_PROBE_RELEASE + def_bool y + depends on HOTPLUG_CPU + source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -792,6 +797,17 @@ config SCHED_MC making when dealing with multi-core CPU chips at a cost of slightly increased overhead in some places. If unsure say N here. +config IRQ_TIME_ACCOUNTING + bool "Fine granularity task level IRQ time accounting" + default n + ---help--- + Select this option to enable fine granularity task irq time + accounting. This is done by reading a timestamp on each + transitions between softirq and hardirq state, so there can be a + small performance impact. + + If in doubt, say N here. + source "kernel/Kconfig.preempt" config X86_UP_APIC diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index e790bc1..4f5f71e 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S @@ -50,7 +50,12 @@ /* * Reload arg registers from stack in case ptrace changed them. * We don't reload %eax because syscall_trace_enter() returned - * the value it wants us to use in the table lookup. + * the %rax value we should see. Instead, we just truncate that + * value to 32 bits again as we did on entry from user mode. + * If it's a new value set by user_regset during entry tracing, + * this matches the normal truncation of the user-mode value. + * If it's -1 to make us punt the syscall, then (u32)-1 is still + * an appropriately invalid value. 
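The cmpl-to-cmpq conversions in the hunks that follow, together with the %eax zero-extension added to LOAD_ARGS32, make the syscall-number bounds check operate on the full 64-bit register. A hedged illustration of why the compare width matters; the bound 341 is an arbitrary stand-in for IA32_NR_syscalls and the value is contrived, neither comes from the patch:

#include <stdint.h>
#include <stdio.h>

#define NR_SYSCALLS 341			/* illustrative bound only */

int main(void)
{
	/* a 64-bit value whose low 32 bits look like a valid syscall number */
	uint64_t rax = 0x100000001ULL;

	int old_check = ((uint32_t)rax <= NR_SYSCALLS - 1);	/* 32-bit compare: passes */
	int new_check = (rax <= NR_SYSCALLS - 1);		/* full-width compare: rejected */

	printf("32-bit check passes: %d, 64-bit check passes: %d\n",
	       old_check, new_check);
	return 0;
}

A value that passes only the truncated check could still be used at full width further down the syscall path; the wider compare and the explicit zero-extension keep the checked value and the used value consistent.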
*/ .macro LOAD_ARGS32 offset, _r9=0 .if \_r9 @@ -60,6 +65,7 @@ movl \offset+48(%rsp),%edx movl \offset+56(%rsp),%esi movl \offset+64(%rsp),%edi + movl %eax,%eax /* zero extension */ .endm .macro CFI_STARTPROC32 simple @@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) CFI_REMEMBER_STATE jnz sysenter_tracesys - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys sysenter_do_call: IA32_ARG_FIXUP @@ -195,7 +201,7 @@ sysexit_from_sys_call: movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ call audit_syscall_entry movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys movl %ebx,%edi /* reload 1st syscall arg */ movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ @@ -248,7 +254,7 @@ sysenter_tracesys: call syscall_trace_enter LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ jmp sysenter_do_call CFI_ENDPROC @@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) CFI_REMEMBER_STATE jnz cstar_tracesys - cmpl $IA32_NR_syscalls-1,%eax + cmpq $IA32_NR_syscalls-1,%rax ja ia32_badsys cstar_do_call: IA32_ARG_FIXUP 1 @@ -367,7 +373,7 @@ cstar_tracesys: LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */ RESTORE_REST xchgl %ebp,%r9d - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ jmp cstar_do_call END(ia32_cstar_target) @@ -425,7 +431,7 @@ ENTRY(ia32_syscall) orl $TS_COMPAT,TI_status(%r10) testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) jnz ia32_tracesys - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja ia32_badsys ia32_do_call: IA32_ARG_FIXUP @@ -444,7 +450,7 @@ ia32_tracesys: call syscall_trace_enter LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ RESTORE_REST - cmpl $(IA32_NR_syscalls-1),%eax + cmpq $(IA32_NR_syscalls-1),%rax ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ jmp ia32_do_call END(ia32_syscall) diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index aa2c39d..27d8379 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -88,6 +88,7 @@ extern int acpi_disabled; extern int acpi_pci_disabled; extern int acpi_skip_timer_override; extern int acpi_use_timer_override; +extern int acpi_fix_pin2_polarity; extern u8 acpi_sci_flags; extern int acpi_sci_override_gsi; diff --git a/arch/x86/include/asm/amd_iommu_proto.h b/arch/x86/include/asm/amd_iommu_proto.h index d2544f1..cb03037 100644 --- a/arch/x86/include/asm/amd_iommu_proto.h +++ b/arch/x86/include/asm/amd_iommu_proto.h @@ -38,4 +38,10 @@ static inline void amd_iommu_stats_init(void) { } #endif /* !CONFIG_AMD_IOMMU_STATS */ +static inline bool is_rd890_iommu(struct pci_dev *pdev) +{ + return (pdev->vendor == PCI_VENDOR_ID_ATI) && + (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); +} + #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h index 7014e88..0861618 100644 --- a/arch/x86/include/asm/amd_iommu_types.h +++ b/arch/x86/include/asm/amd_iommu_types.h @@ -368,6 +368,9 @@ struct amd_iommu { /* capabilities of that IOMMU read from ACPI */ u32 cap; + /* flags read from acpi table */ + 
u8 acpi_flags; + /* * Capability pointer. There could be more than one IOMMU per PCI * device function if there are more than one AMD IOMMU capability @@ -411,6 +414,15 @@ struct amd_iommu { /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; + + /* + * This array is required to work around a potential BIOS bug. + * The BIOS may miss to restore parts of the PCI configuration + * space when the system resumes from S3. The result is that the + * IOMMU does not execute commands anymore which leads to system + * failure. + */ + u32 cache_cfg[4]; }; /* diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index 8859e12..20955ea 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h @@ -27,20 +27,20 @@ struct __xchg_dummy { switch (size) { \ case 1: \ asm volatile("xchgb %b0,%1" \ - : "=q" (__x) \ - : "m" (*__xg(ptr)), "0" (__x) \ + : "=q" (__x), "+m" (*__xg(ptr)) \ + : "0" (__x) \ : "memory"); \ break; \ case 2: \ asm volatile("xchgw %w0,%1" \ - : "=r" (__x) \ - : "m" (*__xg(ptr)), "0" (__x) \ + : "=r" (__x), "+m" (*__xg(ptr)) \ + : "0" (__x) \ : "memory"); \ break; \ case 4: \ asm volatile("xchgl %0,%1" \ - : "=r" (__x) \ - : "m" (*__xg(ptr)), "0" (__x) \ + : "=r" (__x), "+m" (*__xg(ptr)) \ + : "0" (__x) \ : "memory"); \ break; \ default: \ @@ -53,60 +53,33 @@ struct __xchg_dummy { __xchg((v), (ptr), sizeof(*ptr)) /* - * The semantics of XCHGCMP8B are a bit strange, this is why - * there is a loop and the loading of %%eax and %%edx has to - * be inside. This inlines well in most cases, the cached - * cost is around ~38 cycles. (in the future we might want - * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that - * might have an implicit FPU-save as a cost, so it's not - * clear which path to go.) + * CMPXCHG8B only writes to the target if we had the previous + * value in registers, otherwise it acts as a read and gives us the + * "new previous" value. That is why there is a loop. Preloading + * EDX:EAX is a performance optimization: in the common case it means + * we need only one locked operation. * - * cmpxchg8b must be used with the lock prefix here to allow - * the instruction to be executed atomically, see page 3-102 - * of the instruction set reference 24319102.pdf. We need - * the reader side to see the coherent 64bit value. + * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very + * least an FPU save and/or %cr0.ts manipulation. + * + * cmpxchg8b must be used with the lock prefix here to allow the + * instruction to be executed atomically. We need to have the reader + * side to see the coherent 64bit value. 
*/ -static inline void __set_64bit(unsigned long long *ptr, - unsigned int low, unsigned int high) +static inline void set_64bit(volatile u64 *ptr, u64 value) { + u32 low = value; + u32 high = value >> 32; + u64 prev = *ptr; + asm volatile("\n1:\t" - "movl (%0), %%eax\n\t" - "movl 4(%0), %%edx\n\t" - LOCK_PREFIX "cmpxchg8b (%0)\n\t" + LOCK_PREFIX "cmpxchg8b %0\n\t" "jnz 1b" - : /* no outputs */ - : "D"(ptr), - "b"(low), - "c"(high) - : "ax", "dx", "memory"); -} - -static inline void __set_64bit_constant(unsigned long long *ptr, - unsigned long long value) -{ - __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); -} - -#define ll_low(x) *(((unsigned int *)&(x)) + 0) -#define ll_high(x) *(((unsigned int *)&(x)) + 1) - -static inline void __set_64bit_var(unsigned long long *ptr, - unsigned long long value) -{ - __set_64bit(ptr, ll_low(value), ll_high(value)); + : "=m" (*ptr), "+A" (prev) + : "b" (low), "c" (high) + : "memory"); } -#define set_64bit(ptr, value) \ - (__builtin_constant_p((value)) \ - ? __set_64bit_constant((ptr), (value)) \ - : __set_64bit_var((ptr), (value))) - -#define _set_64bit(ptr, value) \ - (__builtin_constant_p(value) \ - ? __set_64bit(ptr, (unsigned int)(value), \ - (unsigned int)((value) >> 32)) \ - : __set_64bit(ptr, ll_low((value)), ll_high((value)))) - extern void __cmpxchg_wrong_size(void); /* @@ -121,21 +94,21 @@ extern void __cmpxchg_wrong_size(void); __typeof__(*(ptr)) __new = (new); \ switch (size) { \ case 1: \ - asm volatile(lock "cmpxchgb %b1,%2" \ - : "=a"(__ret) \ - : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ + asm volatile(lock "cmpxchgb %b2,%1" \ + : "=a" (__ret), "+m" (*__xg(ptr)) \ + : "q" (__new), "0" (__old) \ : "memory"); \ break; \ case 2: \ - asm volatile(lock "cmpxchgw %w1,%2" \ - : "=a"(__ret) \ - : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ + asm volatile(lock "cmpxchgw %w2,%1" \ + : "=a" (__ret), "+m" (*__xg(ptr)) \ + : "r" (__new), "0" (__old) \ : "memory"); \ break; \ case 4: \ - asm volatile(lock "cmpxchgl %1,%2" \ - : "=a"(__ret) \ - : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ + asm volatile(lock "cmpxchgl %2,%1" \ + : "=a" (__ret), "+m" (*__xg(ptr)) \ + : "r" (__new), "0" (__old) \ : "memory"); \ break; \ default: \ @@ -180,12 +153,12 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long new) { unsigned long long prev; - asm volatile(LOCK_PREFIX "cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) + asm volatile(LOCK_PREFIX "cmpxchg8b %1" + : "=A" (prev), + "+m" (*__xg(ptr)) + : "b" ((unsigned long)new), + "c" ((unsigned long)(new >> 32)), + "0" (old) : "memory"); return prev; } @@ -195,12 +168,12 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr, unsigned long long new) { unsigned long long prev; - asm volatile("cmpxchg8b %3" - : "=A"(prev) - : "b"((unsigned long)new), - "c"((unsigned long)(new >> 32)), - "m"(*__xg(ptr)), - "0"(old) + asm volatile("cmpxchg8b %1" + : "=A" (prev), + "+m" (*__xg(ptr)) + : "b" ((unsigned long)new), + "c" ((unsigned long)(new >> 32)), + "0" (old) : "memory"); return prev; } diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 485ae41..9596e7c 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h @@ -5,13 +5,11 @@ #define __xg(x) ((volatile long *)(x)) -static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) +static inline void set_64bit(volatile u64 *ptr, u64 val) { *ptr = val; } 
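The comment above explains the scheme behind the rewritten 32-bit set_64bit(): CMPXCHG8B stores only when EDX:EAX holds the current memory value, and on failure it deposits that current value into EDX:EAX, so retrying converges and the preload usually makes the first attempt succeed. A rough userspace analogue of that loop using GCC's __atomic builtins, purely illustrative; the kernel code above uses inline assembly, not these builtins:

#include <stdint.h>

static void set_64bit_sketch(volatile uint64_t *ptr, uint64_t value)
{
	uint64_t prev = *ptr;	/* preload the expected value */

	/* on failure, prev is refreshed with the value currently in memory */
	while (!__atomic_compare_exchange_n((uint64_t *)ptr, &prev, value,
					    0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		;
}

On 32-bit x86 with cmpxchg8b available, the compiler lowers the 8-byte builtin to the same lock cmpxchg8b loop, which is why no FPU or SSE store is needed to get an atomic 64-bit write.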
-#define _set_64bit set_64bit - extern void __xchg_wrong_size(void); extern void __cmpxchg_wrong_size(void); @@ -26,26 +24,26 @@ extern void __cmpxchg_wrong_size(void); switch (size) { \ case 1: \ asm volatile("xchgb %b0,%1" \ - : "=q" (__x) \ - : "m" (*__xg(ptr)), "0" (__x) \ + : "=q" (__x), "+m" (*__xg(ptr)) \ + : "0" (__x) \ : "memory"); \ break; \ case 2: \ asm volatile("xchgw %w0,%1" \ - : "=r" (__x) \ - : "m" (*__xg(ptr)), "0" (__x) \ + : "=r" (__x), "+m" (*__xg(ptr)) \ + : "0" (__x) \ : "memory"); \ break; \ case 4: \ asm volatile("xchgl %k0,%1" \ - : "=r" (__x) \ - : "m" (*__xg(ptr)), "0" (__x) \ + : "=r" (__x), "+m" (*__xg(ptr)) \ + : "0" (__x) \ : "memory"); \ break; \ case 8: \ asm volatile("xchgq %0,%1" \ - : "=r" (__x) \ - : "m" (*__xg(ptr)), "0" (__x) \ + : "=r" (__x), "+m" (*__xg(ptr)) \ + : "0" (__x) \ : "memory"); \ break; \ default: \ @@ -71,27 +69,27 @@ extern void __cmpxchg_wrong_size(void); __typeof__(*(ptr)) __new = (new); \ switch (size) { \ case 1: \ - asm volatile(lock "cmpxchgb %b1,%2" \ - : "=a"(__ret) \ - : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \ + asm volatile(lock "cmpxchgb %b2,%1" \ + : "=a" (__ret), "+m" (*__xg(ptr)) \ + : "q" (__new), "0" (__old) \ : "memory"); \ break; \ case 2: \ - asm volatile(lock "cmpxchgw %w1,%2" \ - : "=a"(__ret) \ - : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ + asm volatile(lock "cmpxchgw %w2,%1" \ + : "=a" (__ret), "+m" (*__xg(ptr)) \ + : "r" (__new), "0" (__old) \ : "memory"); \ break; \ case 4: \ - asm volatile(lock "cmpxchgl %k1,%2" \ - : "=a"(__ret) \ - : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ + asm volatile(lock "cmpxchgl %k2,%1" \ + : "=a" (__ret), "+m" (*__xg(ptr)) \ + : "r" (__new), "0" (__old) \ : "memory"); \ break; \ case 8: \ - asm volatile(lock "cmpxchgq %1,%2" \ - : "=a"(__ret) \ - : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \ + asm volatile(lock "cmpxchgq %2,%1" \ + : "=a" (__ret), "+m" (*__xg(ptr)) \ + : "r" (__new), "0" (__old) \ : "memory"); \ break; \ default: \ diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h index 306160e..1d9cd27 100644 --- a/arch/x86/include/asm/compat.h +++ b/arch/x86/include/asm/compat.h @@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr) return (u32)(unsigned long)uptr; } -static inline void __user *compat_alloc_user_space(long len) +static inline void __user *arch_compat_alloc_user_space(long len) { struct pt_regs *regs = task_pt_regs(current); return (void __user *)regs->sp - len; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 4681459..14e0ee1 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -150,7 +150,7 @@ #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ -#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ +#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h index 004e6e2..1d5c08a 100644 --- a/arch/x86/include/asm/hpet.h +++ b/arch/x86/include/asm/hpet.h @@ -68,7 +68,6 @@ extern unsigned long force_hpet_address; extern u8 hpet_blockid; extern int hpet_force_user; extern u8 hpet_msi_disable; -extern u8 hpet_readback_cmp; extern int 
is_hpet_enabled(void); extern int hpet_enable(void); extern void hpet_disable(void); diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 30a3e97..6a45ec4 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) extern void iounmap(volatile void __iomem *addr); +extern void set_iounmap_nonlazy(void); #ifdef __KERNEL__ diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index 0b2729b..50de7b4 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h @@ -143,7 +143,15 @@ struct x86_emulate_ops { struct operand { enum { OP_REG, OP_MEM, OP_IMM, OP_NONE } type; unsigned int bytes; - unsigned long val, orig_val, *ptr; + union { + unsigned long orig_val; + u64 orig_val64; + }; + unsigned long *ptr; + union { + unsigned long val; + u64 val64; + }; }; struct fetch_cache { diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 76f5483..535c373 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -77,7 +77,7 @@ #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) #define KVM_MIN_FREE_MMU_PAGES 5 #define KVM_REFILL_PAGES 25 -#define KVM_MAX_CPUID_ENTRIES 40 +#define KVM_MAX_CPUID_ENTRIES 80 #define KVM_NR_FIXED_MTRR_REGION 88 #define KVM_NR_VAR_MTRR 8 @@ -673,20 +673,6 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page) return (struct kvm_mmu_page *)page_private(page); } -static inline u16 kvm_read_fs(void) -{ - u16 seg; - asm("mov %%fs, %0" : "=g"(seg)); - return seg; -} - -static inline u16 kvm_read_gs(void) -{ - u16 seg; - asm("mov %%gs, %0" : "=g"(seg)); - return seg; -} - static inline u16 kvm_read_ldt(void) { u16 ldt; @@ -694,16 +680,6 @@ static inline u16 kvm_read_ldt(void) return ldt; } -static inline void kvm_load_fs(u16 sel) -{ - asm("mov %0, %%fs" : : "rm"(sel)); -} - -static inline void kvm_load_gs(u16 sel) -{ - asm("mov %0, %%gs" : : "rm"(sel)); -} - static inline void kvm_load_ldt(u16 sel) { asm("lldt %0" : : "rm"(sel)); diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 4a2d4e0..8b5393e 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, unsigned cpu = smp_processor_id(); if (likely(prev != next)) { - /* stop flush ipis for the previous mm */ - cpumask_clear_cpu(cpu, mm_cpumask(prev)); #ifdef CONFIG_SMP percpu_write(cpu_tlbstate.state, TLBSTATE_OK); percpu_write(cpu_tlbstate.active_mm, next); @@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, /* Re-load page tables */ load_cr3(next->pgd); + /* stop flush ipis for the previous mm */ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + /* * load the LDT, if the LDT is different: */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 8c7ae43..0639d9f 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -85,11 +85,15 @@ #define MSR_IA32_MC0_ADDR 0x00000402 #define MSR_IA32_MC0_MISC 0x00000403 +#define MSR_AMD64_MC0_MASK 0xc0010044 + #define MSR_IA32_MCx_CTL(x) (MSR_IA32_MC0_CTL + 4*(x)) #define MSR_IA32_MCx_STATUS(x) (MSR_IA32_MC0_STATUS + 4*(x)) #define MSR_IA32_MCx_ADDR(x) (MSR_IA32_MC0_ADDR + 4*(x)) #define MSR_IA32_MCx_MISC(x) (MSR_IA32_MC0_MISC + 4*(x)) +#define MSR_AMD64_MCx_MASK(x) 
(MSR_AMD64_MC0_MASK + (x)) + /* These are consecutive and not in the normal 4er MCE bank block */ #define MSR_IA32_MC0_CTL2 0x00000280 #define MSR_IA32_MCx_CTL2(x) (MSR_IA32_MC0_CTL2 + (x)) diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 177b016..33927d2 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -69,8 +69,6 @@ static inline void native_pmd_clear(pmd_t *pmd) static inline void pud_clear(pud_t *pudp) { - unsigned long pgd; - set_pud(pudp, __pud(0)); /* @@ -79,13 +77,10 @@ static inline void pud_clear(pud_t *pudp) * section 8.1: in PAE mode we explicitly have to flush the * TLB via cr3 if the top-level pgd is changed... * - * Make sure the pud entry we're updating is within the - * current pgd to avoid unnecessary TLB flushes. + * Currently all places where pud_clear() is called either have + * flush_tlb_mm() followed or don't need TLB flush (x86_64 code or + * pud_clear_bad()), so we don't need TLB flush here. */ - pgd = read_cr3(); - if (__pa(pudp) >= pgd && __pa(pudp) < - (pgd + sizeof(pgd_t)*PTRS_PER_PGD)) - write_cr3(pgd); } #ifdef CONFIG_SMP diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index 2984a25..f686f49 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -26,6 +26,7 @@ struct mm_struct; struct vm_area_struct; extern pgd_t swapper_pg_dir[1024]; +extern pgd_t trampoline_pg_dir[1024]; static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 7e5c6a6..2fe362c 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -763,29 +763,6 @@ extern unsigned long boot_option_idle_override; extern unsigned long idle_halt; extern unsigned long idle_nomwait; -/* - * on systems with caches, caches must be flashed as the absolute - * last instruction before going into a suspended halt. Otherwise, - * dirty data can linger in the cache and become stale on resume, - * leading to strange errors. - * - * perform a variety of operations to guarantee that the compiler - * will not reorder instructions. wbinvd itself is serializing - * so the processor will not reorder. - * - * Systems without cache can just go into halt. - */ -static inline void wbinvd_halt(void) -{ - mb(); - /* check for clflush to determine if wbinvd is legal */ - if (cpu_has_clflush) - asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); - else - while (1) - halt(); -} - extern void enable_sep_cpu(void); extern int sysenter_setup(void); @@ -1025,4 +1002,23 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old, return ratio; } +/* + * AMD errata checking + */ +#ifdef CONFIG_CPU_SUP_AMD +extern const int amd_erratum_400[]; +extern bool cpu_has_amd_erratum(const int *); + +#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 } +#define AMD_OSVW_ERRATUM(osvw_id, ...) 
{ osvw_id, __VA_ARGS__, 0 } +#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \ + ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end)) +#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff) +#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff) +#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff) + +#else +#define cpu_has_amd_erratum(x) (false) +#endif /* CONFIG_CPU_SUP_AMD */ + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index cd02f32..6226870 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h @@ -11,5 +11,6 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src); void pvclock_read_wallclock(struct pvclock_wall_clock *wall, struct pvclock_vcpu_time_info *vcpu, struct timespec *ts); +void pvclock_resume(void); #endif /* _ASM_X86_PVCLOCK_H */ diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4cfc908..4c2f63c 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -50,7 +50,7 @@ struct smp_ops { void (*smp_prepare_cpus)(unsigned max_cpus); void (*smp_cpus_done)(unsigned max_cpus); - void (*smp_send_stop)(void); + void (*stop_other_cpus)(int wait); void (*smp_send_reschedule)(int cpu); int (*cpu_up)(unsigned cpu); @@ -73,7 +73,12 @@ extern struct smp_ops smp_ops; static inline void smp_send_stop(void) { - smp_ops.smp_send_stop(); + smp_ops.stop_other_cpus(0); +} + +static inline void stop_other_cpus(void) +{ + smp_ops.stop_other_cpus(1); } static inline void smp_prepare_boot_cpu(void) diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h index 1def601..cfdc6c8 100644 --- a/arch/x86/include/asm/smpboot_hooks.h +++ b/arch/x86/include/asm/smpboot_hooks.h @@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) */ CMOS_WRITE(0, 0xf); - *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; + *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; } static inline void __init smpboot_setup_io_apic(void) diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h index cb507bb..4dde797 100644 --- a/arch/x86/include/asm/trampoline.h +++ b/arch/x86/include/asm/trampoline.h @@ -13,14 +13,17 @@ extern unsigned char *trampoline_base; extern unsigned long init_rsp; extern unsigned long initial_code; +extern unsigned long initial_page_table; extern unsigned long initial_gs; #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE) extern unsigned long setup_trampoline(void); +extern void __init setup_trampoline_page_table(void); extern void __init reserve_trampoline_memory(void); #else -static inline void reserve_trampoline_memory(void) {}; +static inline void setup_trampoline_page_table(void) {} +static inline void reserve_trampoline_memory(void) {} #endif /* CONFIG_X86_TRAMPOLINE */ #endif /* __ASSEMBLY__ */ diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index c042729..1ca132f 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu); extern void check_tsc_sync_target(void); extern int notsc_setup(char *); +extern void save_sched_clock_state(void); +extern void restore_sched_clock_state(void); #endif /* _ASM_X86_TSC_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index e77b220..0357c51 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -11,6 +11,8 @@ 
ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_tsc.o = -pg CFLAGS_REMOVE_rtc.o = -pg CFLAGS_REMOVE_paravirt-spinlocks.o = -pg +CFLAGS_REMOVE_pvclock.o = -pg +CFLAGS_REMOVE_kvmclock.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg endif diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index c05872a..510bc6d 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata; int acpi_sci_override_gsi __initdata; int acpi_skip_timer_override __initdata; int acpi_use_timer_override __initdata; +int acpi_fix_pin2_polarity __initdata; #ifdef CONFIG_X86_LOCAL_APIC static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; @@ -410,10 +411,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, return 0; } - if (acpi_skip_timer_override && - intsrc->source_irq == 0 && intsrc->global_irq == 2) { - printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); - return 0; + if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { + if (acpi_skip_timer_override) { + printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); + return 0; + } + if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { + intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; + printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); + } } mp_override_legacy_irq(intsrc->source_irq, diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 0d20286..4424c73 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c @@ -1953,6 +1953,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, size_t size, int dir) { + dma_addr_t flush_addr; dma_addr_t i, start; unsigned int pages; @@ -1960,6 +1961,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, (dma_addr + size > dma_dom->aperture_size)) return; + flush_addr = dma_addr; pages = iommu_num_pages(dma_addr, size, PAGE_SIZE); dma_addr &= PAGE_MASK; start = dma_addr; @@ -1974,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, dma_ops_free_addresses(dma_dom, dma_addr, pages); if (amd_iommu_unmap_flush || dma_dom->need_flush) { - iommu_flush_pages(&dma_dom->domain, dma_addr, size); + iommu_flush_pages(&dma_dom->domain, flush_addr, size); dma_dom->need_flush = false; } } diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index 3cc63e2..5a170cb 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c @@ -632,6 +632,13 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) iommu->last_device = calc_devid(MMIO_GET_BUS(range), MMIO_GET_LD(range)); iommu->evt_msi_num = MMIO_MSI_NUM(misc); + + if (is_rd890_iommu(iommu->dev)) { + pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]); + pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]); + pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]); + pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]); + } } /* @@ -649,29 +656,9 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu, struct ivhd_entry *e; /* - * First set the recommended feature enable bits from ACPI - * into the IOMMU control registers + * First save the recommended feature enable bits from ACPI */ - h->flags & IVHD_FLAG_HT_TUN_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : - iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); - - h->flags & IVHD_FLAG_PASSPW_EN_MASK ? 
- iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_PASSPW_EN); - - h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : - iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); - - h->flags & IVHD_FLAG_ISOC_EN_MASK ? - iommu_feature_enable(iommu, CONTROL_ISOC_EN) : - iommu_feature_disable(iommu, CONTROL_ISOC_EN); - - /* - * make IOMMU memory accesses cache coherent - */ - iommu_feature_enable(iommu, CONTROL_COHERENT_EN); + iommu->acpi_flags = h->flags; /* * Done. Now parse the device entries @@ -1116,6 +1103,40 @@ static void init_device_table(void) } } +static void iommu_init_flags(struct amd_iommu *iommu) +{ + iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : + iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); + + iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : + iommu_feature_disable(iommu, CONTROL_PASSPW_EN); + + iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : + iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); + + iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? + iommu_feature_enable(iommu, CONTROL_ISOC_EN) : + iommu_feature_disable(iommu, CONTROL_ISOC_EN); + + /* + * make IOMMU memory accesses cache coherent + */ + iommu_feature_enable(iommu, CONTROL_COHERENT_EN); +} + +static void iommu_apply_quirks(struct amd_iommu *iommu) +{ + if (is_rd890_iommu(iommu->dev)) { + pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]); + pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]); + pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]); + pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]); + } +} + /* * This function finally enables all IOMMUs found in the system after * they have been initialized @@ -1126,6 +1147,8 @@ static void enable_iommus(void) for_each_iommu(iommu) { iommu_disable(iommu); + iommu_apply_quirks(iommu); + iommu_init_flags(iommu); iommu_set_device_table(iommu); iommu_enable_command_buffer(iommu); iommu_enable_event_buffer(iommu); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index a96489e..6583884 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1340,6 +1340,14 @@ void __cpuinit end_local_APIC_setup(void) setup_apic_nmi_watchdog(NULL); apic_pm_activate(); + + /* + * Now that local APIC setup is completed for BP, configure the fault + * handling for interrupt remapping. 
+ */ + if (!smp_processor_id() && intr_remapping_enabled) + enable_drhd_fault_handling(); + } #ifdef CONFIG_X86_X2APIC @@ -1606,7 +1614,7 @@ void __init init_apic_mappings(void) * acpi lapic path already maps that address in * acpi_register_lapic_address() */ - if (!acpi_lapic) + if (!acpi_lapic && !smp_found_config) set_fixmap_nocache(FIX_APIC_BASE, apic_phys); apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index e41ed24..4d90327 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -306,14 +306,19 @@ void arch_init_copy_chip_data(struct irq_desc *old_desc, old_cfg = old_desc->chip_data; - memcpy(cfg, old_cfg, sizeof(struct irq_cfg)); + cfg->vector = old_cfg->vector; + cfg->move_in_progress = old_cfg->move_in_progress; + cpumask_copy(cfg->domain, old_cfg->domain); + cpumask_copy(cfg->old_domain, old_cfg->old_domain); init_copy_irq_2_pin(old_cfg, cfg, node); } -static void free_irq_cfg(struct irq_cfg *old_cfg) +static void free_irq_cfg(struct irq_cfg *cfg) { - kfree(old_cfg); + free_cpumask_var(cfg->domain); + free_cpumask_var(cfg->old_domain); + kfree(cfg); } void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc) @@ -1392,6 +1397,7 @@ int setup_ioapic_entry(int apic_id, int irq, irte.dlvry_mode = apic->irq_delivery_mode; irte.vector = vector; irte.dest_id = IRTE_DEST(destination); + irte.redir_hint = 1; /* Set source-id of interrupt request */ set_ioapic_sid(&irte, apic_id); @@ -1728,6 +1734,8 @@ __apicdebuginit(void) print_IO_APIC(void) struct irq_pin_list *entry; cfg = desc->chip_data; + if (!cfg) + continue; entry = cfg->irq_2_pin; if (!entry) continue; @@ -3341,6 +3349,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, irte.dlvry_mode = apic->irq_delivery_mode; irte.vector = cfg->vector; irte.dest_id = IRTE_DEST(dest); + irte.redir_hint = 1; /* Set source-id of interrupt request */ if (pdev) @@ -3397,7 +3406,7 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask) cfg = desc->chip_data; - read_msi_msg_desc(desc, &msg); + get_cached_msi_msg_desc(desc, &msg); msg.data &= ~MSI_DATA_VECTOR_MASK; msg.data |= MSI_DATA_VECTOR(cfg->vector); @@ -3617,6 +3626,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) msg.data |= MSI_DATA_VECTOR(cfg->vector); msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; msg.address_lo |= MSI_ADDR_DEST_ID(dest); + msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); dmar_msi_write(irq, &msg); diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 83e9be4..fac49a8 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c @@ -76,13 +76,6 @@ void __init default_setup_apic_routing(void) /* need to update phys_pkg_id */ apic->phys_pkg_id = apicid_phys_pkg_id; } - - /* - * Now that apic routing model is selected, configure the - * fault handling for intr remapping. - */ - if (intr_remapping_enabled) - enable_drhd_fault_handling(); } /* Same for both flat and physical. 
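The arch_init_copy_chip_data() hunk above replaces a whole-struct memcpy() with field-wise copies plus cpumask_copy(), because with CONFIG_CPUMASK_OFFSTACK the cpumask_var_t members are pointers and a raw copy would leave both descriptors sharing, and later double-freeing, the same masks. A small stand-alone C sketch of that pitfall follows; struct cfg and the cfg_copy_* helpers are invented for illustration.

#include <stdlib.h>
#include <string.h>

struct cfg {
	int vector;
	unsigned long *mask;	/* separately allocated, like an off-stack cpumask */
};

/* Shallow copy: dst->mask aliases src->mask, so freeing either descriptor
 * corrupts the other; this is the aliasing the hunk above avoids. */
static void cfg_copy_shallow(struct cfg *dst, const struct cfg *src)
{
	memcpy(dst, src, sizeof(*dst));
}

/* Deep copy: copy the scalar fields and duplicate the pointed-to storage,
 * mirroring cpumask_copy() into freshly allocated masks. */
static int cfg_copy_deep(struct cfg *dst, const struct cfg *src, size_t words)
{
	dst->vector = src->vector;
	dst->mask = malloc(words * sizeof(unsigned long));
	if (!dst->mask)
		return -1;
	memcpy(dst->mask, src->mask, words * sizeof(unsigned long));
	return 0;
}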
*/ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e485825..b203d0d 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -305,8 +305,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) /* use socket ID also for last level cache */ per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; /* fixup topology information on multi-node processors */ - if ((c->x86 == 0x10) && (c->x86_model == 9)) - amd_fixup_dcm(c); + amd_fixup_dcm(c); #endif } @@ -565,6 +564,29 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } #endif + + /* As a rule processors have APIC timer running in deep C states */ + if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400)) + set_cpu_cap(c, X86_FEATURE_ARAT); + + /* + * Disable GART TLB Walk Errors on Fam10h. We do this here + * because this is always needed when GART is enabled, even in a + * kernel which has no MCE support built in. + */ + if (c->x86 == 0x10) { + /* + * BIOS should disable GartTlbWlk Errors themself. If + * it doesn't do it here as suggested by the BKDG. + * + * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 + */ + u64 mask; + + rdmsrl(MSR_AMD64_MCx_MASK(4), mask); + mask |= (1 << 10); + wrmsrl(MSR_AMD64_MCx_MASK(4), mask); + } } #ifdef CONFIG_X86_32 @@ -609,3 +631,68 @@ static const struct cpu_dev __cpuinitconst amd_cpu_dev = { }; cpu_dev_register(amd_cpu_dev); + +/* + * AMD errata checking + * + * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or + * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that + * have an OSVW id assigned, which it takes as first argument. Both take a + * variable number of family-specific model-stepping ranges created by + * AMD_MODEL_RANGE(). Each erratum also has to be declared as extern const + * int[] in arch/x86/include/asm/processor.h. + * + * Example: + * + * const int amd_erratum_319[] = + * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2), + * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0), + * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0)); + */ + +const int amd_erratum_400[] = + AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf), + AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf)); + + +bool cpu_has_amd_erratum(const int *erratum) +{ + struct cpuinfo_x86 *cpu = ¤t_cpu_data; + int osvw_id = *erratum++; + u32 range; + u32 ms; + + /* + * If called early enough that current_cpu_data hasn't been initialized + * yet, fall back to boot_cpu_data. 
+ */ + if (cpu->x86 == 0) + cpu = &boot_cpu_data; + + if (cpu->x86_vendor != X86_VENDOR_AMD) + return false; + + if (osvw_id >= 0 && osvw_id < 65536 && + cpu_has(cpu, X86_FEATURE_OSVW)) { + u64 osvw_len; + + rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len); + if (osvw_id < osvw_len) { + u64 osvw_bits; + + rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6), + osvw_bits); + return osvw_bits & (1ULL << (osvw_id & 0x3f)); + } + } + + /* OSVW unavailable or ID unknown, match family-model-stepping range */ + ms = (cpu->x86_model << 4) | cpu->x86_mask; + while ((range = *erratum++)) + if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) && + (ms >= AMD_MODEL_RANGE_START(range)) && + (ms <= AMD_MODEL_RANGE_END(range))) + return true; + + return false; +} diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 68e4a6f..d938871 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -537,7 +537,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c) } } -static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) +void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) { u32 tfms, xlvl; u32 ebx; @@ -576,6 +576,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) if (c->extended_cpuid_level >= 0x80000007) c->x86_power = cpuid_edx(0x80000007); + init_scattered_cpuid_features(c); } static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) @@ -731,7 +732,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) get_model_name(c); /* Default name */ - init_scattered_cpuid_features(c); detect_nopl(c); } diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 3624e8a..f668bb1 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -33,5 +33,6 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[], *const __x86_cpu_dev_end[]; extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c); +extern void get_cpu_cap(struct cpuinfo_x86 *c); #endif diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index 1d3cdda..5384b04 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c @@ -704,6 +704,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) per_cpu(acfreq_data, policy->cpu) = NULL; acpi_processor_unregister_performance(data->acpi_data, policy->cpu); + kfree(data->freq_table); kfree(data); } diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 85f69cd..b438944 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -39,6 +39,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID; wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable); c->cpuid_level = cpuid_eax(0); + get_cpu_cap(c); } } diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 224392d..f80ff85 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -141,6 +141,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) address = (low & MASK_BLKPTR_LO) >> 21; if (!address) break; + address += MCG_XBLK_ADDR; } else ++address; @@ -148,12 +149,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) if (rdmsr_safe(address, &low, &high)) break; - if (!(high & MASK_VALID_HI)) { - if (block) - continue; - else - break; - } + if (!(high & MASK_VALID_HI)) + continue; if (!(high & MASK_CNTP_HI) || (high & MASK_LOCKED_HI)) diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c 
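The amd.c comment above explains how errata tables are built from AMD_OSVW_ERRATUM()/AMD_LEGACY_ERRATUM() and queried with cpu_has_amd_erratum(), as init_amd() does for amd_erratum_400. A hedged sketch of defining and consulting one more table follows; the OSVW id, model range and names are invented for illustration, it assumes the macros from the processor.h hunk earlier in this patch, and a real table would also need the extern const int[] declaration in asm/processor.h mentioned above.

/* Purely illustrative erratum: OSVW id 3, family 0x10, models 0x20-0x2f,
 * all steppings. Not a real AMD erratum number. */
const int amd_erratum_example[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0x20, 0x0, 0x2f, 0xf));

static void apply_example_workaround(void)
{
	if (!cpu_has_amd_erratum(amd_erratum_example))
		return;
	/* vendor-specific workaround for the affected parts would go here */
}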
b/arch/x86/kernel/cpu/mtrr/cleanup.c index 06130b5..a670384 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c @@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void) if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) return 0; - if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) + if (boot_cpu_data.x86 < 0xf) return 0; /* In case some hypervisor doesn't pass SYSCFG through: */ if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 79556bd..151787e 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -35,6 +35,7 @@ #include /* FIXME: kvm_para.h needs this */ +#include #include #include #include @@ -143,22 +144,28 @@ struct set_mtrr_data { mtrr_type smp_type; }; +static DEFINE_PER_CPU(struct cpu_stop_work, mtrr_work); + /** - * ipi_handler - Synchronisation handler. Executed by "other" CPUs. + * mtrr_work_handler - Synchronisation handler. Executed by "other" CPUs. * @info: pointer to mtrr configuration data * * Returns nothing. */ -static void ipi_handler(void *info) +static int mtrr_work_handler(void *info) { #ifdef CONFIG_SMP struct set_mtrr_data *data = info; unsigned long flags; + atomic_dec(&data->count); + while (!atomic_read(&data->gate)) + cpu_relax(); + local_irq_save(flags); atomic_dec(&data->count); - while (!atomic_read(&data->gate)) + while (atomic_read(&data->gate)) cpu_relax(); /* The master has cleared me to execute */ @@ -173,12 +180,13 @@ static void ipi_handler(void *info) } atomic_dec(&data->count); - while (atomic_read(&data->gate)) + while (!atomic_read(&data->gate)) cpu_relax(); atomic_dec(&data->count); local_irq_restore(flags); #endif + return 0; } static inline int types_compatible(mtrr_type type1, mtrr_type type2) @@ -198,7 +206,7 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) * * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: * - * 1. Send IPI to do the following: + * 1. Queue work to do the following on all processors: * 2. Disable Interrupts * 3. Wait for all procs to do so * 4. Enter no-fill cache mode @@ -215,14 +223,17 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) * 15. Enable interrupts. * * What does that mean for us? Well, first we set data.count to the number - * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait - * until it hits 0 and proceed. We set the data.gate flag and reset data.count. - * Meanwhile, they are waiting for that flag to be set. Once it's set, each + * of CPUs. As each CPU announces that it started the rendezvous handler by + * decrementing the count, We reset data.count and set the data.gate flag + * allowing all the cpu's to proceed with the work. As each cpu disables + * interrupts, it'll decrement data.count once. We wait until it hits 0 and + * proceed. We clear the data.gate flag and reset data.count. Meanwhile, they + * are waiting for that flag to be cleared. Once it's cleared, each * CPU goes through the transition of updating MTRRs. * The CPU vendors may each do it differently, * so we call mtrr_if->set() callback and let them take care of it. * When they're done, they again decrement data->count and wait for data.gate - * to be reset. + * to be set. * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag * Everyone then enables interrupts and we all continue on. 
* @@ -234,6 +245,9 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ { struct set_mtrr_data data; unsigned long flags; + int cpu; + + preempt_disable(); data.smp_reg = reg; data.smp_base = base; @@ -246,10 +260,15 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ atomic_set(&data.gate, 0); /* Start the ball rolling on other CPUs */ - if (smp_call_function(ipi_handler, &data, 0) != 0) - panic("mtrr: timed out waiting for other CPUs\n"); + for_each_online_cpu(cpu) { + struct cpu_stop_work *work = &per_cpu(mtrr_work, cpu); + + if (cpu == smp_processor_id()) + continue; + + stop_one_cpu_nowait(cpu, mtrr_work_handler, &data, work); + } - local_irq_save(flags); while (atomic_read(&data.count)) cpu_relax(); @@ -259,18 +278,38 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ smp_wmb(); atomic_set(&data.gate, 1); + local_irq_save(flags); + + while (atomic_read(&data.count)) + cpu_relax(); + + /* Ok, reset count and toggle gate */ + atomic_set(&data.count, num_booting_cpus() - 1); + smp_wmb(); + atomic_set(&data.gate, 0); + /* Do our MTRR business */ /* * HACK! - * We use this same function to initialize the mtrrs on boot. - * The state of the boot cpu's mtrrs has been saved, and we want - * to replicate across all the APs. - * If we're doing that @reg is set to something special... + * + * We use this same function to initialize the mtrrs during boot, + * resume, runtime cpu online and on an explicit request to set a + * specific MTRR. + * + * During boot or suspend, the state of the boot cpu's mtrrs has been + * saved, and we want to replicate that across all the cpus that come + * online (either at the end of boot or resume or during a runtime cpu + * online). If we're doing that, @reg is set to something special and on + * this cpu we still do mtrr_if->set_all(). During boot/resume, this + * is unnecessary if at this point we are still on the cpu that started + * the boot/resume sequence. But there is no guarantee that we are still + * on the same cpu. So we do mtrr_if->set_all() on this cpu aswell to be + * sure that we are in sync with everyone else. */ if (reg != ~0U) mtrr_if->set(reg, base, size, type); - else if (!mtrr_aps_delayed_init) + else mtrr_if->set_all(); /* Wait for the others */ @@ -279,7 +318,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ atomic_set(&data.count, num_booting_cpus() - 1); smp_wmb(); - atomic_set(&data.gate, 0); + atomic_set(&data.gate, 1); /* * Wait here for everyone to have seen the gate change @@ -289,6 +328,7 @@ set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type typ cpu_relax(); local_irq_restore(flags); + preempt_enable(); } /** @@ -763,13 +803,21 @@ void set_mtrr_aps_delayed_init(void) } /* - * MTRR initialization for all AP's + * Delayed MTRR initialization for all AP's */ void mtrr_aps_init(void) { if (!use_intel()) return; + /* + * Check if someone has requested the delay of AP MTRR initialization, + * by doing set_mtrr_aps_delayed_init(), prior to this point. If not, + * then we are done. 
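The rewritten set_mtrr() comment above describes the rendezvous in terms of a shared counter, which each CPU decrements to announce it reached the current phase, and a gate flag the coordinator toggles to release everyone into the next phase. The following is a minimal sketch of that handshake using C11 atomics and busy-waiting; it is not the kernel code, which uses atomic_t, cpu_relax() and stop_one_cpu_nowait(), and the rendezvous_* names are invented.

#include <stdatomic.h>

struct rendezvous {
	atomic_int count;	/* CPUs still expected at the current phase */
	atomic_int gate;	/* flipped by the coordinator to release them */
};

/* Worker side: announce arrival at this phase, then spin until released. */
static void rendezvous_wait(struct rendezvous *r, int release_value)
{
	atomic_fetch_sub(&r->count, 1);
	while (atomic_load(&r->gate) != release_value)
		;		/* the kernel spins with cpu_relax() here */
}

/* Coordinator side: wait for all workers, re-arm the counter for the next
 * phase, then flip the gate so everyone proceeds together. */
static void rendezvous_release(struct rendezvous *r, int nr_workers,
			       int release_value)
{
	while (atomic_load(&r->count) > 0)
		;
	atomic_store(&r->count, nr_workers);
	atomic_store(&r->gate, release_value);
}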
+ */ + if (!mtrr_aps_delayed_init) + return; + set_mtrr(~0U, 0, 0, 0); mtrr_aps_delayed_init = false; } diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index c2897b7..46d5844 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c @@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids [ C(DTLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ - [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */ + [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = 0, @@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids [ C(ITLB) ] = { [ C(OP_READ) ] = { [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes */ - [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ + [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */ }, [ C(OP_WRITE) ] = { [ C(RESULT_ACCESS) ] = -1, diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 214ac86..d8d86d0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added) * Intel Errata AAP53 (model 30) * Intel Errata BD53 (model 44) * - * These chips need to be 'reset' when adding counters by programming - * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5 - * either in sequence on the same PMC or on different PMCs. + * The official story: + * These chips need to be 'reset' when adding counters by programming the + * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either + * in sequence on the same PMC or on different PMCs. + * + * In practise it appears some of these events do in fact count, and + * we need to programm all 4 events. */ -static void intel_pmu_nhm_enable_all(int added) +static void intel_pmu_nhm_workaround(void) { - if (added) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - int i; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + static const unsigned long nhm_magic[4] = { + 0x4300B5, + 0x4300D2, + 0x4300B1, + 0x4300B1 + }; + struct perf_event *event; + int i; + + /* + * The Errata requires below steps: + * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; + * 2) Configure 4 PERFEVTSELx with the magic events and clear + * the corresponding PMCx; + * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; + * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; + * 5) Clear 4 pairs of ERFEVTSELx and PMCx; + */ + + /* + * The real steps we choose are a little different from above. + * A) To reduce MSR operations, we don't run step 1) as they + * are already cleared before this function is called; + * B) Call x86_perf_event_update to save PMCx before configuring + * PERFEVTSELx with magic number; + * C) With step 5), we do clear only when the PERFEVTSELx is + * not used currently. 
+ * D) Call x86_perf_event_set_period to restore PMCx; + */ - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2); - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1); - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5); + /* We always operate 4 pairs of PERF Counters */ + for (i = 0; i < 4; i++) { + event = cpuc->events[i]; + if (event) + x86_perf_event_update(event); + } - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3); - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); + for (i = 0; i < 4; i++) { + wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); + wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); + } - for (i = 0; i < 3; i++) { - struct perf_event *event = cpuc->events[i]; + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); - if (!event) - continue; + for (i = 0; i < 4; i++) { + event = cpuc->events[i]; + if (event) { + x86_perf_event_set_period(event); __x86_pmu_enable_event(&event->hw, - ARCH_PERFMON_EVENTSEL_ENABLE); - } + ARCH_PERFMON_EVENTSEL_ENABLE); + } else + wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); } +} + +static void intel_pmu_nhm_enable_all(int added) +{ + if (added) + intel_pmu_nhm_workaround(); intel_pmu_enable_all(added); } diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index ae85d69..a187365 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c @@ -457,6 +457,8 @@ static int p4_hw_config(struct perf_event *event) event->hw.config |= event->attr.config & (p4_config_pack_escr(P4_ESCR_MASK_HT) | p4_config_pack_cccr(P4_CCCR_MASK_HT)); + + event->hw.config &= ~P4_CCCR_FORCE_OVF; } rc = x86_setup_perfctr(event); @@ -581,6 +583,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) cpuc = &__get_cpu_var(cpu_hw_events); for (idx = 0; idx < x86_pmu.num_counters; idx++) { + int overflow; if (!test_bit(idx, cpuc->active_mask)) continue; @@ -591,12 +594,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) WARN_ON_ONCE(hwc->idx != idx); /* it might be unflagged overflow */ - handled = p4_pmu_clear_cccr_ovf(hwc); + overflow = p4_pmu_clear_cccr_ovf(hwc); val = x86_perf_event_update(event); - if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) + if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) continue; + handled += overflow; + /* event overflow for sure */ data.period = event->hw.last_period; @@ -612,7 +617,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) inc_irq_stat(apic_perf_irqs); } - return handled; + return handled > 0; } /* diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index b9d1ff5..ce9c6c2 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -51,7 +52,7 @@ static inline int __vmware_platform(void) static unsigned long vmware_get_tsc_khz(void) { - uint64_t tsc_hz; + uint64_t tsc_hz, lpj; uint32_t eax, ebx, ecx, edx; VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); @@ -62,6 +63,13 @@ static unsigned long vmware_get_tsc_khz(void) printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n", (unsigned long) tsc_hz / 1000, (unsigned long) tsc_hz % 1000); + + if (!preset_lpj) { + lpj = ((u64)tsc_hz * 1000); + do_div(lpj, HZ); + preset_lpj = lpj; + } + return tsc_hz; } diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 045b36c..9948288 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c @@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, if (!csize) return 
0; - vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); if (!vaddr) return -ENOMEM; @@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, } else memcpy(buf, vaddr + offset, csize); + set_iounmap_nonlazy(); iounmap(vaddr); return csize; } diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 0d6fc71..28b09af 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -980,15 +980,21 @@ static int __init parse_memopt(char *p) if (!p) return -EINVAL; -#ifdef CONFIG_X86_32 if (!strcmp(p, "nopentium")) { +#ifdef CONFIG_X86_32 setup_clear_cpu_cap(X86_FEATURE_PSE); return 0; - } +#else + printk(KERN_WARNING "mem=nopentium ignored! (only supported on x86_32)\n"); + return -EINVAL; #endif + } userdef = 1; mem_size = memparse(p, &p); + /* don't remove all of memory when handling "mem={invalid}" param */ + if (mem_size == 0) + return -EINVAL; e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1); return 0; diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index e5cc7e8..f67a33c7 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -18,7 +18,6 @@ #include #include #include -#include static void __init fix_hypertransport_config(int num, int slot, int func) { @@ -146,15 +145,10 @@ static void __init ati_bugs(int num, int slot, int func) static u32 __init ati_sbx00_rev(int num, int slot, int func) { - u32 old, d; + u32 d; - d = read_pci_config(num, slot, func, 0x70); - old = d; - d &= ~(1<<8); - write_pci_config(num, slot, func, 0x70, d); d = read_pci_config(num, slot, func, 0x8); d &= 0xff; - write_pci_config(num, slot, func, 0x70, old); return d; } @@ -163,11 +157,19 @@ static void __init ati_bugs_contd(int num, int slot, int func) { u32 d, rev; - if (acpi_use_timer_override) + rev = ati_sbx00_rev(num, slot, func); + if (rev >= 0x40) + acpi_fix_pin2_polarity = 1; + + /* + * SB600: revisions 0x11, 0x12, 0x13, 0x14, ... + * SB700: revisions 0x39, 0x3a, ... + * SB800: revisions 0x40, 0x41, ... + */ + if (rev >= 0x39) return; - rev = ati_sbx00_rev(num, slot, func); - if (rev > 0x13) + if (acpi_use_timer_override) return; /* check for IRQ0 interrupt swap */ @@ -192,21 +194,6 @@ static void __init ati_bugs_contd(int num, int slot, int func) } #endif -/* - * Force the read back of the CMP register in hpet_next_event() - * to work around the problem that the CMP register write seems to be - * delayed. See hpet_next_event() for details. - * - * We do this on all SMBUS incarnations for now until we have more - * information about the affected chipsets. 
- */ -static void __init ati_hpet_bugs(int num, int slot, int func) -{ -#ifdef CONFIG_HPET_TIMER - hpet_readback_cmp = 1; -#endif -} - #define QFLAG_APPLY_ONCE 0x1 #define QFLAG_APPLIED 0x2 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) @@ -236,8 +223,6 @@ static struct chipset early_qrk[] __initdata = { PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs }, { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, - { PCI_VENDOR_ID_ATI, PCI_ANY_ID, - PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_hpet_bugs }, {} }; diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 4db7c4d..2642cf9 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1268,7 +1268,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) decl PER_CPU_VAR(irq_count) jmp error_exit CFI_ENDPROC -END(do_hypervisor_callback) +END(xen_do_hypervisor_callback) /* * Hypervisor uses this for application faults while it executes. diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 37c3d4b..75e3981 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -328,7 +328,7 @@ ENTRY(startup_32_smp) /* * Enable paging */ - movl $pa(swapper_pg_dir),%eax + movl pa(initial_page_table), %eax movl %eax,%cr3 /* set the page table pointer.. */ movl %cr0,%eax orl $X86_CR0_PG,%eax @@ -608,6 +608,8 @@ ignore_int: .align 4 ENTRY(initial_code) .long i386_start_kernel +ENTRY(initial_page_table) + .long pa(swapper_pg_dir) /* * BSS section @@ -623,6 +625,10 @@ ENTRY(swapper_pg_dir) #endif swapper_pg_fixmap: .fill 1024,4,0 +#ifdef CONFIG_X86_TRAMPOLINE +ENTRY(trampoline_pg_dir) + .fill 1024,4,0 +#endif ENTRY(empty_zero_page) .fill 4096,1,0 diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ba390d7..917c66f 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -36,7 +36,6 @@ unsigned long hpet_address; u8 hpet_blockid; /* OS timer block num */ u8 hpet_msi_disable; -u8 hpet_readback_cmp; #ifdef CONFIG_PCI_MSI static unsigned long hpet_num_timers; @@ -396,23 +395,27 @@ static int hpet_next_event(unsigned long delta, * at that point and we would wait for the next hpet interrupt * forever. We found out that reading the CMP register back * forces the transfer so we can rely on the comparison with - * the counter register below. + * the counter register below. If the read back from the + * compare register does not match the value we programmed + * then we might have a real hardware problem. We can not do + * much about it here, but at least alert the user/admin with + * a prominent warning. * - * That works fine on those ATI chipsets, but on newer Intel - * chipsets (ICH9...) this triggers due to an erratum: Reading - * the comparator immediately following a write is returning - * the old value. + * An erratum on some chipsets (ICH9,..), results in + * comparator read immediately following a write returning old + * value. Workaround for this is to read this value second + * time, when first read returns old value. * - * We restrict the read back to the affected ATI chipsets (set - * by quirks) and also run it with hpet=verbose for debugging - * purposes. + * In fact the write to the comparator register is delayed up + * to two HPET cycles so the workaround we tried to restrict + * the readback to those known to be borked ATI chipsets + * failed miserably. So we give up on optimizations forever + * and penalize all HPET incarnations unconditionally. 
*/ - if (hpet_readback_cmp || hpet_verbose) { - u32 cmp = hpet_readl(HPET_Tn_CMP(timer)); - - if (cmp != cnt) + if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) { + if (hpet_readl(HPET_Tn_CMP(timer)) != cnt) printk_once(KERN_WARNING - "hpet: compare register read back failed.\n"); + "hpet: compare register read back failed.\n"); } return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; @@ -504,7 +507,7 @@ static int hpet_assign_irq(struct hpet_dev *dev) { unsigned int irq; - irq = create_irq(); + irq = create_irq_nr(0, -1); if (!irq) return -EINVAL; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index a8f1b80..f365470 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -421,6 +421,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) dr6_p = (unsigned long *)ERR_PTR(args->err); dr6 = *dr6_p; + /* If it's a single step, TRAP bits are random */ + if (dr6 & DR_STEP) + return NOTIFY_DONE; + /* Do an early return if no trap bits are set in DR6 */ if ((dr6 & DR_TRAP_BITS) == 0) return NOTIFY_DONE; diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index e1af7c0..67381a2 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -66,7 +66,6 @@ struct microcode_amd { unsigned int mpb[0]; }; -#define UCODE_MAX_SIZE 2048 #define UCODE_CONTAINER_SECTION_HDR 8 #define UCODE_CONTAINER_HEADER_SIZE 12 @@ -125,6 +124,37 @@ static int get_matching_microcode(int cpu, void *mc, int rev) return 1; } +static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) +{ + struct cpuinfo_x86 *c = &cpu_data(cpu); + unsigned int max_size, actual_size; + +#define F1XH_MPB_MAX_SIZE 2048 +#define F14H_MPB_MAX_SIZE 1824 +#define F15H_MPB_MAX_SIZE 4096 + + switch (c->x86) { + case 0x14: + max_size = F14H_MPB_MAX_SIZE; + break; + case 0x15: + max_size = F15H_MPB_MAX_SIZE; + break; + default: + max_size = F1XH_MPB_MAX_SIZE; + break; + } + + actual_size = buf[4] + (buf[5] << 8); + + if (actual_size > size || actual_size > max_size) { + pr_err("section size mismatch\n"); + return 0; + } + + return actual_size; +} + static int apply_microcode_amd(int cpu) { u32 rev, dummy; @@ -162,11 +192,11 @@ static int get_ucode_data(void *to, const u8 *from, size_t n) } static void * -get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) +get_next_ucode(int cpu, const u8 *buf, unsigned int size, unsigned int *mc_size) { - unsigned int total_size; + unsigned int actual_size = 0; u8 section_hdr[UCODE_CONTAINER_SECTION_HDR]; - void *mc; + void *mc = NULL; if (get_ucode_data(section_hdr, buf, UCODE_CONTAINER_SECTION_HDR)) return NULL; @@ -176,23 +206,18 @@ get_next_ucode(const u8 *buf, unsigned int size, unsigned int *mc_size) return NULL; } - total_size = (unsigned long) (section_hdr[4] + (section_hdr[5] << 8)); - - if (total_size > size || total_size > UCODE_MAX_SIZE) { - pr_err("error: size mismatch\n"); + actual_size = verify_ucode_size(cpu, buf, size); + if (!actual_size) return NULL; - } - mc = vmalloc(UCODE_MAX_SIZE); - if (mc) { - memset(mc, 0, UCODE_MAX_SIZE); - if (get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, - total_size)) { - vfree(mc); - mc = NULL; - } else - *mc_size = total_size + UCODE_CONTAINER_SECTION_HDR; - } + mc = vmalloc(actual_size); + if (!mc) + return NULL; + + memset(mc, 0, actual_size); + get_ucode_data(mc, buf + UCODE_CONTAINER_SECTION_HDR, actual_size); + *mc_size = actual_size + UCODE_CONTAINER_SECTION_HDR; + return 
mc; } @@ -258,7 +283,7 @@ generic_load_microcode(int cpu, const u8 *data, size_t size) unsigned int uninitialized_var(mc_size); struct microcode_header_amd *mc_header; - mc = get_next_ucode(ucode_ptr, leftover, &mc_size); + mc = get_next_ucode(cpu, ucode_ptr, leftover, &mc_size); if (!mc) break; diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 3561702..2573689 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c @@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, /* For performance reasons, reuse mc area when possible */ if (!mc || mc_size > curr_mc_size) { - if (mc) - vfree(mc); + vfree(mc); mc = vmalloc(mc_size); if (!mc) break; @@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, if (get_ucode_data(mc, ucode_ptr, mc_size) || microcode_sanity_check(mc) < 0) { - vfree(mc); break; } if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) { - if (new_mc) - vfree(new_mc); + vfree(new_mc); new_rev = mc_header.rev; new_mc = mc; mc = NULL; /* trigger new vmalloc */ @@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, leftover -= mc_size; } - if (mc) - vfree(mc); + vfree(mc); if (leftover) { - if (new_mc) - vfree(new_mc); + vfree(new_mc); state = UCODE_ERROR; goto out; } @@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, goto out; } - if (uci->mc) - vfree(uci->mc); + vfree(uci->mc); uci->mc = (struct microcode_intel *)new_mc; pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index d86dbf7..d7b6f7f 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c @@ -274,6 +274,18 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt) void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { } +static void __init smp_register_lapic_address(unsigned long address) +{ + mp_lapic_addr = address; + + set_fixmap_nocache(FIX_APIC_BASE, address); + if (boot_cpu_physical_apicid == -1U) { + boot_cpu_physical_apicid = read_apic_id(); + apic_version[boot_cpu_physical_apicid] = + GET_APIC_VERSION(apic_read(APIC_LVR)); + } +} + static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) { char str[16]; @@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) if (early) return 1; + /* Initialize the lapic mapping */ + if (!acpi_lapic) + smp_register_lapic_address(mpc->lapic); + if (mpc->oemptr) x86_init.mpparse.smp_read_mpc_oem(mpc); diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 8297160..a23b382 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c @@ -117,6 +117,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, unsigned long flags; int ret = -EIO; int i; + int restarts = 0; spin_lock_irqsave(&ec_lock, flags); @@ -173,7 +174,9 @@ restart: if (wait_on_obf(0x6c, 1)) { printk(KERN_ERR "olpc-ec: timeout waiting for" " EC to provide data!\n"); - goto restart; + if (restarts++ < 10) + goto restart; + goto err; } outbuf[i] = inb(0x68); printk(KERN_DEBUG "olpc-ec: received 0x%x\n", diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 0f7f130..870e069 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c @@ -80,6 +80,9 @@ static u32 gart_unmapped_entry; #define 
AGPEXTERN #endif +/* GART can only remap to physical addresses < 1TB */ +#define GART_MAX_PHYS_ADDR (1ULL << 40) + /* backdoor interface to AGP driver */ AGPEXTERN int agp_memory_reserved; AGPEXTERN __u32 *agp_gatt_table; @@ -211,9 +214,13 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem, size_t size, int dir, unsigned long align_mask) { unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE); - unsigned long iommu_page = alloc_iommu(dev, npages, align_mask); + unsigned long iommu_page; int i; + if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR)) + return bad_dma_addr; + + iommu_page = alloc_iommu(dev, npages, align_mask); if (iommu_page == -1) { if (!nonforced_iommu(dev, phys_mem, size)) return phys_mem; diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e7e3521..553b02f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -525,42 +525,6 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) return (edx & MWAIT_EDX_C1); } -/* - * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e. - * For more information see - * - Erratum #400 for NPT family 0xf and family 0x10 CPUs - * - Erratum #365 for family 0x11 (not affected because C1e not in use) - */ -static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) -{ - u64 val; - if (c->x86_vendor != X86_VENDOR_AMD) - goto no_c1e_idle; - - /* Family 0x0f models < rev F do not have C1E */ - if (c->x86 == 0x0F && c->x86_model >= 0x40) - return 1; - - if (c->x86 == 0x10) { - /* - * check OSVW bit for CPUs that are not affected - * by erratum #400 - */ - if (cpu_has(c, X86_FEATURE_OSVW)) { - rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); - if (val >= 2) { - rdmsrl(MSR_AMD64_OSVW_STATUS, val); - if (!(val & BIT(1))) - goto no_c1e_idle; - } - } - return 1; - } - -no_c1e_idle: - return 0; -} - static cpumask_var_t c1e_mask; static int c1e_detected; @@ -638,7 +602,8 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) */ printk(KERN_INFO "using mwait in idle threads.\n"); pm_idle = mwait_idle; - } else if (check_c1e_idle(c)) { + } else if (cpu_has_amd_erratum(amd_erratum_400)) { + /* E400: APIC timer interrupt does not wake up CPU from C1e */ printk(KERN_INFO "using C1E aware idle routine\n"); pm_idle = c1e_idle; } else diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index 239427c..a4f07c1 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c @@ -120,6 +120,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) static atomic64_t last_value = ATOMIC64_INIT(0); +void pvclock_resume(void) +{ + atomic64_set(&last_value, 0); +} + cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) { struct pvclock_shadow_time shadow; diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e3af342..76a0d71 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -641,7 +641,7 @@ void native_machine_shutdown(void) /* O.K Now that I'm on the appropriate processor, * stop all of the others. 
*/ - smp_send_stop(); + stop_other_cpus(); #endif lapic_shutdown(); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b4ae4ac..6600cfd 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1008,6 +1008,8 @@ void __init setup_arch(char **cmdline_p) paging_init(); x86_init.paging.pagetable_setup_done(swapper_pg_dir); + setup_trampoline_page_table(); + tboot_probe(); #ifdef CONFIG_X86_64 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index d801210..513deac 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -159,10 +159,10 @@ asmlinkage void smp_reboot_interrupt(void) irq_exit(); } -static void native_smp_send_stop(void) +static void native_stop_other_cpus(int wait) { unsigned long flags; - unsigned long wait; + unsigned long timeout; if (reboot_force) return; @@ -179,9 +179,12 @@ static void native_smp_send_stop(void) if (num_online_cpus() > 1) { apic->send_IPI_allbutself(REBOOT_VECTOR); - /* Don't wait longer than a second */ - wait = USEC_PER_SEC; - while (num_online_cpus() > 1 && wait--) + /* + * Don't wait longer than a second if the caller + * didn't ask us to wait. + */ + timeout = USEC_PER_SEC; + while (num_online_cpus() > 1 && (wait || timeout--)) udelay(1); } @@ -227,7 +230,7 @@ struct smp_ops smp_ops = { .smp_prepare_cpus = native_smp_prepare_cpus, .smp_cpus_done = native_smp_cpus_done, - .smp_send_stop = native_smp_send_stop, + .stop_other_cpus = native_stop_other_cpus, .smp_send_reschedule = native_smp_send_reschedule, .cpu_up = native_cpu_up, diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index c4f33b2..40eb0f9 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -73,7 +73,6 @@ #ifdef CONFIG_X86_32 u8 apicid_2_node[MAX_APICID]; -static int low_mappings; #endif /* State of each CPU */ @@ -91,6 +90,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 }; static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) + +/* + * We need this for trampoline_base protection from concurrent accesses when + * off- and onlining cores wildly. + */ +static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex); + +void cpu_hotplug_driver_lock() +{ + mutex_lock(&x86_cpu_hotplug_driver_mutex); +} + +void cpu_hotplug_driver_unlock() +{ + mutex_unlock(&x86_cpu_hotplug_driver_mutex); +} + +ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; } +ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; } #else static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; #define get_idle_for_cpu(x) (idle_thread_array[(x)]) @@ -281,6 +299,18 @@ notrace static void __cpuinit start_secondary(void *unused) * fragile that we want to limit the things done here to the * most necessary things. */ + +#ifdef CONFIG_X86_32 + /* + * Switch away from the trampoline page-table + * + * Do this before cpu_init() because it needs to access per-cpu + * data which may not be mapped in the trampoline page-table. 
+ */ + load_cr3(swapper_pg_dir); + __flush_tlb_all(); +#endif + vmi_bringup(); cpu_init(); preempt_disable(); @@ -299,12 +329,6 @@ notrace static void __cpuinit start_secondary(void *unused) legacy_pic->chip->unmask(0); } -#ifdef CONFIG_X86_32 - while (low_mappings) - cpu_relax(); - __flush_tlb_all(); -#endif - /* This must be done before setting cpu_online_mask */ set_cpu_sibling_map(raw_smp_processor_id()); wmb(); @@ -754,6 +778,7 @@ do_rest: #ifdef CONFIG_X86_32 /* Stack for startup_32 can be just as for start_secondary onwards */ irq_ctx_init(cpu); + initial_page_table = __pa(&trampoline_pg_dir); #else clear_tsk_thread_flag(c_idle.idle, TIF_FORK); initial_gs = per_cpu_offset(cpu); @@ -816,6 +841,13 @@ do_rest: if (cpumask_test_cpu(cpu, cpu_callin_mask)) break; /* It has booted */ udelay(100); + /* + * Allow other tasks to run while we wait for the + * AP to come online. This also gives a chance + * for the MTRR work(triggered by the AP coming online) + * to be completed in the stop machine context. + */ + schedule(); } if (cpumask_test_cpu(cpu, cpu_callin_mask)) @@ -894,20 +926,8 @@ int __cpuinit native_cpu_up(unsigned int cpu) per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; -#ifdef CONFIG_X86_32 - /* init low mem mapping */ - clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, - min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); - flush_tlb_all(); - low_mappings = 1; - err = do_boot_cpu(apicid, cpu); - zap_low_mappings(false); - low_mappings = 0; -#else - err = do_boot_cpu(apicid, cpu); -#endif if (err) { pr_debug("do_boot_cpu failed %d\n", err); return -EIO; @@ -1367,11 +1387,94 @@ void play_dead_common(void) local_irq_disable(); } +#define MWAIT_SUBSTATE_MASK 0xf +#define MWAIT_SUBSTATE_SIZE 4 + +#define CPUID_MWAIT_LEAF 5 +#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 + +/* + * We need to flush the caches before going to sleep, lest we have + * dirty data in our caches when we come back up. + */ +static inline void mwait_play_dead(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned int highest_cstate = 0; + unsigned int highest_subcstate = 0; + int i; + void *mwait_ptr; + + if (!cpu_has(¤t_cpu_data, X86_FEATURE_MWAIT)) + return; + if (!cpu_has(¤t_cpu_data, X86_FEATURE_CLFLSH)) + return; + if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return; + + eax = CPUID_MWAIT_LEAF; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + + /* + * eax will be 0 if EDX enumeration is not valid. + * Initialized below to cstate, sub_cstate value when EDX is valid. + */ + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { + eax = 0; + } else { + edx >>= MWAIT_SUBSTATE_SIZE; + for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { + if (edx & MWAIT_SUBSTATE_MASK) { + highest_cstate = i; + highest_subcstate = edx & MWAIT_SUBSTATE_MASK; + } + } + eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | + (highest_subcstate - 1); + } + + /* + * This should be a memory location in a cache line which is + * unlikely to be touched by other processors. The actual + * content is immaterial as it is not actually modified in any way. + */ + mwait_ptr = ¤t_thread_info()->flags; + + wbinvd(); + + while (1) { + /* + * The CLFLUSH is a workaround for erratum AAI65 for + * the Xeon 7400 series. It's not clear it is actually + * needed, but it should be harmless in either case. + * The WBINVD is insufficient due to the spurious-wakeup + * case where we return around the loop. 
+ */ + clflush(mwait_ptr); + __monitor(mwait_ptr, 0, 0); + mb(); + __mwait(eax, 0); + } +} + +static inline void hlt_play_dead(void) +{ + if (current_cpu_data.x86 >= 4) + wbinvd(); + + while (1) { + native_halt(); + } +} + void native_play_dead(void) { play_dead_common(); tboot_shutdown(TB_SHUTDOWN_WFS); - wbinvd_halt(); + + mwait_play_dead(); /* Only returns on failure */ + hlt_play_dead(); } #else /* ... !CONFIG_HOTPLUG_CPU */ diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c index c652ef6..e2a5952 100644 --- a/arch/x86/kernel/trampoline.c +++ b/arch/x86/kernel/trampoline.c @@ -1,6 +1,7 @@ #include #include +#include #include #if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP) @@ -37,3 +38,19 @@ unsigned long __trampinit setup_trampoline(void) memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE); return virt_to_phys(trampoline_base); } + +void __init setup_trampoline_page_table(void) +{ +#ifdef CONFIG_X86_32 + /* Copy kernel address range */ + clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + KERNEL_PGD_PTRS); + + /* Initialize low mappings */ + clone_pgd_range(trampoline_pg_dir, + swapper_pg_dir + KERNEL_PGD_BOUNDARY, + min_t(unsigned long, KERNEL_PGD_PTRS, + KERNEL_PGD_BOUNDARY)); +#endif +} diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 725ef4d..4d0f3ed 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -568,6 +568,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) if (regs->flags & X86_VM_MASK) { handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); + preempt_conditional_cli(regs); return; } diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 9faf91a..97cdbe8 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -104,10 +104,14 @@ int __init notsc_setup(char *str) __setup("notsc", notsc_setup); +static int no_sched_irq_time; + static int __init tsc_setup(char *str) { if (!strcmp(str, "reliable")) tsc_clocksource_reliable = 1; + if (!strncmp(str, "noirqtime", 9)) + no_sched_irq_time = 1; return 1; } @@ -626,6 +630,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) local_irq_restore(flags); } +static unsigned long long cyc2ns_suspend; + +void save_sched_clock_state(void) +{ + if (!sched_clock_stable) + return; + + cyc2ns_suspend = sched_clock(); +} + +/* + * Even on processors with invariant TSC, TSC gets reset in some of the + * ACPI system sleep states. And in some systems the BIOS seems to reinit TSC to an + * arbitrary value (still sync'd across cpu's) during resume from such sleep + * states. To cope with this, recompute the cyc2ns_offset for each cpu so + * that sched_clock() continues from the point where it was left off during + * suspend. + */ +void restore_sched_clock_state(void) +{ + unsigned long long offset; + unsigned long flags; + int cpu; + + if (!sched_clock_stable) + return; + + local_irq_save(flags); + + __get_cpu_var(cyc2ns_offset) = 0; + offset = cyc2ns_suspend - sched_clock(); + + for_each_possible_cpu(cpu) + per_cpu(cyc2ns_offset, cpu) = offset; + + local_irq_restore(flags); +} + #ifdef CONFIG_CPU_FREQ /* Frequency scaling support.
Adjust the TSC based timer when the cpu frequency @@ -764,6 +806,7 @@ void mark_tsc_unstable(char *reason) if (!tsc_unstable) { tsc_unstable = 1; sched_clock_stable = 0; + disable_sched_clock_irqtime(); printk(KERN_INFO "Marking TSC unstable due to %s\n", reason); /* Change only the rating, when not registered */ if (clocksource_tsc.mult) @@ -952,6 +995,9 @@ void __init tsc_init(void) /* now allow native_sched_clock() to use rdtsc */ tsc_disabled = 0; + if (!no_sched_irq_time) + enable_sched_clock_irqtime(); + lpj = ((u64)tsc_khz * 1000); do_div(lpj, HZ); lpj_fine = lpj; diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 5ffb562..61fb985 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -551,8 +551,14 @@ cannot_handle: int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) { if (VMPI.is_vm86pus) { - if ((trapno == 3) || (trapno == 1)) - return_to_32bit(regs, VM86_TRAP + (trapno << 8)); + if ((trapno == 3) || (trapno == 1)) { + KVM86->regs32->ax = VM86_TRAP + (trapno << 8); + /* setting this flag forces the code in entry_32.S to + call save_v86_state() and change the stack pointer + to KVM86->regs32 */ + set_thread_flag(TIF_IRET); + return 0; + } do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); return 0; } diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 37e68fc..aa8bf4f 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c @@ -305,7 +305,8 @@ void __cpuinit xsave_init(void) */ static void __init setup_xstate_init(void) { - init_xstate_buf = alloc_bootmem(xstate_size); + init_xstate_buf = alloc_bootmem_align(xstate_size, + __alignof__(struct xsave_struct)); init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; } diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 5ac0bb4..582c8fc 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -345,10 +345,10 @@ static u32 group_table[] = { DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 0, 0, 0, 0, [Group4*8] = - ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM, + ByteOp | DstMem | SrcNone | ModRM | Lock, ByteOp | DstMem | SrcNone | ModRM | Lock, 0, 0, 0, 0, 0, 0, [Group5*8] = - DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, + DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock, SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps, SrcMem | ModRM | Stack, 0, @@ -1712,17 +1712,16 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) { struct decode_cache *c = &ctxt->decode; - u64 old = c->dst.orig_val; + u64 old = c->dst.orig_val64; if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) { - c->regs[VCPU_REGS_RAX] = (u32) (old >> 0); c->regs[VCPU_REGS_RDX] = (u32) (old >> 32); ctxt->eflags &= ~EFLG_ZF; } else { - c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) | - (u32) c->regs[VCPU_REGS_RBX]; + c->dst.val64 = ((u64)c->regs[VCPU_REGS_RCX] << 32) | + (u32) c->regs[VCPU_REGS_RBX]; ctxt->eflags |= EFLG_ZF; } @@ -2535,7 +2534,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ctxt->vcpu); if (rc != X86EMUL_CONTINUE) goto done; - c->src.orig_val = c->src.val; + c->src.orig_val64 = c->src.val64; } if (c->src2.type == OP_MEM) { diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 93825ff..8a33246 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -553,6 +553,8 @@ struct kvm_pic 
*kvm_create_pic(struct kvm *kvm) s->irq_request_opaque = kvm; s->pics[0].pics_state = s; s->pics[1].pics_state = s; + s->pics[0].isr_ack = 0xff; + s->pics[1].isr_ack = 0xff; /* * Initialize PIO device diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h index cd1f362..4227b1a 100644 --- a/arch/x86/kvm/irq.h +++ b/arch/x86/kvm/irq.h @@ -45,7 +45,6 @@ struct kvm_kpic_state { u8 irr; /* interrupt request register */ u8 imr; /* interrupt mask register */ u8 isr; /* interrupt service register */ - u8 isr_ack; /* interrupt ack detection */ u8 priority_add; /* highest irq priority */ u8 irq_base; u8 read_reg_select; @@ -58,6 +57,7 @@ struct kvm_kpic_state { u8 init4; /* true if 4 byte init */ u8 elcr; /* PIIX edge/trigger selection */ u8 elcr_mask; + u8 isr_ack; /* interrupt ack detection */ struct kvm_pic *pics_state; }; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index b1ed0a1..92b6ca4 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -799,8 +799,12 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, ret = handler(kvm, &memslot->rmap[gfn_offset], data); for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { - int idx = gfn_offset; - idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); + unsigned long idx; + int nr; + + nr = KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL+j); + idx = (memslot->base_gfn+gfn_offset) / nr - + memslot->base_gfn / nr; ret |= handler(kvm, &memslot->lpage_info[j][idx].rmap_pde, data); diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 2331bdc..e34452b 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h @@ -324,8 +324,32 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, break; } - if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) - continue; + if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) { + struct kvm_mmu_page *child; + unsigned direct_access; + + if (level != gw->level) + continue; + + /* + * For the direct sp, if the guest pte's dirty bit + * changed from clean to dirty, it will corrupt the + * sp's access: allow writable in the read-only sp, + * so we should update the spte at this point to get + * a new sp with the correct access.
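A quick worked example (made-up numbers) of why the kvm_handle_hva() index above must fold in the memslot base: with 4 KB pages and 2 MB large pages, KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) is 512. For a slot with base_gfn = 1022 and a fault at gfn_offset = 4 (gfn 1026):

	old:  idx = gfn_offset / 512              = 0
	new:  idx = (1022 + 4) / 512 - 1022 / 512 = 2 - 1 = 1

The old formula therefore picked the wrong lpage_info/rmap_pde entry whenever the slot's base_gfn was not aligned to the large-page size and the faulting gfn fell into a different large-page frame than the slot base.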
+ */ + direct_access = gw->pt_access & gw->pte_access; + if (!is_dirty_gpte(gw->ptes[gw->level - 1])) + direct_access &= ~ACC_WRITE_MASK; + + child = page_header(*sptep & PT64_BASE_ADDR_MASK); + if (child->role.access == direct_access) + continue; + + mmu_page_remove_parent_pte(child, sptep); + __set_spte(sptep, shadow_trap_nonpresent_pte); + kvm_flush_remote_tlbs(vcpu->kvm); + } if (is_large_pte(*sptep)) { rmap_remove(vcpu->kvm, sptep); diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index ce438e0..d103e15 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -87,6 +87,14 @@ struct nested_state { /* A VMEXIT is required but not yet emulated */ bool exit_required; + /* + * If we vmexit during an instruction emulation we need this to restore + * the l1 guest rip after the emulation + */ + unsigned long vmexit_rip; + unsigned long vmexit_rsp; + unsigned long vmexit_rax; + /* cache for intercepts of the guest */ u16 intercept_cr_read; u16 intercept_cr_write; @@ -766,7 +774,6 @@ static void init_vmcb(struct vcpu_svm *svm) control->iopm_base_pa = iopm_base; control->msrpm_base_pa = __pa(svm->msrpm); - control->tsc_offset = 0; control->int_ctl = V_INTR_MASKING_MASK; init_seg(&save->es); @@ -902,6 +909,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT; svm->asid_generation = 0; init_vmcb(svm); + svm->vmcb->control.tsc_offset = 0-native_read_tsc(); fx_init(&svm->vcpu); svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; @@ -1201,8 +1209,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) if (old == new) { /* cr0 write with ts and mp unchanged */ svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; - if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) + if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) { + svm->nested.vmexit_rip = kvm_rip_read(vcpu); + svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); + svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX); return; + } } } @@ -2398,6 +2410,23 @@ static int emulate_on_interception(struct vcpu_svm *svm) return 1; } +static int cr0_write_interception(struct vcpu_svm *svm) +{ + struct kvm_vcpu *vcpu = &svm->vcpu; + int r; + + r = emulate_instruction(&svm->vcpu, 0, 0, 0); + + if (svm->nested.vmexit_rip) { + kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip); + kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp); + kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax); + svm->nested.vmexit_rip = 0; + } + + return r == EMULATE_DONE; +} + static int cr8_write_interception(struct vcpu_svm *svm) { struct kvm_run *kvm_run = svm->vcpu.run; @@ -2671,7 +2700,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_READ_CR4] = emulate_on_interception, [SVM_EXIT_READ_CR8] = emulate_on_interception, [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, - [SVM_EXIT_WRITE_CR0] = emulate_on_interception, + [SVM_EXIT_WRITE_CR0] = cr0_write_interception, [SVM_EXIT_WRITE_CR3] = emulate_on_interception, [SVM_EXIT_WRITE_CR4] = emulate_on_interception, [SVM_EXIT_WRITE_CR8] = cr8_write_interception, @@ -3067,8 +3096,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) sync_lapic_to_cr8(vcpu); save_host_msrs(vcpu); - fs_selector = kvm_read_fs(); - gs_selector = kvm_read_gs(); + savesegment(fs, fs_selector); + savesegment(gs, gs_selector); ldt_selector = kvm_read_ldt(); svm->vmcb->save.cr2 = vcpu->arch.cr2; /* required for live migration with NPT */ @@ -3155,10 +3184,15 @@ 
static void svm_vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; - kvm_load_fs(fs_selector); - kvm_load_gs(gs_selector); - kvm_load_ldt(ldt_selector); load_host_msrs(vcpu); + kvm_load_ldt(ldt_selector); + loadsegment(fs, fs_selector); +#ifdef CONFIG_X86_64 + load_gs_index(gs_selector); + wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); +#else + loadsegment(gs, gs_selector); +#endif reload_tss(vcpu); @@ -3253,6 +3287,10 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu) static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) { switch (func) { + case 0x80000001: + if (nested) + entry->ecx |= (1 << 2); /* Set SVM bit */ + break; case 0x8000000A: entry->eax = 1; /* SVM revision 1 */ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ee03679..f1abe23 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -177,6 +177,7 @@ static u64 construct_eptp(unsigned long root_hpa); static DEFINE_PER_CPU(struct vmcs *, vmxarea); static DEFINE_PER_CPU(struct vmcs *, current_vmcs); static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu); +static DEFINE_PER_CPU(struct desc_ptr, host_gdt); static unsigned long *vmx_io_bitmap_a; static unsigned long *vmx_io_bitmap_b; @@ -745,7 +746,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) */ vmx->host_state.ldt_sel = kvm_read_ldt(); vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; - vmx->host_state.fs_sel = kvm_read_fs(); + savesegment(fs, vmx->host_state.fs_sel); if (!(vmx->host_state.fs_sel & 7)) { vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); vmx->host_state.fs_reload_needed = 0; @@ -753,7 +754,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) vmcs_write16(HOST_FS_SELECTOR, 0); vmx->host_state.fs_reload_needed = 1; } - vmx->host_state.gs_sel = kvm_read_gs(); + savesegment(gs, vmx->host_state.gs_sel); if (!(vmx->host_state.gs_sel & 7)) vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); else { @@ -770,10 +771,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) #endif #ifdef CONFIG_X86_64 - if (is_long_mode(&vmx->vcpu)) { - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); + rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); + if (is_long_mode(&vmx->vcpu)) wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); - } #endif for (i = 0; i < vmx->save_nmsrs; ++i) kvm_set_shared_msr(vmx->guest_msrs[i].index, @@ -783,35 +783,30 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) static void __vmx_load_host_state(struct vcpu_vmx *vmx) { - unsigned long flags; - if (!vmx->host_state.loaded) return; ++vmx->vcpu.stat.host_state_reload; vmx->host_state.loaded = 0; - if (vmx->host_state.fs_reload_needed) - kvm_load_fs(vmx->host_state.fs_sel); +#ifdef CONFIG_X86_64 + if (is_long_mode(&vmx->vcpu)) + rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); +#endif if (vmx->host_state.gs_ldt_reload_needed) { kvm_load_ldt(vmx->host_state.ldt_sel); - /* - * If we have to reload gs, we must take care to - * preserve our gs base. 
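The kvm_read_fs()/kvm_load_gs() helpers being dropped above were thin wrappers around segment moves; the replacements rely on the generic helpers from asm/system.h, which at this point look roughly like the sketch below (loadsegment() is the write-side counterpart and carries an exception-table fixup that falls back to the null selector):

#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")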
- */ - local_irq_save(flags); - kvm_load_gs(vmx->host_state.gs_sel); #ifdef CONFIG_X86_64 - wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE)); + load_gs_index(vmx->host_state.gs_sel); +#else + loadsegment(gs, vmx->host_state.gs_sel); #endif - local_irq_restore(flags); } + if (vmx->host_state.fs_reload_needed) + loadsegment(fs, vmx->host_state.fs_sel); reload_tss(); #ifdef CONFIG_X86_64 - if (is_long_mode(&vmx->vcpu)) { - rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); - wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); - } + wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif + load_gdt(&__get_cpu_var(host_gdt)); } static void vmx_load_host_state(struct vcpu_vmx *vmx) @@ -1314,6 +1309,8 @@ static int hardware_enable(void *garbage) ept_sync_global(); + store_gdt(&__get_cpu_var(host_gdt)); + return 0; } @@ -2514,8 +2511,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ - vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */ - vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */ + vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ + vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ #ifdef CONFIG_X86_64 rdmsrl(MSR_FS_BASE, a); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7fa89c3..eee5cdd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1910,9 +1910,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, 0 /* Reserved, XSAVE, OSXSAVE */; /* cpuid 0x80000001.ecx */ const u32 kvm_supported_word6_x86_features = - F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ | + F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | - F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) | + F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | 0 /* SKINIT */ | 0 /* WDT */; /* all calls to cpuid_count() should be made on the same cpu */ @@ -2220,6 +2220,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; + events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = @@ -2233,13 +2234,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); + events->nmi.pad = 0; events->sipi_vector = vcpu->arch.sipi_vector; events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SIPI_VECTOR | KVM_VCPUEVENT_VALID_SHADOW); - + memset(&events->reserved, 0, sizeof(events->reserved)); vcpu_put(vcpu); } @@ -2289,6 +2291,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, dbgregs->dr6 = vcpu->arch.dr6; dbgregs->dr7 = vcpu->arch.dr7; dbgregs->flags = 0; + memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); vcpu_put(vcpu); } @@ -2756,6 +2759,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) sizeof(ps->channels)); ps->flags = kvm->arch.vpit->pit_state.flags; mutex_unlock(&kvm->arch.vpit->pit_state.lock); + memset(&ps->reserved, 0, sizeof(ps->reserved)); return r; } @@ 
-2825,10 +2829,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, if (is_dirty) { struct kvm_memslots *slots, *old_slots; - spin_lock(&kvm->mmu_lock); - kvm_mmu_slot_remove_write_access(kvm, log->slot); - spin_unlock(&kvm->mmu_lock); - slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); if (!slots) goto out_free; @@ -2841,6 +2841,11 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, synchronize_srcu_expedited(&kvm->srcu); dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; kfree(old_slots); + + spin_lock(&kvm->mmu_lock); + kvm_mmu_slot_remove_write_access(kvm, log->slot); + spin_unlock(&kvm->mmu_lock); + } r = 0; @@ -3152,6 +3157,7 @@ long kvm_arch_vm_ioctl(struct file *filp, now_ns = timespec_to_ns(&now); user_ns.clock = kvm->arch.kvmclock_offset + now_ns; user_ns.flags = 0; + memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; if (copy_to_user(argp, &user_ns, sizeof(user_ns))) @@ -5438,6 +5444,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, int user_alloc) { int npages = memslot->npages; + int map_flags = MAP_PRIVATE | MAP_ANONYMOUS; + + /* Prevent internal slot pages from being moved by fork()/COW. */ + if (memslot->id >= KVM_MEMORY_SLOTS) + map_flags = MAP_SHARED | MAP_ANONYMOUS; /*To keep backward compatibility with older userspace, *x86 needs to handle !user_alloc case. @@ -5450,7 +5461,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, userspace_addr = do_mmap(NULL, 0, npages * PAGE_SIZE, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS, + map_flags, 0); up_write(&current->mm->mmap_sem); diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S index 4a5979a..2cda60a 100644 --- a/arch/x86/lib/atomic64_386_32.S +++ b/arch/x86/lib/atomic64_386_32.S @@ -25,150 +25,172 @@ CFI_ADJUST_CFA_OFFSET -4 .endm -.macro BEGIN func reg -$v = \reg - -ENTRY(atomic64_\func\()_386) - CFI_STARTPROC - LOCK $v - -.macro RETURN - UNLOCK $v +#define BEGIN(op) \ +.macro endp; \ + CFI_ENDPROC; \ +ENDPROC(atomic64_##op##_386); \ +.purgem endp; \ +.endm; \ +ENTRY(atomic64_##op##_386); \ + CFI_STARTPROC; \ + LOCK v; + +#define ENDP endp + +#define RET \ + UNLOCK v; \ ret -.endm - -.macro END_ - CFI_ENDPROC -ENDPROC(atomic64_\func\()_386) -.purgem RETURN -.purgem END_ -.purgem END -.endm - -.macro END -RETURN -END_ -.endm -.endm -BEGIN read %ecx - movl ($v), %eax - movl 4($v), %edx -END - -BEGIN set %esi - movl %ebx, ($v) - movl %ecx, 4($v) -END - -BEGIN xchg %esi - movl ($v), %eax - movl 4($v), %edx - movl %ebx, ($v) - movl %ecx, 4($v) -END - -BEGIN add %ecx - addl %eax, ($v) - adcl %edx, 4($v) -END - -BEGIN add_return %ecx - addl ($v), %eax - adcl 4($v), %edx - movl %eax, ($v) - movl %edx, 4($v) -END - -BEGIN sub %ecx - subl %eax, ($v) - sbbl %edx, 4($v) -END - -BEGIN sub_return %ecx +#define RET_ENDP \ + RET; \ + ENDP + +#define v %ecx +BEGIN(read) + movl (v), %eax + movl 4(v), %edx +RET_ENDP +#undef v + +#define v %esi +BEGIN(set) + movl %ebx, (v) + movl %ecx, 4(v) +RET_ENDP +#undef v + +#define v %esi +BEGIN(xchg) + movl (v), %eax + movl 4(v), %edx + movl %ebx, (v) + movl %ecx, 4(v) +RET_ENDP +#undef v + +#define v %ecx +BEGIN(add) + addl %eax, (v) + adcl %edx, 4(v) +RET_ENDP +#undef v + +#define v %ecx +BEGIN(add_return) + addl (v), %eax + adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) +RET_ENDP +#undef v + +#define v %ecx +BEGIN(sub) + subl %eax, (v) + sbbl %edx, 4(v) +RET_ENDP +#undef v + +#define v %ecx +BEGIN(sub_return) negl %edx negl %eax sbbl $0, %edx - addl ($v), %eax - adcl 4($v), %edx - movl %eax, ($v) - movl %edx, 4($v) -END - -BEGIN
inc %esi - addl $1, ($v) - adcl $0, 4($v) -END - -BEGIN inc_return %esi - movl ($v), %eax - movl 4($v), %edx + addl (v), %eax + adcl 4(v), %edx + movl %eax, (v) + movl %edx, 4(v) +RET_ENDP +#undef v + +#define v %esi +BEGIN(inc) + addl $1, (v) + adcl $0, 4(v) +RET_ENDP +#undef v + +#define v %esi +BEGIN(inc_return) + movl (v), %eax + movl 4(v), %edx addl $1, %eax adcl $0, %edx - movl %eax, ($v) - movl %edx, 4($v) -END - -BEGIN dec %esi - subl $1, ($v) - sbbl $0, 4($v) -END - -BEGIN dec_return %esi - movl ($v), %eax - movl 4($v), %edx + movl %eax, (v) + movl %edx, 4(v) +RET_ENDP +#undef v + +#define v %esi +BEGIN(dec) + subl $1, (v) + sbbl $0, 4(v) +RET_ENDP +#undef v + +#define v %esi +BEGIN(dec_return) + movl (v), %eax + movl 4(v), %edx subl $1, %eax sbbl $0, %edx - movl %eax, ($v) - movl %edx, 4($v) -END + movl %eax, (v) + movl %edx, 4(v) +RET_ENDP +#undef v -BEGIN add_unless %ecx +#define v %ecx +BEGIN(add_unless) addl %eax, %esi adcl %edx, %edi - addl ($v), %eax - adcl 4($v), %edx + addl (v), %eax + adcl 4(v), %edx cmpl %eax, %esi je 3f 1: - movl %eax, ($v) - movl %edx, 4($v) + movl %eax, (v) + movl %edx, 4(v) movl $1, %eax 2: -RETURN + RET 3: cmpl %edx, %edi jne 1b xorl %eax, %eax jmp 2b -END_ +ENDP +#undef v -BEGIN inc_not_zero %esi - movl ($v), %eax - movl 4($v), %edx +#define v %esi +BEGIN(inc_not_zero) + movl (v), %eax + movl 4(v), %edx testl %eax, %eax je 3f 1: addl $1, %eax adcl $0, %edx - movl %eax, ($v) - movl %edx, 4($v) + movl %eax, (v) + movl %edx, 4(v) movl $1, %eax 2: -RETURN + RET 3: testl %edx, %edx jne 1b jmp 2b -END_ +ENDP +#undef v -BEGIN dec_if_positive %esi - movl ($v), %eax - movl 4($v), %edx +#define v %esi +BEGIN(dec_if_positive) + movl (v), %eax + movl 4(v), %edx subl $1, %eax sbbl $0, %edx js 1f - movl %eax, ($v) - movl %edx, 4($v) + movl %eax, (v) + movl %edx, 4(v) 1: -END +RET_ENDP +#undef v diff --git a/arch/x86/lib/semaphore_32.S b/arch/x86/lib/semaphore_32.S index 648fe47..f35eec7 100644 --- a/arch/x86/lib/semaphore_32.S +++ b/arch/x86/lib/semaphore_32.S @@ -36,7 +36,7 @@ */ #ifdef CONFIG_SMP ENTRY(__write_lock_failed) - CFI_STARTPROC simple + CFI_STARTPROC FRAME 2: LOCK_PREFIX addl $ RW_LOCK_BIAS,(%eax) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index f627779..544ed25 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -802,8 +802,10 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address, up_read(&mm->mmap_sem); /* Kernel mode? Handle exceptions or die: */ - if (!(error_code & PF_USER)) + if (!(error_code & PF_USER)) { no_context(regs, error_code, address); + return; + } /* User-space => ok to do another page fault: */ if (is_prefetch(regs, error_code, address)) @@ -829,6 +831,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, unsigned long address, unsigned int fault) { if (fault & VM_FAULT_OOM) { + /* Kernel mode? Handle exceptions or die: */ + if (!(error_code & PF_USER)) { + up_read(&current->mm->mmap_sem); + no_context(regs, error_code, address); + return; + } + out_of_memory(regs, error_code, address); } else { if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON)) diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 5d0e67f..e5d5e2c 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c @@ -45,6 +45,8 @@ struct kmmio_fault_page { * Protected by kmmio_lock, when linked into kmmio_page_table.
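For the atomic64_386_32.S rework above, it may help to see roughly what the new C-preprocessor macros expand to for the simplest operation (CFI annotations elided; LOCK/UNLOCK are the irq-disable/restore macros defined earlier in that file):

ENTRY(atomic64_read_386)
	LOCK %ecx		/* pushfl; cli */
	movl  (%ecx), %eax
	movl 4(%ecx), %edx
	UNLOCK %ecx		/* popfl */
	ret
ENDPROC(atomic64_read_386)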
*/ int count; + + bool scheduled_for_release; }; struct kmmio_delayed_release { @@ -398,8 +400,11 @@ static void release_kmmio_fault_page(unsigned long page, BUG_ON(f->count < 0); if (!f->count) { disarm_kmmio_fault_page(f); - f->release_next = *release_list; - *release_list = f; + if (!f->scheduled_for_release) { + f->release_next = *release_list; + *release_list = f; + f->scheduled_for_release = true; + } } } @@ -471,8 +476,10 @@ static void remove_kmmio_fault_pages(struct rcu_head *head) prevp = &f->release_next; } else { *prevp = f->release_next; + f->release_next = NULL; + f->scheduled_for_release = false; } - f = f->release_next; + f = *prevp; } spin_unlock_irqrestore(&kmmio_lock, flags); @@ -510,6 +517,9 @@ void unregister_kmmio_probe(struct kmmio_probe *p) kmmio_count--; spin_unlock_irqrestore(&kmmio_lock, flags); + if (!release_list) + return; + drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC); if (!drelease) { pr_crit("leaking kmmio_fault_page objects.\n"); diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5c4ee42..2a72049 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -160,8 +160,7 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) * section 8.1: in PAE mode we explicitly have to flush the * TLB via cr3 if the top-level pgd is changed... */ - if (mm == current->active_mm) - write_cr3(read_cr3()); + flush_tlb_mm(mm); } #else /* !CONFIG_X86_PAE */ diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index f9897f7..9c0d0d3 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c @@ -420,9 +420,11 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end) return -1; } - for_each_node_mask(i, nodes_parsed) - e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT, - nodes[i].end >> PAGE_SHIFT); + for (i = 0; i < num_node_memblks; i++) + e820_register_active_regions(memblk_nodeid[i], + node_memblk_range[i].start >> PAGE_SHIFT, + node_memblk_range[i].end >> PAGE_SHIFT); + /* for out of order entries in SRAT */ sort_node_map(); if (!nodes_cover_memory(nodes)) { diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c index 8565d94..38868ad 100644 --- a/arch/x86/mm/testmmiotrace.c +++ b/arch/x86/mm/testmmiotrace.c @@ -90,6 +90,27 @@ static void do_test(unsigned long size) iounmap(p); } +/* + * Tests how mmiotrace behaves in face of multiple ioremap / iounmaps in + * a short time. We had a bug in deferred freeing procedure which tried + * to free this region multiple times (ioremap can reuse the same address + * for many mappings). + */ +static void do_test_bulk_ioremapping(void) +{ + void __iomem *p; + int i; + + for (i = 0; i < 10; ++i) { + p = ioremap_nocache(mmio_address, PAGE_SIZE); + if (p) + iounmap(p); + } + + /* Force freeing. If it will crash we will know why. */ + synchronize_rcu(); +} + static int __init init(void) { unsigned long size = (read_far) ? 
(8 << 20) : (16 << 10); @@ -104,6 +125,7 @@ static int __init init(void) "and writing 16 kB of rubbish in there.\n", size >> 10, mmio_address); do_test(size); + do_test_bulk_ioremapping(); pr_info("All done.\n"); return 0; } diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index b28d2f1..f1575c9 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -568,8 +568,13 @@ static int __init init_sysfs(void) int error; error = sysdev_class_register(&oprofile_sysclass); - if (!error) - error = sysdev_register(&device_oprofile); + if (error) + return error; + + error = sysdev_register(&device_oprofile); + if (error) + sysdev_class_unregister(&oprofile_sysclass); + return error; } @@ -580,8 +585,10 @@ static void exit_sysfs(void) } #else -#define init_sysfs() do { } while (0) -#define exit_sysfs() do { } while (0) + +static inline int init_sysfs(void) { return 0; } +static inline void exit_sysfs(void) { } + #endif /* CONFIG_PM */ static int __init p4_init(char **cpu_type) @@ -634,6 +641,18 @@ static int __init ppro_init(char **cpu_type) if (force_arch_perfmon && cpu_has_arch_perfmon) return 0; + /* + * Documentation on identifying Intel processors by CPU family + * and model can be found in the Intel Software Developer's + * Manuals (SDM): + * + * http://www.intel.com/products/processor/manuals/ + * + * As of May 2010 the documentation for this was in the: + * "Intel 64 and IA-32 Architectures Software Developer's + * Manual Volume 3B: System Programming Guide", "Table B-1 + * CPUID Signature Values of DisplayFamily_DisplayModel". + */ switch (cpu_model) { case 0 ... 2: *cpu_type = "i386/ppro"; break; @@ -652,15 +671,19 @@ static int __init ppro_init(char **cpu_type) case 14: *cpu_type = "i386/core"; break; - case 15: case 23: + case 0x0f: + case 0x16: + case 0x17: + case 0x1d: *cpu_type = "i386/core_2"; break; + case 0x1a: + case 0x1e: case 0x2e: - case 26: spec = &op_arch_perfmon_spec; *cpu_type = "i386/core_i7"; break; - case 28: + case 0x1c: *cpu_type = "i386/atom"; break; default: @@ -682,6 +705,8 @@ int __init op_nmi_init(struct oprofile_operations *ops) char *cpu_type = NULL; int ret = 0; + using_nmi = 0; + if (!cpu_has_apic) return -ENODEV; @@ -761,7 +786,10 @@ int __init op_nmi_init(struct oprofile_operations *ops) mux_init(ops); - init_sysfs(); + ret = init_sysfs(); + if (ret) + return ret; + using_nmi = 1; printk(KERN_INFO "oprofile: using NMI interrupt.\n"); return 0; diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b67a6b5..4262331 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c @@ -484,21 +484,29 @@ static int __init_ibs_nmi(void) return 0; } -/* initialize the APIC for the IBS interrupts if available */ +/* + * check and reserve APIC extended interrupt LVT offset for IBS if + * available + * + * init_ibs() performs implicitly cpu-local operations, so pin this + * thread to its current CPU + */ + static void init_ibs(void) { - ibs_caps = get_ibs_caps(); + preempt_disable(); + ibs_caps = get_ibs_caps(); if (!ibs_caps) - return; + goto out; - if (__init_ibs_nmi()) { + if (__init_ibs_nmi() < 0) ibs_caps = 0; - return; - } + else + printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); - printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", - (unsigned)ibs_caps); +out: + preempt_enable(); } static int (*create_arch_files)(struct super_block *sb, struct dentry *root); diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 2ec04c4..15466c0 100644 ---
a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -34,6 +34,15 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "x3800"), }, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */ + /* 2006 AMD HT/VIA system with two host bridges */ + { + .callback = set_use_crs, + .ident = "ASRock ALiveSATA2-GLAN", + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"), + }, + }, {} }; diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 1290ba5..9c57cb1 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c @@ -113,6 +113,7 @@ static void __save_processor_state(struct saved_context *ctxt) void save_processor_state(void) { __save_processor_state(&saved_context); + save_sched_clock_state(); } #ifdef CONFIG_X86_32 EXPORT_SYMBOL(save_processor_state); @@ -229,6 +230,7 @@ static void __restore_processor_state(struct saved_context *ctxt) void restore_processor_state(void) { __restore_processor_state(&saved_context); + restore_sched_clock_state(); } #ifdef CONFIG_X86_32 EXPORT_SYMBOL(restore_processor_state); diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 6b4ffed..dd78ef6 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) export CPPFLAGS_vdso.lds += -P -C -VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \ +VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so @@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y) += sysenter vdso32-images = $(vdso32.so-y:%=vdso32-%.so) CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) -VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1 +VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1 # This makes sure the $(obj) subdirectory exists even though vdso32/ # is not a kbuild sub-make subdirectory. diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 65d8d79..25d787c 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -927,7 +927,7 @@ static const struct pv_init_ops xen_init_ops __initdata = { }; static const struct pv_time_ops xen_time_ops __initdata = { - .sched_clock = xen_sched_clock, + .sched_clock = xen_clocksource_read, }; static const struct pv_cpu_ops xen_cpu_ops __initdata = { @@ -1000,10 +1000,6 @@ static void xen_reboot(int reason) { struct sched_shutdown r = { .reason = reason }; -#ifdef CONFIG_SMP - smp_send_stop(); -#endif - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) BUG(); } diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 914f046..6adff93 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1641,9 +1641,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { pte_t pte; - if (pfn > max_pfn_mapped) - max_pfn_mapped = pfn; - if (!pte_none(pte_page[pteidx])) continue; @@ -1687,6 +1684,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, pud_t *l3; pmd_t *l2; + /* max_pfn_mapped is the last pfn mapped in the initial memory + * mappings. Considering that on Xen after the kernel mappings we + * have the mappings of some pages that don't exist in pfn space, we + * set max_pfn_mapped to the last real pfn mapped. 
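The save_sched_clock_state()/restore_sched_clock_state() hooks wired into arch/x86/power/cpu.c above exist to keep sched_clock() monotonic across S3. A toy sketch of the offset math (numbers invented; the real code keeps a per-cpu cyc2ns_offset and scales the TSC through cyc2ns_scale):

/* sched_clock() ~= cyc2ns(rdtsc()) + cyc2ns_offset */
saved = sched_clock();			/* at suspend, e.g. ~5,000,000,000 ns */
/* ...resume: firmware reset the TSC, so cyc2ns(rdtsc()) is back near 0... */
cyc2ns_offset = 0;
cyc2ns_offset = saved - sched_clock();	/* ~5,000,000,000 ns */
/* sched_clock() now continues from ~5 s instead of jumping back to ~0 */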
*/ + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + /* Zap identity mapping */ init_level4_pgt[0] = __pgd(0); @@ -1750,9 +1753,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, { pmd_t *kernel_pmd; - max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + - xen_start_info->nr_pt_frames * PAGE_SIZE + - 512*1024); + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index a29693f..d2dfbf5 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -398,9 +398,9 @@ static void stop_self(void *v) BUG(); } -static void xen_smp_send_stop(void) +static void xen_stop_other_cpus(int wait) { - smp_call_function(stop_self, NULL, 0); + smp_call_function(stop_self, NULL, wait); } static void xen_smp_send_reschedule(int cpu) @@ -468,7 +468,7 @@ static const struct smp_ops xen_smp_ops __initdata = { .cpu_disable = xen_cpu_disable, .play_dead = xen_play_dead, - .smp_send_stop = xen_smp_send_stop, + .stop_other_cpus = xen_stop_other_cpus, .smp_send_reschedule = xen_smp_send_reschedule, .send_call_func_ipi = xen_smp_send_call_function_ipi, diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index b3c6c59..41eb583 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -155,45 +155,6 @@ static void do_stolen_accounting(void) account_idle_ticks(ticks); } -/* - * Xen sched_clock implementation. Returns the number of unstolen - * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED - * states. - */ -unsigned long long xen_sched_clock(void) -{ - struct vcpu_runstate_info state; - cycle_t now; - u64 ret; - s64 offset; - - /* - * Ideally sched_clock should be called on a per-cpu basis - * anyway, so preempt should already be disabled, but that's - * not current practice at the moment. 
- */ - preempt_disable(); - - now = xen_clocksource_read(); - - get_runstate_snapshot(&state); - - WARN_ON(state.state != RUNSTATE_running); - - offset = now - state.state_entry_time; - if (offset < 0) - offset = 0; - - ret = state.time[RUNSTATE_blocked] + - state.time[RUNSTATE_running] + - offset; - - preempt_enable(); - - return ret; -} - - /* Get the TSC speed from Xen */ unsigned long xen_tsc_khz(void) { @@ -464,6 +425,8 @@ void xen_timer_resume(void) { int cpu; + pvclock_resume(); + if (xen_clockevent != &xen_vcpuop_clockevent) return; diff --git a/block/blk-map.c b/block/blk-map.c index 9083cf0..30a7e51 100644 --- a/block/blk-map.c +++ b/block/blk-map.c @@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, unaligned = 1; break; } + if (!iov[i].iov_len) + return -EINVAL; } if (unaligned || (q->dma_pad_mask & len) || map_data) diff --git a/block/blk-merge.c b/block/blk-merge.c index 5e7dc99..01d85b5 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -22,7 +22,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, return 0; fbio = bio; - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); + cluster = blk_queue_cluster(q); seg_size = 0; phys_size = nr_phys_segs = 0; for_each_bio(bio) { @@ -88,7 +88,7 @@ EXPORT_SYMBOL(blk_recount_segments); static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, struct bio *nxt) { - if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) + if (!blk_queue_cluster(q)) return 0; if (bio->bi_seg_back_size + nxt->bi_seg_front_size > @@ -124,7 +124,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, int nsegs, cluster; nsegs = 0; - cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); + cluster = blk_queue_cluster(q); /* * for each bio in rq diff --git a/block/blk-settings.c b/block/blk-settings.c index f5ed5a1..331763a 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -108,7 +108,7 @@ void blk_set_default_limits(struct queue_limits *lim) lim->alignment_offset = 0; lim->io_opt = 0; lim->misaligned = 0; - lim->no_cluster = 0; + lim->cluster = 1; } EXPORT_SYMBOL(blk_set_default_limits); @@ -326,7 +326,7 @@ EXPORT_SYMBOL(blk_queue_logical_block_size); * hardware can operate on without reverting to read-modify-write * operations. */ -void blk_queue_physical_block_size(struct request_queue *q, unsigned short size) +void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) { q->limits.physical_block_size = size; @@ -451,15 +451,6 @@ EXPORT_SYMBOL(blk_queue_io_opt); void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) { blk_stack_limits(&t->limits, &b->limits, 0); - - if (!t->queue_lock) - WARN_ON_ONCE(1); - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { - unsigned long flags; - spin_lock_irqsave(t->queue_lock, flags); - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); - spin_unlock_irqrestore(t->queue_lock, flags); - } } EXPORT_SYMBOL(blk_queue_stack_limits); @@ -530,7 +521,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->io_min = max(t->io_min, b->io_min); t->io_opt = lcm(t->io_opt, b->io_opt); - t->no_cluster |= b->no_cluster; + t->cluster &= b->cluster; t->discard_zeroes_data &= b->discard_zeroes_data; /* Physical block size a multiple of the logical block size? 
*/ @@ -626,7 +617,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset) { struct request_queue *t = disk->queue; - struct request_queue *b = bdev_get_queue(bdev); if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; @@ -637,17 +627,6 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", top, bottom); } - - if (!t->queue_lock) - WARN_ON_ONCE(1); - else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { - unsigned long flags; - - spin_lock_irqsave(t->queue_lock, flags); - if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) - queue_flag_clear(QUEUE_FLAG_CLUSTER, t); - spin_unlock_irqrestore(t->queue_lock, flags); - } } EXPORT_SYMBOL(disk_stack_limits); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 306759b..3eede79 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -114,7 +114,7 @@ static ssize_t queue_max_segments_show(struct request_queue *q, char *page) static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) { - if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) + if (blk_queue_cluster(q)) return queue_var_show(queue_max_segment_size(q), (page)); return queue_var_show(PAGE_CACHE_SIZE, (page)); @@ -502,8 +502,10 @@ int blk_register_queue(struct gendisk *disk) return ret; ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); - if (ret < 0) + if (ret < 0) { + blk_trace_remove_sysfs(dev); return ret; + } kobject_uevent(&q->kobj, KOBJ_ADD); diff --git a/block/bsg.c b/block/bsg.c index 82d5882..0c00870 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -426,7 +426,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, /* * fill in all the output members */ - hdr->device_status = status_byte(rq->errors); + hdr->device_status = rq->errors & 0xff; hdr->transport_status = host_byte(rq->errors); hdr->driver_status = driver_byte(rq->errors); hdr->info = 0; diff --git a/block/genhd.c b/block/genhd.c index 59a2db6..315afd2 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -541,13 +541,15 @@ void add_disk(struct gendisk *disk) disk->major = MAJOR(devt); disk->first_minor = MINOR(devt); + /* Register BDI before referencing it from bdev */ + bdi = &disk->queue->backing_dev_info; + bdi_register_dev(bdi, disk_devt(disk)); + blk_register_region(disk_devt(disk), disk->minors, NULL, exact_match, exact_lock, disk); register_disk(disk); blk_register_queue(disk); - bdi = &disk->queue->backing_dev_info; - bdi_register_dev(bdi, disk_devt(disk)); retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, "bdi"); WARN_ON(retval); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index a8b5a10..4f4230b 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c @@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk, if (hdr->iovec_count) { const int size = sizeof(struct sg_iovec) * hdr->iovec_count; size_t iov_data_len; - struct sg_iovec *iov; + struct sg_iovec *sg_iov; + struct iovec *iov; + int i; - iov = kmalloc(size, GFP_KERNEL); - if (!iov) { + sg_iov = kmalloc(size, GFP_KERNEL); + if (!sg_iov) { ret = -ENOMEM; goto out; } - if (copy_from_user(iov, hdr->dxferp, size)) { - kfree(iov); + if (copy_from_user(sg_iov, hdr->dxferp, size)) { + kfree(sg_iov); ret = -EFAULT; goto out; } + /* + * Sum up the vecs, making sure they don't overflow + */ + iov = (struct iovec *) sg_iov; + iov_data_len = 0; + for (i = 0; i < 
hdr->iovec_count; i++) { + if (iov_data_len + iov[i].iov_len < iov_data_len) { + kfree(sg_iov); + ret = -EINVAL; + goto out; + } + iov_data_len += iov[i].iov_len; + } + /* SG_IO howto says that the shorter of the two wins */ - iov_data_len = iov_length((struct iovec *)iov, - hdr->iovec_count); if (hdr->dxfer_len < iov_data_len) { - hdr->iovec_count = iov_shorten((struct iovec *)iov, + hdr->iovec_count = iov_shorten(iov, hdr->iovec_count, hdr->dxfer_len); iov_data_len = hdr->dxfer_len; } - ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count, + ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count, iov_data_len, GFP_KERNEL); - kfree(iov); + kfree(sg_iov); } else if (hdr->dxfer_len) ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, GFP_KERNEL); diff --git a/crypto/Kconfig b/crypto/Kconfig index 9d9434f..df332c1 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -96,6 +96,14 @@ config CRYPTO_MANAGER2 select CRYPTO_BLKCIPHER2 select CRYPTO_PCOMP +config CRYPTO_MANAGER_TESTS + bool "Run algorithms' self-tests" + default y + depends on CRYPTO_MANAGER2 + help + Run cryptomanager's tests for the new crypto algorithms being + registered. + config CRYPTO_GF128MUL tristate "GF(2^128) multiplication functions (EXPERIMENTAL)" depends on EXPERIMENTAL diff --git a/crypto/algboss.c b/crypto/algboss.c index c3c196b..40bd391 100644 --- a/crypto/algboss.c +++ b/crypto/algboss.c @@ -206,6 +206,7 @@ err: return NOTIFY_OK; } +#ifdef CONFIG_CRYPTO_MANAGER_TESTS static int cryptomgr_test(void *data) { struct crypto_test_param *param = data; @@ -266,6 +267,7 @@ err_put_module: err: return NOTIFY_OK; } +#endif /* CONFIG_CRYPTO_MANAGER_TESTS */ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, void *data) @@ -273,8 +275,10 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg, switch (msg) { case CRYPTO_MSG_ALG_REQUEST: return cryptomgr_schedule_probe(data); +#ifdef CONFIG_CRYPTO_MANAGER_TESTS case CRYPTO_MSG_ALG_REGISTER: return cryptomgr_schedule_test(data); +#endif } return NOTIFY_DONE; diff --git a/crypto/testmgr.c b/crypto/testmgr.c index 5c8aaa0..abd980c 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -22,6 +22,17 @@ #include #include "internal.h" + +#ifndef CONFIG_CRYPTO_MANAGER_TESTS + +/* a perfect nop */ +int alg_test(const char *driver, const char *alg, u32 type, u32 mask) +{ + return 0; +} + +#else + #include "testmgr.h" /* @@ -2530,4 +2541,7 @@ notest: non_fips_alg: return -EINVAL; } + +#endif /* CONFIG_CRYPTO_MANAGER_TESTS */ + EXPORT_SYMBOL_GPL(alg_test); diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 147a7e6..9f82a38 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -853,6 +853,7 @@ struct acpi_bit_register_info { ACPI_BITMASK_POWER_BUTTON_STATUS | \ ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ ACPI_BITMASK_RT_CLOCK_STATUS | \ + ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \ ACPI_BITMASK_WAKE_STATUS) #define ACPI_BITMASK_TIMER_ENABLE 0x0001 diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index d555b37..6b0b5d0 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -300,10 +300,25 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state, * we must enter this object into the namespace. The created * object is temporary and will be deleted upon completion of * the execution of this method. + * + * Note 10/2010: Except for the Scope() op.
This opcode does + * not actually create a new object, it refers to an existing + * object. However, for Scope(), we want to indeed open a + * new scope. */ - status = acpi_ds_load2_begin_op(walk_state, NULL); + if (op->common.aml_opcode != AML_SCOPE_OP) { + status = + acpi_ds_load2_begin_op(walk_state, NULL); + } else { + status = + acpi_ds_scope_stack_push(op->named.node, + op->named.node-> + type, walk_state); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } } - break; case AML_CLASS_EXECUTE: diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 864dd46..18645f4 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include "apei-internal.h" diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index dc58402..68919e2 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -98,6 +98,7 @@ enum { * due to bad math. */ ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, + ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, }; struct acpi_battery { @@ -413,6 +414,8 @@ static int acpi_battery_get_info(struct acpi_battery *battery) result = extract_package(battery, buffer.pointer, info_offsets, ARRAY_SIZE(info_offsets)); kfree(buffer.pointer); + if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) + battery->full_charge_capacity = battery->design_capacity; return result; } @@ -449,6 +452,10 @@ static int acpi_battery_get_state(struct acpi_battery *battery) battery->rate_now != -1) battery->rate_now = abs((s16)battery->rate_now); + if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) + && battery->capacity_now >= 0 && battery->capacity_now <= 100) + battery->capacity_now = (battery->capacity_now * + battery->full_charge_capacity) / 100; return result; } @@ -562,6 +569,33 @@ static void acpi_battery_quirks(struct acpi_battery *battery) } } +/* + * According to the ACPI spec, some kinds of primary batteries can + * report percentage battery remaining capacity directly to OS. + * In this case, it reports the Last Full Charged Capacity == 100 + * and BatteryPresentRate == 0xFFFFFFFF. + * + * Now we found some battery reports percentage remaining capacity + * even if it's rechargeable. + * https://bugzilla.kernel.org/show_bug.cgi?id=15979 + * + * Handle this correctly so that they won't break userspace. 
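A worked example of the quirk described above, with invented capacities: a battery with design_capacity = 4400 mAh that reports full_charge_capacity == 100 and capacity_now == 66 is really reporting percent. Once the quirk flag is set, full_charge_capacity is replaced by the design capacity and capacity_now becomes 66 * 4400 / 100 = 2904, so the values handed to userspace are back in mAh (or mWh) and remaining-capacity percentages computed by tools stay correct.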
+ */ +static void acpi_battery_quirks2(struct acpi_battery *battery) +{ + if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)) + return ; + + if (battery->full_charge_capacity == 100 && + battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN && + battery->capacity_now >=0 && battery->capacity_now <= 100) { + set_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags); + battery->full_charge_capacity = battery->design_capacity; + battery->capacity_now = (battery->capacity_now * + battery->full_charge_capacity) / 100; + } +} + static int acpi_battery_update(struct acpi_battery *battery) { int result, old_present = acpi_battery_present(battery); @@ -587,7 +621,9 @@ static int acpi_battery_update(struct acpi_battery *battery) if (!battery->bat.dev) sysfs_add_battery(battery); #endif - return acpi_battery_get_state(battery); + result = acpi_battery_get_state(battery); + acpi_battery_quirks2(battery); + return result; } /* -------------------------------------------------------------------------- diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 2bb28b9..51cada7 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -226,6 +226,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { }, }, { + .callback = dmi_disable_osi_vista, + .ident = "Toshiba Satellite L355", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"), + }, + }, + { .callback = dmi_disable_osi_win7, .ident = "ASUS K50IJ", .matches = { @@ -233,6 +241,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), }, }, + { + .callback = dmi_disable_osi_vista, + .ident = "Toshiba P305D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"), + }, + }, /* * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index c1d23cd..a33fbc9 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id) static struct dmi_system_id dsdt_dmi_table[] __initdata = { /* - * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. + * Invoke DSDT corruption work-around on all Toshiba Satellite. 
* https://bugzilla.kernel.org/show_bug.cgi?id=14679 */ { .callback = set_copy_dsdt, - .ident = "TOSHIBA Satellite A505", + .ident = "TOSHIBA Satellite", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), - }, - }, - { - .callback = set_copy_dsdt, - .ident = "TOSHIBA Satellite L505D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), }, }, {} diff --git a/drivers/acpi/debug.c b/drivers/acpi/debug.c index 146135e..469f049 100644 --- a/drivers/acpi/debug.c +++ b/drivers/acpi/debug.c @@ -258,7 +258,7 @@ static int acpi_debugfs_init(void) if (!acpi_dir) goto err; - cm_dentry = debugfs_create_file("custom_method", S_IWUGO, + cm_dentry = debugfs_create_file("custom_method", S_IWUSR, acpi_dir, NULL, &cm_fops); if (!cm_dentry) goto err; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 5f2027d..2d4b732 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1006,6 +1006,9 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { ec_flag_msi, "MSI hardware", { DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, { + ec_flag_msi, "MSI hardware", { + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, + { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, {}, diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index e9699aa..bec561c 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id) static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { { - set_no_mwait, "IFL91 board", { - DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), - DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), - DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, - { set_no_mwait, "Extensa 5220", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -352,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void) acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, early_init_pdc, NULL, NULL, NULL); + acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL); } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 2862c78..4882bc1 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -362,6 +362,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d) return 0; } +static int __init init_nvs_nosave(const struct dmi_system_id *d) +{ + acpi_nvs_nosave(); + return 0; +} + static struct dmi_system_id __initdata acpisleep_dmi_table[] = { { .callback = init_old_suspend_ordering, @@ -396,6 +402,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-SR11M", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Everex StepNote Series", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index f252253..deb228c 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -89,6 +89,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); static int ahci_pci_device_resume(struct pci_dev 
*pdev); #endif +static struct scsi_host_template ahci_sht = { + AHCI_SHT("ahci"), +}; + static struct ata_port_operations ahci_vt8251_ops = { .inherits = &ahci_ops, .hardreset = ahci_vt8251_hardreset, @@ -244,6 +248,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */ { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */ + { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */ + { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */ + { PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 7113c57..22a20e8 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -72,6 +72,7 @@ enum { AHCI_CMD_RESET = (1 << 8), AHCI_CMD_CLR_BUSY = (1 << 10), + RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ @@ -297,7 +298,17 @@ struct ahci_host_priv { extern int ahci_ignore_sss; -extern struct scsi_host_template ahci_sht; +extern struct device_attribute *ahci_shost_attrs[]; +extern struct device_attribute *ahci_sdev_attrs[]; + +#define AHCI_SHT(drv_name) \ + ATA_NCQ_SHT(drv_name), \ + .can_queue = AHCI_MAX_CMDS - 1, \ + .sg_tablesize = AHCI_MAX_SG, \ + .dma_boundary = AHCI_DMA_BOUNDARY, \ + .shost_attrs = ahci_shost_attrs, \ + .sdev_attrs = ahci_sdev_attrs + extern struct ata_port_operations ahci_ops; void ahci_save_initial_config(struct device *dev, diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 5e11b16..9366743 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -23,6 +23,10 @@ #include #include "ahci.h" +static struct scsi_host_template ahci_platform_sht = { + AHCI_SHT("ahci_platform"), +}; + static int __init ahci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -140,7 +144,7 @@ static int __init ahci_probe(struct platform_device *pdev) ahci_print_info(host, "platform"); rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, - &ahci_sht); + &ahci_platform_sht); if (rc) goto err0; diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 7409f98..3971bc0 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -158,6 +158,7 @@ struct piix_map_db { struct piix_host_priv { const int *map; u32 saved_iocfg; + spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */ void __iomem *sidpr; }; @@ -951,12 +952,15 @@ static int piix_sidpr_scr_read(struct ata_link *link, unsigned int reg, u32 *val) { struct piix_host_priv *hpriv = link->ap->host->private_data; + unsigned long flags; if (reg >= ARRAY_SIZE(piix_sidx_map)) return -EINVAL; + spin_lock_irqsave(&hpriv->sidpr_lock, flags); piix_sidpr_sel(link, reg); *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA); + spin_unlock_irqrestore(&hpriv->sidpr_lock, flags); return 0; } @@ -964,12 +968,15 @@ static int piix_sidpr_scr_write(struct ata_link *link, unsigned int reg, u32 val) { struct piix_host_priv *hpriv = link->ap->host->private_data; + unsigned long flags; if (reg >= ARRAY_SIZE(piix_sidx_map)) return -EINVAL; + 
spin_lock_irqsave(&hpriv->sidpr_lock, flags); piix_sidpr_sel(link, reg); iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA); + spin_unlock_irqrestore(&hpriv->sidpr_lock, flags); return 0; } @@ -1566,6 +1573,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; + spin_lock_init(&hpriv->sidpr_lock); /* Save IOCFG, this will be used for cable detection, quirk * detection and restoration on detach. This is necessary diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 81e772a..b5aecd0 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -121,7 +121,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, ahci_read_em_buffer, ahci_store_em_buffer); -static struct device_attribute *ahci_shost_attrs[] = { +struct device_attribute *ahci_shost_attrs[] = { &dev_attr_link_power_management_policy, &dev_attr_em_message_type, &dev_attr_em_message, @@ -132,22 +132,14 @@ static struct device_attribute *ahci_shost_attrs[] = { &dev_attr_em_buffer, NULL }; +EXPORT_SYMBOL_GPL(ahci_shost_attrs); -static struct device_attribute *ahci_sdev_attrs[] = { +struct device_attribute *ahci_sdev_attrs[] = { &dev_attr_sw_activity, &dev_attr_unload_heads, NULL }; - -struct scsi_host_template ahci_sht = { - ATA_NCQ_SHT("ahci"), - .can_queue = AHCI_MAX_CMDS - 1, - .sg_tablesize = AHCI_MAX_SG, - .dma_boundary = AHCI_DMA_BOUNDARY, - .shost_attrs = ahci_shost_attrs, - .sdev_attrs = ahci_sdev_attrs, -}; -EXPORT_SYMBOL_GPL(ahci_sht); +EXPORT_SYMBOL_GPL(ahci_sdev_attrs); struct ata_port_operations ahci_ops = { .inherits = &sata_pmp_port_ops, @@ -1320,7 +1312,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, /* issue the first D2H Register FIS */ msecs = 0; now = jiffies; - if (time_after(now, deadline)) + if (time_after(deadline, now)) msecs = jiffies_to_msecs(deadline - now); tf.ctl |= ATA_SRST; @@ -1832,12 +1824,24 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) { struct ahci_port_priv *pp = qc->ap->private_data; - u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + u8 *rx_fis = pp->rx_fis; if (pp->fbs_enabled) - d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; + rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; + + /* + * After a successful execution of an ATA PIO data-in command, + * the device doesn't send D2H Reg FIS to update the TF and + * the host should take TF and E_Status from the preceding PIO + * Setup FIS. 
+ */ + if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && + !(qc->flags & ATA_QCFLAG_FAILED)) { + ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); + qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + } else + ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); - ata_tf_from_fis(d2h_fis, &qc->result_tf); return true; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ddf8e48..c77a167 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4946,9 +4946,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) { struct ata_device *dev = qc->dev; - if (ata_tag_internal(qc->tag)) - return; - if (ata_is_nodata(qc->tf.protocol)) return; @@ -4992,14 +4989,23 @@ void ata_qc_complete(struct ata_queued_cmd *qc) if (unlikely(qc->err_mask)) qc->flags |= ATA_QCFLAG_FAILED; - if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { - /* always fill result TF for failed qc */ + /* + * Finish internal commands without any further processing + * and always with the result TF filled. + */ + if (unlikely(ata_tag_internal(qc->tag))) { fill_result_tf(qc); + __ata_qc_complete(qc); + return; + } - if (!ata_tag_internal(qc->tag)) - ata_qc_schedule_eh(qc); - else - __ata_qc_complete(qc); + /* + * Non-internal qc has failed. Fill the result TF and + * summon EH. + */ + if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { + fill_result_tf(qc); + ata_qc_schedule_eh(qc); return; } @@ -5434,6 +5440,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, */ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) { + unsigned int ehi_flags = ATA_EHI_QUIET; int rc; /* @@ -5442,7 +5449,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) */ ata_lpm_enable(host); - rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); + /* + * On some hardware, device fails to respond after spun down + * for suspend. As the device won't be used before being + * resumed, we don't need to touch the device. Ask EH to skip + * the usual stuff and proceed directly to suspend. + * + * http://thread.gmane.org/gmane.linux.ide/46764 + */ + if (mesg.event == PM_EVENT_SUSPEND) + ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY; + + rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1); if (rc == 0) host->dev->power.power_state = mesg; return rc; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f77a673..da5a55b 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3234,6 +3234,10 @@ static int ata_eh_skip_recovery(struct ata_link *link) if (link->flags & ATA_LFLAG_DISABLED) return 1; + /* skip if explicitly requested */ + if (ehc->i.flags & ATA_EHI_NO_RECOVERY) + return 1; + /* thaw frozen port and recover failed devices */ if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link)) return 0; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a54273d..fb9e3ee 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2577,8 +2577,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) * * If door lock fails, always clear sdev->locked to * avoid this infinite loop. + * + * This may happen before SCSI scan is complete. Make + * sure qc->dev->sdev isn't NULL before dereferencing. 
*/ - if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) + if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) qc->dev->sdev->locked = 0; qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index efa4a18..804d8f8 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -418,6 +418,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) if (ioaddr->ctl_addr) iowrite8(tf->ctl, ioaddr->ctl_addr); ap->last_ctl = tf->ctl; + ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { @@ -453,6 +454,8 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->device, ioaddr->device_addr); VPRINTK("device 0x%X\n", tf->device); } + + ata_wait_idle(ap); } EXPORT_SYMBOL_GPL(ata_sff_tf_load); @@ -1512,11 +1515,10 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap, if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) return ata_sff_idle_irq(ap); break; - case HSM_ST: - case HSM_ST_LAST: - break; - default: + case HSM_ST_IDLE: return ata_sff_idle_irq(ap); + default: + break; } /* check main status, clearing INTRQ if needed */ @@ -2735,10 +2737,6 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; - /* see ata_dma_blacklisted() */ - BUG_ON((ap->flags & ATA_FLAG_PIO_POLLING) && - qc->tf.protocol == ATAPI_PROT_DMA); - /* defer PIO handling to sff_qc_issue */ if (!ata_is_dma(qc->tf.protocol)) return ata_sff_qc_issue(qc); diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c index 9f5da1c..905ff76 100644 --- a/drivers/ata/pata_cmd64x.c +++ b/drivers/ata/pata_cmd64x.c @@ -121,14 +121,8 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m if (pair) { struct ata_timing tp; - ata_timing_compute(pair, pair->pio_mode, &tp, T, 0); ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP); - if (pair->dma_mode) { - ata_timing_compute(pair, pair->dma_mode, - &tp, T, 0); - ata_timing_merge(&tp, &t, &t, ATA_TIMING_SETUP); - } } } diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c index f087ab5..8f56ef6 100644 --- a/drivers/ata/pata_mpc52xx.c +++ b/drivers/ata/pata_mpc52xx.c @@ -610,7 +610,7 @@ static struct scsi_host_template mpc52xx_ata_sht = { }; static struct ata_port_operations mpc52xx_ata_port_ops = { - .inherits = &ata_sff_port_ops, + .inherits = &ata_bmdma_port_ops, .sff_dev_select = mpc52xx_ata_dev_select, .set_piomode = mpc52xx_ata_set_piomode, .set_dmamode = mpc52xx_ata_set_dmamode, diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 5e65988..ac8d7d9 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -417,6 +417,8 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) tf->lbam, tf->lbah); } + + ata_wait_idle(ap); } static int via_port_start(struct ata_port *ap) diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index a476cd9..d57f6eb 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1898,19 +1898,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc) * LOCKING: * Inherited from caller. 
*/ -static void mv_bmdma_stop(struct ata_queued_cmd *qc) +static void mv_bmdma_stop_ap(struct ata_port *ap) { - struct ata_port *ap = qc->ap; void __iomem *port_mmio = mv_ap_base(ap); u32 cmd; /* clear start/stop bit */ cmd = readl(port_mmio + BMDMA_CMD); - cmd &= ~ATA_DMA_START; - writelfl(cmd, port_mmio + BMDMA_CMD); + if (cmd & ATA_DMA_START) { + cmd &= ~ATA_DMA_START; + writelfl(cmd, port_mmio + BMDMA_CMD); + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_sff_dma_pause(ap); + } +} - /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ - ata_sff_dma_pause(ap); +static void mv_bmdma_stop(struct ata_queued_cmd *qc) +{ + mv_bmdma_stop_ap(qc->ap); } /** @@ -1934,8 +1940,21 @@ static u8 mv_bmdma_status(struct ata_port *ap) reg = readl(port_mmio + BMDMA_STATUS); if (reg & ATA_DMA_ACTIVE) status = ATA_DMA_ACTIVE; - else + else if (reg & ATA_DMA_ERR) status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; + else { + /* + * Just because DMA_ACTIVE is 0 (DMA completed), + * this does _not_ mean the device is "done". + * So we should not yet be signalling ATA_DMA_INTR + * in some cases. Eg. DSM/TRIM, and perhaps others. + */ + mv_bmdma_stop_ap(ap); + if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) + status = 0; + else + status = ATA_DMA_INTR; + } return status; } @@ -1995,6 +2014,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) switch (tf->protocol) { case ATA_PROT_DMA: + if (tf->command == ATA_CMD_DSM) + return; + /* fall-thru */ case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: @@ -2094,6 +2116,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) if ((tf->protocol != ATA_PROT_DMA) && (tf->protocol != ATA_PROT_NCQ)) return; + if (tf->command == ATA_CMD_DSM) + return; /* use bmdma for this */ /* Fill in Gen IIE command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) @@ -2289,6 +2313,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) switch (qc->tf.protocol) { case ATA_PROT_DMA: + if (qc->tf.command == ATA_CMD_DSM) { + if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ + return AC_ERR_OTHER; + break; /* use bmdma for this */ + } + /* fall thru */ case ATA_PROT_NCQ: mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 4730c42..c51b8d2 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -538,7 +538,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) return 0; } -static void svia_configure(struct pci_dev *pdev) +static void svia_configure(struct pci_dev *pdev, int board_id) { u8 tmp8; @@ -577,7 +577,7 @@ static void svia_configure(struct pci_dev *pdev) } /* - * vt6421 has problems talking to some drives. The following + * vt6420/1 has problems talking to some drives. The following * is the fix from Joseph Chan . 
* * When host issues HOLD, device may send up to 20DW of data @@ -596,8 +596,9 @@ static void svia_configure(struct pci_dev *pdev) * * https://bugzilla.kernel.org/show_bug.cgi?id=15173 * http://article.gmane.org/gmane.linux.ide/46352 + * http://thread.gmane.org/gmane.linux.kernel/1062139 */ - if (pdev->device == 0x3249) { + if (board_id == vt6420 || board_id == vt6421) { pci_read_config_byte(pdev, 0x52, &tmp8); tmp8 |= 1 << 2; pci_write_config_byte(pdev, 0x52, tmp8); @@ -652,7 +653,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - svia_configure(pdev); + svia_configure(pdev, board_id); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index ded76c4..6ba851b 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -695,7 +695,7 @@ void solos_bh(unsigned long card_arg) size); } if (atmdebug) { - dev_info(&card->dev->dev, "Received: device %d\n", port); + dev_info(&card->dev->dev, "Received: port %d\n", port); dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", size, le16_to_cpu(header->vpi), le16_to_cpu(header->vci)); @@ -781,7 +781,8 @@ static struct atm_vcc *find_vcc(struct atm_dev *dev, short vpi, int vci) sk_for_each(s, node, head) { vcc = atm_sk(s); if (vcc->dev == dev && vcc->vci == vci && - vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE) + vcc->vpi == vpi && vcc->qos.rxtp.traffic_class != ATM_NONE && + test_bit(ATM_VF_READY, &vcc->flags)) goto out; } vcc = NULL; @@ -907,6 +908,10 @@ static void pclose(struct atm_vcc *vcc) clear_bit(ATM_VF_ADDR, &vcc->flags); clear_bit(ATM_VF_READY, &vcc->flags); + /* Hold up vcc_destroy_socket() (our caller) until solos_bh() in the + tasklet has finished processing any incoming packets (and, more to + the point, using the vcc pointer). 
*/ + tasklet_unlock_wait(&card->tlet); return; } @@ -1010,8 +1015,15 @@ static uint32_t fpga_tx(struct solos_card *card) /* Clean up and free oldskb now it's gone */ if (atmdebug) { + struct pkt_hdr *header = (void *)oldskb->data; + int size = le16_to_cpu(header->size); + + skb_pull(oldskb, sizeof(*header)); dev_info(&card->dev->dev, "Transmitted: port %d\n", port); + dev_info(&card->dev->dev, "size: %d VPI: %d VCI: %d\n", + size, le16_to_cpu(header->vpi), + le16_to_cpu(header->vci)); print_buffer(oldskb); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 941fcb8..cb1cedc 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -59,6 +59,7 @@ void device_pm_init(struct device *dev) { dev->power.status = DPM_ON; init_completion(&dev->power.completion); + complete_all(&dev->power.completion); pm_runtime_init(dev); } diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index c5d4111..37a2d4f 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -173,6 +173,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) printk("Sending %x - down to controller\n", c->busaddr ); #endif /* CCISS_DEBUG */ writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); + readl(h->vaddr + SA5_REQUEST_PORT_OFFSET); h->commands_outstanding++; if ( h->commands_outstanding > h->max_outstanding) h->max_outstanding = h->commands_outstanding; diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 7258c95..60bbfcf 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2371,11 +2371,7 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) static void consider_delay_probes(struct drbd_conf *mdev) { - if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93) - return; - - if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt) - drbd_send_delay_probes(mdev); + return; } static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel) @@ -2660,9 +2656,24 @@ static void drbd_unplug_fn(struct request_queue *q) static void drbd_set_defaults(struct drbd_conf *mdev) { - mdev->sync_conf.after = DRBD_AFTER_DEF; - mdev->sync_conf.rate = DRBD_RATE_DEF; - mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF; + /* This way we get a compile error when sync_conf grows, + and we forgot to initialize it here */ + mdev->sync_conf = (struct syncer_conf) { + /* .rate = */ DRBD_RATE_DEF, + /* .after = */ DRBD_AFTER_DEF, + /* .al_extents = */ DRBD_AL_EXTENTS_DEF, + /* .dp_volume = */ DRBD_DP_VOLUME_DEF, + /* .dp_interval = */ DRBD_DP_INTERVAL_DEF, + /* .throttle_th = */ DRBD_RS_THROTTLE_TH_DEF, + /* .hold_off_th = */ DRBD_RS_HOLD_OFF_TH_DEF, + /* .verify_alg = */ {}, 0, + /* .cpu_mask = */ {}, 0, + /* .csums_alg = */ {}, 0, + /* .use_rle = */ 0 + }; + + /* Have to use that way, because the layout differs between + big endian and little endian */ mdev->state = (union drbd_state) { { .role = R_SECONDARY, .peer = R_UNKNOWN, diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 8a549db..8403fd5 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2368,7 +2368,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) pkt_shrink_pktlist(pd); } -static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) +static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) { if (dev_minor >= MAX_WRITERS) return NULL; diff --git a/drivers/block/virtio_blk.c 
b/drivers/block/virtio_blk.c index 258bc2a..3c108bc 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -203,6 +203,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) struct virtio_blk *vblk = disk->private_data; struct request *req; struct bio *bio; + int err; bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); @@ -216,7 +217,10 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) } req->cmd_type = REQ_TYPE_SPECIAL; - return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); + err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); + blk_put_request(req); + + return err; } static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 82ed403..d95f0b4 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -69,7 +69,7 @@ struct blk_shadow { static const struct block_device_operations xlvbd_block_fops; -#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) +#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. They diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5d9cc53..c9e2dc8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -59,6 +59,12 @@ static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + /* Apple iMac11,1 */ + { USB_DEVICE(0x05ac, 0x8215) }, + + /* Apple MacBookPro8,2 */ + { USB_DEVICE(0x05ac, 0x821a) }, + /* AVM BlueFRITZ! USB v2.0 */ { USB_DEVICE(0x057c, 0x3800) }, diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 76a1abb..283b127 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -258,9 +258,16 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_DBG("tty %p", tty); + /* FIXME: This btw is bogus, nothing requires the old ldisc to clear + the pointer */ if (hu) return -EEXIST; + /* Error if the tty has no write op instead of leaving an exploitable + hole */ + if (tty->ops->write == NULL) + return -EOPNOTSUPP; + if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) { BT_ERR("Can't allocate control structure"); return -ENFILE; diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index d836a71..e38fe2be 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -12,6 +12,7 @@ #include #include "agp.h" #include "intel-agp.h" +#include #include "intel-gtt.c" @@ -805,6 +806,8 @@ static const struct intel_driver_description { "G45/G43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, "B43", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG, + "B43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, "G41", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, @@ -815,17 +818,27 @@ static const struct intel_driver_description { "HD Graphics", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, "HD Graphics", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, - "Sandybridge", NULL, &intel_i965_driver }, - { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, - "Sandybridge", NULL, 
&intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, + "Sandybridge", NULL, &intel_gen6_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, + "Sandybridge", NULL, &intel_gen6_driver }, { 0, 0, NULL, NULL, NULL } }; static int __devinit intel_gmch_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge) { - int i; + int i, mask; bridge->driver = NULL; for (i = 0; intel_agp_chipsets[i].name != NULL; i++) { @@ -845,14 +858,19 @@ static int __devinit intel_gmch_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); - if (bridge->driver->mask_memory == intel_i965_mask_memory) { - if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36))) - dev_err(&intel_private.pcidev->dev, - "set gfx device dma mask 36bit failed!\n"); - else - pci_set_consistent_dma_mask(intel_private.pcidev, - DMA_BIT_MASK(36)); - } + if (bridge->driver->mask_memory == intel_gen6_mask_memory) + mask = 40; + else if (bridge->driver->mask_memory == intel_i965_mask_memory) + mask = 36; + else + mask = 32; + + if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask))) + dev_err(&intel_private.pcidev->dev, + "set gfx device dma mask %d-bit failed!\n", mask); + else + pci_set_consistent_dma_mask(intel_private.pcidev, + DMA_BIT_MASK(mask)); return 1; } @@ -1030,12 +1048,14 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_G45_HB), ID(PCI_DEVICE_ID_INTEL_G41_HB), ID(PCI_DEVICE_ID_INTEL_B43_HB), + ID(PCI_DEVICE_ID_INTEL_B43_1_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), + ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB), { } }; diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 2547465..4c8949a 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -60,6 +60,12 @@ #define I810_PTE_LOCAL 0x00000002 #define I810_PTE_VALID 0x00000001 #define I830_PTE_SYSTEM_CACHED 0x00000006 +/* GT PTE cache control fields */ +#define GEN6_PTE_UNCACHED 0x00000002 +#define GEN6_PTE_LLC 0x00000004 +#define GEN6_PTE_LLC_MLC 0x00000006 +#define GEN6_PTE_GFDT 0x00000008 + #define I810_SMRAM_MISCC 0x70 #define I810_GFX_MEM_WIN_SIZE 0x00010000 #define I810_GFX_MEM_WIN_32M 0x00010000 @@ -178,6 +184,8 @@ #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 +#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90 +#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92 #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 @@ -194,10 +202,16 @@ #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 
0x0062 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 -#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */ +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A /* cover 915 and 945 variants */ #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ @@ -224,7 +238,8 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB) #define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB) #define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \ diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index a754715..16cfbab 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -25,6 +25,10 @@ #define USE_PCI_DMA_API 1 #endif +/* Max amount of stolen space, anything above will be returned to Linux */ +int intel_max_stolen = 32 * 1024 * 1024; +EXPORT_SYMBOL(intel_max_stolen); + static const struct aper_size_info_fixed intel_i810_sizes[] = { {64, 16384, 4}, @@ -45,6 +49,26 @@ static struct gatt_mask intel_i810_masks[] = .type = INTEL_AGP_CACHED_MEMORY} }; +#define INTEL_AGP_UNCACHED_MEMORY 0 +#define INTEL_AGP_CACHED_MEMORY_LLC 1 +#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2 +#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3 +#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4 + +static struct gatt_mask intel_gen6_masks[] = +{ + {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED, + .type = INTEL_AGP_UNCACHED_MEMORY }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC, + .type = INTEL_AGP_CACHED_MEMORY_LLC }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT, + .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC, + .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC }, + {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT, + .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT }, +}; + static struct _intel_private { struct pci_dev *pcidev; /* device one */ u8 __iomem *registers; @@ -171,13 +195,6 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem, off_t pg_start, int mask_type) { int i, j; - u32 cache_bits = 0; - - if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) - { - cache_bits = I830_PTE_SYSTEM_CACHED; - } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, @@ -310,6 +327,23 @@ static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge, return 0; } +static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge, + 
int type) +{ + unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT; + + if (type_mask == AGP_USER_UNCACHED_MEMORY) + return INTEL_AGP_UNCACHED_MEMORY; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) + return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT : + INTEL_AGP_CACHED_MEMORY_LLC_MLC; + else /* set 'normal'/'cached' to LLC by default */ + return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT : + INTEL_AGP_CACHED_MEMORY_LLC; +} + + static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, int type) { @@ -497,7 +531,7 @@ static void intel_i830_init_gtt_entries(void) pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); - if (IS_I965) { + if (IS_G33 || IS_I965) { u32 pgetbl_ctl; pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); @@ -530,22 +564,6 @@ static void intel_i830_init_gtt_entries(void) size = 512; } size += 4; /* add in BIOS popup space */ - } else if (IS_G33 && !IS_PINEVIEW) { - /* G33's GTT size defined in gmch_ctrl */ - switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) { - case G33_PGETBL_SIZE_1M: - size = 1024; - break; - case G33_PGETBL_SIZE_2M: - size = 2048; - break; - default: - dev_info(&agp_bridge->dev->dev, - "unknown page table size 0x%x, assuming 512KB\n", - (gmch_ctrl & G33_PGETBL_SIZE_MASK)); - size = 512; - } - size += 4; } else if (IS_G4X || IS_PINEVIEW) { /* On 4 series hardware, GTT stolen is separate from graphics * stolen, ignore it in stolen gtt entries counting. However, @@ -581,8 +599,7 @@ static void intel_i830_init_gtt_entries(void) gtt_entries = 0; break; } - } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { + } else if (IS_SNB) { /* * SandyBridge has new memory control reg at 0x50.w */ @@ -710,7 +727,12 @@ static void intel_i830_init_gtt_entries(void) break; } } - if (gtt_entries > 0) { + if (!local && gtt_entries > intel_max_stolen) { + dev_info(&agp_bridge->dev->dev, + "detected %dK stolen memory, trimming to %dK\n", + gtt_entries / KB(1), intel_max_stolen / KB(1)); + gtt_entries = intel_max_stolen / KB(4); + } else if (gtt_entries > 0) { dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", gtt_entries / KB(1), local ? 
"local" : "stolen"); gtt_entries /= KB(4); @@ -1052,11 +1074,11 @@ static void intel_i9xx_setup_flush(void) intel_i915_setup_chipset_flush(); } - if (intel_private.ifp_resource.start) { + if (intel_private.ifp_resource.start) intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE); - if (!intel_private.i9xx_flush_page) - dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing"); - } + if (!intel_private.i9xx_flush_page) + dev_err(&intel_private.pcidev->dev, + "can't ioremap flush page - no chipset flushing\n"); } static int intel_i9xx_configure(void) @@ -1147,7 +1169,7 @@ static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start, mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); - if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY && + if (!IS_SNB && mask_type != 0 && mask_type != AGP_PHYS_MEMORY && mask_type != INTEL_AGP_CACHED_MEMORY) goto out_err; @@ -1212,24 +1234,31 @@ static int intel_i915_get_gtt_size(void) int size; if (IS_G33) { - u16 gmch_ctrl; + u32 pgetbl_ctl; + pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL); - /* G33's GTT size defined in gmch_ctrl */ - pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl); - switch (gmch_ctrl & I830_GMCH_GMS_MASK) { - case I830_GMCH_GMS_STOLEN_512: + switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { + case I965_PGETBL_SIZE_128KB: + size = 128; + break; + case I965_PGETBL_SIZE_256KB: + size = 256; + break; + case I965_PGETBL_SIZE_512KB: size = 512; break; - case I830_GMCH_GMS_STOLEN_1024: + case I965_PGETBL_SIZE_1MB: size = 1024; break; - case I830_GMCH_GMS_STOLEN_8192: - size = 8*1024; + case I965_PGETBL_SIZE_2MB: + size = 2048; + break; + case I965_PGETBL_SIZE_1_5MB: + size = 1024 + 512; break; default: - dev_info(&agp_bridge->dev->dev, - "unknown page table size 0x%x, assuming 512KB\n", - (gmch_ctrl & I830_GMCH_GMS_MASK)); + dev_info(&intel_private.pcidev->dev, + "unknown page table size, assuming 512KB\n"); size = 512; } } else { @@ -1261,14 +1290,6 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp); pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2); - gtt_map_size = intel_i915_get_gtt_size(); - - intel_private.gtt = ioremap(temp2, gtt_map_size); - if (!intel_private.gtt) - return -ENOMEM; - - intel_private.gtt_total_size = gtt_map_size / 4; - temp &= 0xfff80000; intel_private.registers = ioremap(temp, 128 * 4096); @@ -1277,6 +1298,14 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) return -ENOMEM; } + gtt_map_size = intel_i915_get_gtt_size(); + + intel_private.gtt = ioremap(temp2, gtt_map_size); + if (!intel_private.gtt) + return -ENOMEM; + + intel_private.gtt_total_size = gtt_map_size / 4; + temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000; global_cache_flush(); /* FIXME: ? 
*/ @@ -1309,6 +1338,16 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, return addr | bridge->driver->masks[type].mask; } +static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, + dma_addr_t addr, int type) +{ + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + + /* Type checking must be done elsewhere */ + return addr | bridge->driver->masks[type].mask; +} + static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) { u16 snb_gmch_ctl; @@ -1328,6 +1367,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) break; case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: + case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB: *gtt_offset = MB(2); pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); @@ -1517,6 +1557,39 @@ static const struct agp_bridge_driver intel_i965_driver = { #endif }; +static const struct agp_bridge_driver intel_gen6_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_i830_sizes, + .size_type = FIXED_APER_SIZE, + .num_aperture_sizes = 4, + .needs_scratch_page = true, + .configure = intel_i9xx_configure, + .fetch_size = intel_i9xx_fetch_size, + .cleanup = intel_i915_cleanup, + .mask_memory = intel_gen6_mask_memory, + .masks = intel_gen6_masks, + .agp_enable = intel_i810_agp_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = intel_i965_create_gatt_table, + .free_gatt_table = intel_i830_free_gatt_table, + .insert_memory = intel_i915_insert_entries, + .remove_memory = intel_i915_remove_entries, + .alloc_by_type = intel_i830_alloc_by_type, + .free_by_type = intel_i810_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = intel_gen6_type_to_mask_type, + .chipset_flush = intel_i915_chipset_flush, +#ifdef USE_PCI_DMA_API + .agp_map_page = intel_agp_map_page, + .agp_unmap_page = intel_agp_unmap_page, + .agp_map_memory = intel_agp_map_memory, + .agp_unmap_memory = intel_agp_unmap_memory, +#endif +}; + static const struct agp_bridge_driver intel_g33_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_i830_sizes, diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index a0a1829..f8e7d89 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -479,6 +479,21 @@ static int hpet_ioctl_ieon(struct hpet_dev *devp) if (irq) { unsigned long irq_flags; + if (devp->hd_flags & HPET_SHARED_IRQ) { + /* + * To prevent the interrupt handler from seeing an + * unwanted interrupt status bit, program the timer + * so that it will not fire in the near future ... + */ + writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK, + &timer->hpet_config); + write_counter(read_counter(&hpet->hpet_mc), + &timer->hpet_compare); + /* ... and clear any left-over status. */ + isr = 1 << (devp - devp->hd_hpets->hp_dev); + writel(isr, &hpet->hpet_isr); + } + sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev)); irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? 
IRQF_SHARED : IRQF_DISABLED; @@ -970,6 +985,8 @@ static int hpet_acpi_add(struct acpi_device *device) return -ENODEV; if (!data.hd_address || !data.hd_nirqs) { + if (data.hd_address) + iounmap(data.hd_address); printk("%s: no address or irqs in _CRS\n", __func__); return -ENODEV; } diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 094bdc3..b1a3014 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -1662,6 +1662,17 @@ static int check_hotmod_int_op(const char *curr, const char *option, return 0; } +static struct smi_info *smi_info_alloc(void) +{ + struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL); + + if (info) { + spin_lock_init(&info->si_lock); + spin_lock_init(&info->msg_lock); + } + return info; +} + static int hotmod_handler(const char *val, struct kernel_param *kp) { char *str = kstrdup(val, GFP_KERNEL); @@ -1776,7 +1787,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) } if (op == HM_ADD) { - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) { rv = -ENOMEM; goto out; @@ -1838,7 +1849,7 @@ static __devinit void hardcode_find_bmc(void) if (!ports[i] && !addrs[i]) continue; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) return; @@ -2025,7 +2036,7 @@ static __devinit int try_init_spmi(struct SPMITable *spmi) else addr_space = IPMI_IO_ADDR_SPACE; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) { printk(KERN_ERR PFX "Could not allocate SI data (3)\n"); return -ENOMEM; @@ -2129,7 +2140,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, if (!acpi_dev) return -ENODEV; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) return -ENOMEM; @@ -2176,6 +2187,14 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, info->io.addr_data = res->start; info->io.regspacing = DEFAULT_REGSPACING; + res = pnp_get_resource(dev, + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? + IORESOURCE_IO : IORESOURCE_MEM, + 1); + if (res) { + if (res->start > info->io.addr_data) + info->io.regspacing = res->start - info->io.addr_data; + } info->io.regsize = DEFAULT_REGSPACING; info->io.regshift = 0; @@ -2299,7 +2318,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) { struct smi_info *info; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) { printk(KERN_ERR PFX "Could not allocate SI data\n"); return; @@ -2400,7 +2419,7 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev, int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; struct smi_info *info; - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) return -ENOMEM; @@ -2538,7 +2557,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev, return -EINVAL; } - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) { dev_err(&dev->dev, @@ -2980,7 +2999,7 @@ static __devinit void default_find_bmc(void) if (check_legacy_ioport(ipmi_defaults[i].port)) continue; #endif - info = kzalloc(sizeof(*info), GFP_KERNEL); + info = smi_info_alloc(); if (!info) return; @@ -3103,9 +3122,6 @@ static int try_smi_init(struct smi_info *new_smi) goto out_err; } - spin_lock_init(&(new_smi->si_lock)); - spin_lock_init(&(new_smi->msg_lock)); - /* Do low-level detection first. 
*/ if (new_smi->handlers->detect(new_smi->si_sm)) { if (new_smi->addr_source) diff --git a/drivers/char/mem.c b/drivers/char/mem.c index f54dab8..1f528fa 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -788,10 +788,11 @@ static const struct file_operations zero_fops = { /* * capabilities for /dev/zero * - permits private mappings, "copies" are taken of the source of zeros + * - no writeback happens */ static struct backing_dev_info zero_bdi = { .name = "char/mem", - .capabilities = BDI_CAP_MAP_COPY, + .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK, }; static const struct file_operations full_fops = { @@ -916,7 +917,7 @@ static int __init chr_dev_init(void) NULL, devlist[minor].name); } - return 0; + return tty_init(); } fs_initcall(chr_dev_init); diff --git a/drivers/char/n_gsm.c b/drivers/char/n_gsm.c index e4089c4..a5eeacd 100644 --- a/drivers/char/n_gsm.c +++ b/drivers/char/n_gsm.c @@ -717,8 +717,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) if (msg->len < 128) *--dp = (msg->len << 1) | EA; else { - *--dp = (msg->len >> 6) | EA; - *--dp = (msg->len & 127) << 1; + *--dp = (msg->len >> 7); /* bits 7 - 15 */ + *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */ } } @@ -969,6 +969,8 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data, { struct gsm_msg *msg; msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); + if (msg == NULL) + return; msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ msg->data[1] = (dlen << 1) | EA; memcpy(msg->data + 2, data, dlen); diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index a663800..18af923 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c @@ -1611,6 +1611,8 @@ static int ntty_install(struct tty_driver *driver, struct tty_struct *tty) ret = tty_init_termios(tty); if (ret == 0) { tty_driver_kref_get(driver); + tty->count++; + tty->driver_data = port; driver->ttys[tty->index] = tty; } return ret; @@ -1639,7 +1641,7 @@ static int ntty_activate(struct tty_port *tport, struct tty_struct *tty) static int ntty_open(struct tty_struct *tty, struct file *filp) { - struct port *port = get_port_by_tty(tty); + struct port *port = tty->driver_data; return tty_port_open(&port->port, tty, filp); } diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 308903e..824d67c 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -4132,6 +4132,8 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (cmd != SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); + memset(&new_line, 0, size); + switch(ifr->ifr_settings.type) { case IF_GET_IFACE: /* return current sync_serial_settings */ diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c index 74f00b5..9445f48 100644 --- a/drivers/char/ramoops.c +++ b/drivers/char/ramoops.c @@ -27,7 +27,6 @@ #include #define RAMOOPS_KERNMSG_HDR "====" -#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval)) #define RECORD_SIZE 4096 @@ -63,8 +62,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper, struct ramoops_context, dump); unsigned long s1_start, s2_start; unsigned long l1_cpy, l2_cpy; - int res; - char *buf; + int res, hdr_size; + char *buf, *buf_orig; struct timeval timestamp; /* Only dump oopses if dump_oops is set */ @@ -72,6 +71,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper, return; buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE)); + buf_orig = buf; + memset(buf, '\0', RECORD_SIZE); res = sprintf(buf, "%s", 
RAMOOPS_KERNMSG_HDR); buf += res; @@ -79,8 +80,9 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper, res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec); buf += res; - l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE)); - l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy); + hdr_size = buf - buf_orig; + l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size)); + l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy); s2_start = l2 - l2_cpy; s1_start = l1 - l1_cpy; diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 05ad4a1..2df6c26 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -954,7 +954,7 @@ int tpm_open(struct inode *inode, struct file *file) return -EBUSY; } - chip->data_buffer = kmalloc(TPM_BUFSIZE * sizeof(u8), GFP_KERNEL); + chip->data_buffer = kzalloc(TPM_BUFSIZE, GFP_KERNEL); if (chip->data_buffer == NULL) { clear_bit(0, &chip->is_open); put_device(chip->dev); diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index cc1e985..d8210ca 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c @@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work) spin_lock_irqsave(&tty->buf.lock, flags); if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { - struct tty_buffer *head; + struct tty_buffer *head, *tail = tty->buf.tail; + int seen_tail = 0; while ((head = tty->buf.head) != NULL) { int count; char *char_buf; @@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work) if (!count) { if (head->next == NULL) break; + /* + There's a possibility tty might get new buffer + added during the unlock window below. We could + end up spinning in here forever hogging the CPU + completely. To avoid this let's have a rest each + time we processed the tail buffer. + */ + if (tail == head) + seen_tail = 1; tty->buf.head = head->next; tty_buffer_free(tty, head); continue; @@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work) line discipline as we want to empty the queue */ if (test_bit(TTY_FLUSHPENDING, &tty->flags)) break; - if (!tty->receive_room) { + if (!tty->receive_room || seen_tail) { schedule_delayed_work(&tty->buf.work, 1); break; } diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index d71f0fc..507441a 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -3128,7 +3128,7 @@ static struct cdev tty_cdev, console_cdev; * Ok, now we can initialize the rest of the tty devices and can count * on memory allocations, interrupts etc.. 
*/ -static int __init tty_init(void) +int __init tty_init(void) { cdev_init(&tty_cdev, &tty_fops); if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) || @@ -3149,4 +3149,4 @@ static int __init tty_init(void) #endif return 0; } -module_init(tty_init); + diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index 500e740..236628f 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c @@ -47,6 +47,7 @@ static DEFINE_SPINLOCK(tty_ldisc_lock); static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); +static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle); /* Line disc dispatch table */ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; @@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld) return; } local_irq_restore(flags); + wake_up(&tty_ldisc_idle); } /** @@ -450,6 +452,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld) /* BKL here locks verus a hangup event */ lock_kernel(); ret = ld->ops->open(tty); + if (ret) + clear_bit(TTY_LDISC_OPEN, &tty->flags); unlock_kernel(); return ret; } @@ -530,6 +534,23 @@ static int tty_ldisc_halt(struct tty_struct *tty) } /** + * tty_ldisc_wait_idle - wait for the ldisc to become idle + * @tty: tty to wait for + * + * Wait for the line discipline to become idle. The discipline must + * have been halted for this to guarantee it remains idle. + */ +static int tty_ldisc_wait_idle(struct tty_struct *tty) +{ + int ret; + ret = wait_event_interruptible_timeout(tty_ldisc_idle, + atomic_read(&tty->ldisc->users) == 1, 5 * HZ); + if (ret < 0) + return ret; + return ret > 0 ? 0 : -EBUSY; +} + +/** * tty_set_ldisc - set line discipline * @tty: the terminal to set * @ldisc: the line discipline @@ -632,8 +653,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) flush_scheduled_work(); + retval = tty_ldisc_wait_idle(tty); + mutex_lock(&tty->ldisc_mutex); lock_kernel(); + + /* handle wait idle failure locked */ + if (retval) { + tty_ldisc_put(new_ldisc); + goto enable; + } + if (test_bit(TTY_HUPPED, &tty->flags)) { /* We were raced by the hangup method. It will have stomped the ldisc data and closed the ldisc down */ @@ -667,6 +697,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) tty_ldisc_put(o_ldisc); +enable: /* * Allow ldisc referencing to occur again */ @@ -712,9 +743,12 @@ static void tty_reset_termios(struct tty_struct *tty) * state closed */ -static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) +static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc) { - struct tty_ldisc *ld; + struct tty_ldisc *ld = tty_ldisc_get(ldisc); + + if (IS_ERR(ld)) + return -1; tty_ldisc_close(tty, tty->ldisc); tty_ldisc_put(tty->ldisc); @@ -722,10 +756,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) /* * Switch the line discipline back */ - ld = tty_ldisc_get(ldisc); - BUG_ON(IS_ERR(ld)); tty_ldisc_assign(tty, ld); tty_set_termios_ldisc(tty, ldisc); + + return 0; } /** @@ -787,13 +821,16 @@ void tty_ldisc_hangup(struct tty_struct *tty) a FIXME */ if (tty->ldisc) { /* Not yet closed */ if (reset == 0) { - tty_ldisc_reinit(tty, tty->termios->c_line); - err = tty_ldisc_open(tty, tty->ldisc); + + if (!tty_ldisc_reinit(tty, tty->termios->c_line)) + err = tty_ldisc_open(tty, tty->ldisc); + else + err = 1; } /* If the re-open fails or we reset then go to N_TTY. 
The N_TTY open cannot fail */ if (reset || err) { - tty_ldisc_reinit(tty, N_TTY); + BUG_ON(tty_ldisc_reinit(tty, N_TTY)); WARN_ON(tty_ldisc_open(tty, tty->ldisc)); } tty_ldisc_enable(tty); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 942a982..0f69c5e 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -459,9 +459,12 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, /* * Wait till the host acknowledges it pushed out the data we - * sent. This is done for ports in blocking mode or for data - * from the hvc_console; the tty operations are performed with - * spinlocks held so we can't sleep here. + * sent. This is done for data from the hvc_console; the tty + * operations are performed with spinlocks held so we can't + * sleep here. An alternative would be to copy the data to a + * buffer and relax the spinning requirement. The downside is + * we need to kmalloc a GFP_ATOMIC buffer each time the + * console driver writes something out. */ while (!virtqueue_get_buf(out_vq, &len)) cpu_relax(); @@ -596,6 +599,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, ssize_t ret; bool nonblock; + /* Userspace could be out to fool us */ + if (!count) + return 0; + port = filp->private_data; nonblock = filp->f_flags & O_NONBLOCK; @@ -622,6 +629,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, goto free_buf; } + /* + * We now ask send_buf() to not spin for generic ports -- we + * can re-use the same code path that non-blocking file + * descriptors take for blocking file descriptors since the + * wait is already done and we're certain the write will go + * through to the host. + */ + nonblock = true; ret = send_buf(port, buf, count, nonblock); if (nonblock && ret > 0) @@ -642,7 +657,7 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) poll_wait(filp, &port->waitqueue, wait); ret = 0; - if (port->inbuf) + if (!will_read_block(port)) ret |= POLLIN | POLLRDNORM; if (!will_write_block(port)) ret |= POLLOUT; diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index cb19dbc..85cf230 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -503,6 +503,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, struct kbd_struct * kbd; unsigned int console; unsigned char ucval; + unsigned int uival; void __user *up = (void __user *)arg; int i, perm; int ret = 0; @@ -657,7 +658,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, break; case KDGETMODE: - ucval = vc->vc_mode; + uival = vc->vc_mode; goto setint; case KDMAPDISP: @@ -695,7 +696,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, break; case KDGKBMODE: - ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW : + uival = ((kbd->kbdmode == VC_RAW) ? K_RAW : (kbd->kbdmode == VC_MEDIUMRAW) ? K_MEDIUMRAW : (kbd->kbdmode == VC_UNICODE) ? K_UNICODE : K_XLATE); @@ -717,9 +718,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, break; case KDGKBMETA: - ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT); + uival = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT); setint: - ret = put_user(ucval, (int __user *)arg); + ret = put_user(uival, (int __user *)arg); break; case KDGETKEYCODE: @@ -949,7 +950,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, for (i = 0; i < MAX_NR_CONSOLES; ++i) if (! VT_IS_IN_USE(i)) break; - ucval = i < MAX_NR_CONSOLES ? (i+1) : -1; + uival = i < MAX_NR_CONSOLES ? 
(i+1) : -1; goto setint; /* diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 2e992bc..8a515ba 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -286,7 +286,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) - : "d" (control_word), "b" (key), "c" (count)); + : "d" (control_word), "b" (key), "c" (initial)); asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 6d3a73b..5216c8a 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -97,6 +97,7 @@ struct ioat_chan_common { #define IOAT_RESET_PENDING 2 #define IOAT_KOBJ_INIT_FAIL 3 #define IOAT_RESHAPE_PENDING 4 + #define IOAT_RUN 5 struct timer_list timer; #define COMPLETION_TIMEOUT msecs_to_jiffies(100) #define IDLE_TIMEOUT msecs_to_jiffies(2000) diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 3c8b32a..effd140 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -287,7 +287,10 @@ void ioat2_timer_event(unsigned long data) chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(chan), "%s: Channel halted (%x)\n", __func__, chanerr); - BUG_ON(is_ioat_bug(chanerr)); + if (test_bit(IOAT_RUN, &chan->state)) + BUG_ON(is_ioat_bug(chanerr)); + else /* we never got off the ground */ + return; } /* if we haven't made progress and we have already @@ -492,6 +495,8 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf return ring; } +void ioat2_free_chan_resources(struct dma_chan *c); + /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring * @chan: channel to be initialized */ @@ -500,6 +505,7 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent **ring; + u64 status; int order; /* have we already been set up? 
*/ @@ -540,7 +546,20 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); - return 1 << ioat->alloc_order; + /* check that we got off the ground */ + udelay(5); + status = ioat_chansts(chan); + if (is_ioat_active(status) || is_ioat_idle(status)) { + set_bit(IOAT_RUN, &chan->state); + return 1 << ioat->alloc_order; + } else { + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + + dev_WARN(to_dev(chan), + "failed to start channel chanerr: %#x\n", chanerr); + ioat2_free_chan_resources(c); + return -EFAULT; + } } bool reshape_ring(struct ioat2_dma_chan *ioat, int order) @@ -778,6 +797,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) del_timer_sync(&chan->timer); device->cleanup_fn((unsigned long) c); device->reset_hw(chan); + clear_bit(IOAT_RUN, &chan->state); spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->prep_lock); @@ -859,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_tx_status = ioat_tx_status; + dma->device_tx_status = ioat_dma_tx_status; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 1cdd22e..d0f4990 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -361,7 +361,10 @@ static void ioat3_timer_event(unsigned long data) chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(chan), "%s: Channel halted (%x)\n", __func__, chanerr); - BUG_ON(is_ioat_bug(chanerr)); + if (test_bit(IOAT_RUN, &chan->state)) + BUG_ON(is_ioat_bug(chanerr)); + else /* we never got off the ground */ + return; } /* if we haven't made progress and we have already diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 86c5ae9..a25f5f6 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause) static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) { - u32 val = (1 << (1 + (chan->idx * 16))); + u32 val = ~(1 << (chan->idx * 16)); dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); __raw_writel(val, XOR_INTR_CAUSE(chan)); } @@ -449,7 +449,7 @@ mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) static void mv_xor_tasklet(unsigned long data) { struct mv_xor_chan *chan = (struct mv_xor_chan *) data; - __mv_xor_slot_cleanup(chan); + mv_xor_slot_cleanup(chan); } static struct mv_xor_desc_slot * diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ac9f798..0e03506 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -178,7 +178,7 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth) default: amd64_printk(KERN_ERR, "Unsupported family!\n"); - break; + return -EINVAL; } return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth, min_scrubrate); @@ -1430,7 +1430,7 @@ static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, u64 chan_off; if (hi_range_sel) { - if (!(dct_sel_base_addr & 0xFFFFF800) && + if (!(dct_sel_base_addr & 0xFFFF0000) && hole_valid && (sys_addr >= 0x100000000ULL)) chan_off = hole_off << 16; else @@ -1567,7 +1567,7 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", hole_off, hole_valid, intlv_sel); - if (intlv_en || + if (intlv_en && 
(intlv_sel != ((sys_addr >> 12) & intlv_en))) return -EINVAL; @@ -1679,7 +1679,7 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, * ganged. Otherwise @chan should already contain the channel at * this point. */ - if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL) + if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) chan = get_channel_from_ecc_syndrome(mci, syndrome); if (chan >= 0) diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 3630308..11c095d 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -575,14 +575,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) return NULL; } - /* marking MCI offline */ - mci->op_state = OP_OFFLINE; - del_mc_from_global_list(mci); mutex_unlock(&mem_ctls_mutex); - /* flush workq processes and remove sysfs */ + /* flush workq processes */ edac_mc_workq_teardown(mci); + + /* marking MCI offline */ + mci->op_state = OP_OFFLINE; + + /* remove from sysfs */ edac_remove_sysfs_mci_device(mci); edac_printk(KERN_INFO, EDAC_MC, diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index e0187d1..0fd5b85 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { ATTR_COUNTER(0), ATTR_COUNTER(1), ATTR_COUNTER(2), + { .attr = { .name = NULL } } }; static struct mcidev_sysfs_group i7core_udimm_counters = { diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9f627e7..0942efb 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -684,7 +684,7 @@ static void ar_context_tasklet(unsigned long data) d = &ab->descriptor; if (d->res_count == 0) { - size_t size, rest, offset; + size_t size, size2, rest, pktsize, size3, offset; dma_addr_t start_bus; void *start; @@ -695,25 +695,61 @@ static void ar_context_tasklet(unsigned long data) */ offset = offsetof(struct ar_buffer, data); - start = buffer = ab; + start = ab; start_bus = le32_to_cpu(ab->descriptor.data_address) - offset; + buffer = ab->data; ab = ab->next; d = &ab->descriptor; - size = buffer + PAGE_SIZE - ctx->pointer; + size = start + PAGE_SIZE - ctx->pointer; + /* valid buffer data in the next page */ rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); + /* what actually fits in this page */ + size2 = min(rest, (size_t)PAGE_SIZE - offset - size); memmove(buffer, ctx->pointer, size); - memcpy(buffer + size, ab->data, rest); - ctx->current_buffer = ab; - ctx->pointer = (void *) ab->data + rest; - end = buffer + size + rest; + memcpy(buffer + size, ab->data, size2); + + while (size > 0) { + void *next = handle_ar_packet(ctx, buffer); + pktsize = next - buffer; + if (pktsize >= size) { + /* + * We have handled all the data that was + * originally in this page, so we can now + * continue in the next page. 
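The i7core_edac hunk above adds a terminating entry to the udimm counter attribute array. Tables like this are normally walked until an entry with a NULL name is reached, so a missing sentinel sends the walker past the end of the array. A small stand-alone illustration of the convention; the table and walker here are made up for the example and are not the edac code itself.

#include <stdio.h>
#include <stddef.h>

struct attr {
        const char *name;
        int value;
};

static const struct attr attr_table[] = {
        { "udimm0", 0 },
        { "udimm1", 1 },
        { "udimm2", 2 },
        { NULL, 0 },    /* sentinel: without it the loop below walks off the array */
};

int main(void)
{
        const struct attr *a;

        for (a = attr_table; a->name != NULL; a++)
                printf("%s = %d\n", a->name, a->value);
        return 0;
}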
+ */ + buffer = next; + break; + } + /* move the next packet to the start of the buffer */ + memmove(buffer, next, size + size2 - pktsize); + size -= pktsize; + /* fill up this page again */ + size3 = min(rest - size2, + (size_t)PAGE_SIZE - offset - size - size2); + memcpy(buffer + size + size2, + (void *) ab->data + size2, size3); + size2 += size3; + } - while (buffer < end) - buffer = handle_ar_packet(ctx, buffer); + if (rest > 0) { + /* handle the packets that are fully in the next page */ + buffer = (void *) ab->data + + (buffer - (start + offset + size)); + end = (void *) ab->data + rest; - dma_free_coherent(ohci->card.device, PAGE_SIZE, - start, start_bus); - ar_context_add_page(ctx); + while (buffer < end) + buffer = handle_ar_packet(ctx, buffer); + + ctx->current_buffer = ab; + ctx->pointer = end; + + dma_free_coherent(ohci->card.device, PAGE_SIZE, + start, start_bus); + ar_context_add_page(ctx); + } else { + ctx->pointer = start + PAGE_SIZE; + } } else { buffer = ctx->pointer; ctx->pointer = end = @@ -2439,7 +2475,7 @@ static int __devinit pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fw_ohci *ohci; - u32 bus_options, max_receive, link_speed, version, link_enh; + u32 bus_options, max_receive, link_speed, version; u64 guid; int i, err, n_ir, n_it; size_t size; @@ -2492,23 +2528,6 @@ static int __devinit pci_probe(struct pci_dev *dev, if (param_quirks) ohci->quirks = param_quirks; - /* TI OHCI-Lynx and compatible: set recommended configuration bits. */ - if (dev->vendor == PCI_VENDOR_ID_TI) { - pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh); - - /* adjust latency of ATx FIFO: use 1.7 KB threshold */ - link_enh &= ~TI_LinkEnh_atx_thresh_mask; - link_enh |= TI_LinkEnh_atx_thresh_1_7K; - - /* use priority arbitration for asynchronous responses */ - link_enh |= TI_LinkEnh_enab_unfair; - - /* required for aPhyEnhanceEnable to work */ - link_enh |= TI_LinkEnh_enab_accel; - - pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh); - } - ar_context_init(&ohci->ar_request_ctx, ohci, OHCI1394_AsReqRcvContextControlSet); diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index 3bc9a5d..d49e146 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h @@ -154,12 +154,4 @@ #define OHCI1394_phy_tcode 0xe -/* TI extensions */ - -#define PCI_CFG_TI_LinkEnh 0xf4 -#define TI_LinkEnh_enab_accel 0x00000002 -#define TI_LinkEnh_enab_unfair 0x00000080 -#define TI_LinkEnh_atx_thresh_mask 0x00003000 -#define TI_LinkEnh_atx_thresh_1_7K 0x00001000 - #endif /* _FIREWIRE_OHCI_H */ diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index aa9bc9e..dad21fd 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) } /* generate SMI */ + /* inb to force posted write through and make SMI happen now */ asm volatile ( - "outb %b0,%w1" + "outb %b0,%w1\n" + "inb %w1" : /* no output args */ : "a" (smi_cmd->command_code), "d" (smi_cmd->command_address), diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c index e23c068..d3e55a0 100644 --- a/drivers/gpio/cs5535-gpio.c +++ b/drivers/gpio/cs5535-gpio.c @@ -56,6 +56,29 @@ static struct cs5535_gpio_chip { * registers, see include/linux/cs5535.h. 
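The cs5535-gpio hunk that follows introduces errata_outl(): per the CS5536 erratum cited in its comment, a write to a high-bank GPIO register after suspend can clear the bits that were not selected in that write, so the driver re-reads the register and folds the currently programmed state back into the value first. Below is the merge step alone, as a user-space sketch that mirrors the new helper's logic; current_val stands in for the inl() read, and this is an illustration rather than a drop-in replacement.

#include <stdint.h>

/*
 * These registers are atomic set/clear pairs: a bit written in the low
 * 16 bits asserts the GPIO, the corresponding bit in the high 16 bits
 * deasserts it.  The workaround is a read-modify-write so a post-resume
 * write does not wipe out the bits that were not selected.
 */
uint32_t merge_high_bank_write(uint32_t val, uint32_t current_val)
{
        if (val & 0xffff)
                /* setting bits: also re-assert whatever is currently set */
                val |= current_val & 0xffff;    /* ignore the high bits */
        else
                /* clearing bits: re-assert current state minus the cleared bits */
                val |= current_val ^ (val >> 16);

        return val;     /* the value errata_outl() would pass to outl() */
}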
*/ +static void errata_outl(struct cs5535_gpio_chip *chip, u32 val, + unsigned int reg) +{ + unsigned long addr = chip->base + 0x80 + reg; + + /* + * According to the CS5536 errata (#36), after suspend + * a write to the high bank GPIO register will clear all + * non-selected bits; the recommended workaround is a + * read-modify-write operation. + * + * Don't apply this errata to the edge status GPIOs, as writing + * to their lower bits will clear them. + */ + if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) { + if (val & 0xffff) + val |= (inl(addr) & 0xffff); /* ignore the high bits */ + else + val |= (inl(addr) ^ (val >> 16)); + } + outl(val, addr); +} + static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, unsigned int reg) { @@ -64,7 +87,7 @@ static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, outl(1 << offset, chip->base + reg); else /* high bank register */ - outl(1 << (offset - 16), chip->base + 0x80 + reg); + errata_outl(chip, 1 << (offset - 16), reg); } void cs5535_gpio_set(unsigned offset, unsigned int reg) @@ -86,7 +109,7 @@ static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset, outl(1 << (offset + 16), chip->base + reg); else /* high bank register */ - outl(1 << offset, chip->base + 0x80 + reg); + errata_outl(chip, 1 << offset, reg); } void cs5535_gpio_clear(unsigned offset, unsigned int reg) diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c index 2762698..897e057 100644 --- a/drivers/gpio/rdc321x-gpio.c +++ b/drivers/gpio/rdc321x-gpio.c @@ -135,7 +135,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) struct rdc321x_gpio *rdc321x_gpio_dev; struct rdc321x_gpio_pdata *pdata; - pdata = pdev->dev.platform_data; + pdata = platform_get_drvdata(pdev); if (!pdata) { dev_err(&pdev->dev, "no platform data supplied\n"); return -ENODEV; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 88910e5..83dac28 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -96,7 +96,10 @@ config DRM_I830 config DRM_I915 tristate "i915 driver" depends on AGP_INTEL + # we need shmfs for the swappable backing store, and in particular + # the shmem_readpage() which depends upon tmpfs select SHMEM + select TMPFS select DRM_KMS_HELPER select FB_CFB_FILLRECT select FB_CFB_COPYAREA diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 57cea01..5f32f72 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -155,12 +155,12 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, { DRM_MODE_CONNECTOR_Component, "Component", 0 }, - { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 }, - { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 }, - { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, - { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, + { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, + { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, + { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, + { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, { DRM_MODE_CONNECTOR_TV, "TV", 0 }, - { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 }, + { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, }; static struct drm_prop_enum_list drm_encoder_enum_list[] = diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 9b2a541..25d70d6 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -610,13 +610,13 @@ 
int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; if (mode_changed) { - old_fb = set->crtc->fb; - set->crtc->fb = set->fb; set->crtc->enabled = (set->mode != NULL); if (set->mode != NULL) { DRM_DEBUG_KMS("attempting to set mode from" " userspace\n"); drm_mode_debug_printmodeline(set->mode); + old_fb = set->crtc->fb; + set->crtc->fb = set->fb; if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, old_fb)) { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 4a66201..c9736ed 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -502,7 +502,9 @@ long drm_ioctl(struct file *filp, retcode = -EFAULT; goto err_i1; } - } + } else + memset(kdata, 0, _IOC_SIZE(cmd)); + if (ioctl->flags & DRM_UNLOCKED) retcode = func(dev, kdata, file_priv); else { diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 33dad3f..f353cdf 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -451,28 +451,6 @@ drm_gem_object_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_free); -/** - * Called after the last reference to the object has been lost. - * Must be called without holding struct_mutex - * - * Frees the object - */ -void -drm_gem_object_free_unlocked(struct kref *kref) -{ - struct drm_gem_object *obj = (struct drm_gem_object *) kref; - struct drm_device *dev = obj->dev; - - if (dev->driver->gem_free_object_unlocked != NULL) - dev->driver->gem_free_object_unlocked(obj); - else if (dev->driver->gem_free_object != NULL) { - mutex_lock(&dev->struct_mutex); - dev->driver->gem_free_object(obj); - mutex_unlock(&dev->struct_mutex); - } -} -EXPORT_SYMBOL(drm_gem_object_free_unlocked); - static void drm_gem_object_ref_bug(struct kref *list_kref) { BUG(); @@ -517,6 +495,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; drm_gem_object_reference(obj); + + mutex_lock(&obj->dev->struct_mutex); + drm_vm_open_locked(vma); + mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_open); @@ -524,7 +506,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; - drm_gem_object_unreference_unlocked(obj); + mutex_lock(&obj->dev->struct_mutex); + drm_vm_close_locked(vma); + drm_gem_object_unreference(obj); + mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index a263b70..0ddd109 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -545,7 +545,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; - int crtc, ret = 0; + int ret = 0; + unsigned int crtc; /* If drm_vblank_init() hasn't been called yet, just no-op */ if (!dev->num_crtcs) diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index c3b13fb..3813e84 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma) mutex_unlock(&dev->struct_mutex); } -/** - * \c close method for all virtual memory types. - * - * \param vma virtual memory area. - * - * Search the \p vma private data entry in drm_device::vmalist, unlink it, and - * free it. 
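The drm_gem.c and drm_vm.c changes in this area split the vma close path into drm_vm_close_locked() plus a thin wrapper that takes struct_mutex, so drm_gem_vm_close() can take the mutex once and combine the vma bookkeeping with dropping the object reference. The general shape of that refactor, sketched with a pthread mutex standing in for the kernel mutex; all names here are invented for the example.

#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct mapping {
        int refcount;
        /* ... */
};

/* Does the real work; the caller must already hold table_lock. */
static void mapping_close_locked(struct mapping *m)
{
        /* unlink m from a shared list, update counters, ... */
        (void)m;
}

/* Entry point for callers that do not hold the lock. */
void mapping_close(struct mapping *m)
{
        pthread_mutex_lock(&table_lock);
        mapping_close_locked(m);
        pthread_mutex_unlock(&table_lock);
}

/* A caller that needs several steps under one lock uses the _locked variant. */
void mapping_close_and_unref(struct mapping *m)
{
        pthread_mutex_lock(&table_lock);
        mapping_close_locked(m);
        m->refcount--;          /* e.g. drop a reference under the same lock */
        pthread_mutex_unlock(&table_lock);
}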
- */ -static void drm_vm_close(struct vm_area_struct *vma) +void drm_vm_close_locked(struct vm_area_struct *vma) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; @@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma) vma->vm_start, vma->vm_end - vma->vm_start); atomic_dec(&dev->vma_count); - mutex_lock(&dev->struct_mutex); list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { if (pt->vma == vma) { list_del(&pt->head); @@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma) break; } } +} + +/** + * \c close method for all virtual memory types. + * + * \param vma virtual memory area. + * + * Search the \p vma private data entry in drm_device::vmalist, unlink it, and + * free it. + */ +static void drm_vm_close(struct vm_area_struct *vma) +{ + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->minor->dev; + + mutex_lock(&dev->struct_mutex); + drm_vm_close_locked(vma); mutex_unlock(&dev->struct_mutex); } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2305a12..bdd39fb 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -40,6 +40,8 @@ #include #include +extern int intel_max_stolen; /* from AGP driver */ + /** * Sets up the hardware status page for devices that need a physical address * in the register. @@ -610,8 +612,10 @@ static int i915_batchbuffer(struct drm_device *dev, void *data, ret = copy_from_user(cliprects, batch->cliprects, batch->num_cliprects * sizeof(struct drm_clip_rect)); - if (ret != 0) + if (ret != 0) { + ret = -EFAULT; goto fail_free; + } } mutex_lock(&dev->struct_mutex); @@ -652,8 +656,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, return -ENOMEM; ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz); - if (ret != 0) + if (ret != 0) { + ret = -EFAULT; goto fail_batch_free; + } if (cmdbuf->num_cliprects) { cliprects = kcalloc(cmdbuf->num_cliprects, @@ -666,8 +672,10 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data, ret = copy_from_user(cliprects, cmdbuf->cliprects, cmdbuf->num_cliprects * sizeof(struct drm_clip_rect)); - if (ret != 0) + if (ret != 0) { + ret = -EFAULT; goto fail_clip_free; + } } mutex_lock(&dev->struct_mutex); @@ -875,7 +883,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) int reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; u32 temp_lo, temp_hi = 0; u64 mchbar_addr; - int ret = 0; + int ret; if (IS_I965G(dev)) pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); @@ -885,22 +893,23 @@ intel_alloc_mchbar_resource(struct drm_device *dev) /* If ACPI doesn't have it, assume we need to allocate it ourselves */ #ifdef CONFIG_PNP if (mchbar_addr && - pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { - ret = 0; - goto out; - } + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) + return 0; #endif /* Get some space for it */ - ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, + dev_priv->mch_res.name = "i915 MCHBAR"; + dev_priv->mch_res.flags = IORESOURCE_MEM; + ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, + &dev_priv->mch_res, MCHBAR_SIZE, MCHBAR_SIZE, PCIBIOS_MIN_MEM, - 0, pcibios_align_resource, + 0, pcibios_align_resource, dev_priv->bridge_dev); if (ret) { DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); dev_priv->mch_res.start = 0; - goto out; + return ret; } if (IS_I965G(dev)) @@ -909,8 +918,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev) pci_write_config_dword(dev_priv->bridge_dev, reg, lower_32_bits(dev_priv->mch_res.start)); -out: - return ret; + return 0; } /* Setup MCHBAR if possible, return true if we should disable it again */ @@ -1770,9 +1778,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) } } - div_u64(diff, diff1); + diff = div_u64(diff, diff1); ret = ((m * diff) + c); - div_u64(ret, 10); + ret = div_u64(ret, 10); dev_priv->last_count1 = total_count; dev_priv->last_time1 = now; @@ -1841,7 +1849,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) /* More magic constants... */ diff = diff * 1181; - div_u64(diff, diffms * 10); + diff = div_u64(diff, diffms * 10); dev_priv->gfx_power = diff; } @@ -2071,6 +2079,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto free_priv; } + /* overlay on gen2 is broken and can't address above 1G */ + if (IS_GEN2(dev)) + dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); + dev_priv->regs = ioremap(base, size); if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); @@ -2104,6 +2116,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_iomapfree; + if (prealloc_size > intel_max_stolen) { + DRM_INFO("detected %dM stolen memory, trimming to %dM\n", + prealloc_size >> 20, intel_max_stolen >> 20); + prealloc_size = intel_max_stolen; + } + dev_priv->wq = create_singlethread_workqueue("i915"); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); @@ -2204,6 +2222,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->mchdev_lock = &mchdev_lock; spin_unlock(&mchdev_lock); + /* XXX Prevent module unload due to memory corruption bugs. 
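Two of the i915 fixes just above are about dropped return values: copy_from_user() returns the number of bytes it could not copy, so a nonzero result is translated to -EFAULT rather than returned as a positive count, and div_u64() returns the quotient instead of dividing its argument in place, so a bare "div_u64(diff, diff1);" does nothing. A tiny stand-alone demonstration of the second case; div_u64 is re-implemented locally just for the demo.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's div_u64(): returns the quotient, does NOT
 * modify its argument in place. */
static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

int main(void)
{
        uint64_t diff = 1000;
        uint32_t diff1 = 3;

        div_u64(diff, diff1);          /* bug: result discarded, diff unchanged */
        printf("dropped result:  diff = %llu\n", (unsigned long long)diff);

        diff = div_u64(diff, diff1);   /* fixed: assign the quotient */
        printf("assigned result: diff = %llu\n", (unsigned long long)diff);
        return 0;
}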
*/ + __module_get(THIS_MODULE); + return 0; out_workqueue_free: @@ -2276,6 +2297,9 @@ int i915_driver_unload(struct drm_device *dev) i915_gem_lastclose(dev); intel_cleanup_overlay(dev); + + if (!I915_NEED_GFX_HWS(dev)) + i915_free_hws(dev); } intel_teardown_mchbar(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 423dc90..194e0c4 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -175,12 +175,18 @@ static const struct pci_device_id pciidlist[] = { INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), + INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), + INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), {0, 0, 0} }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5aa747f..dc5d2ef 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -34,6 +34,7 @@ #include #include #include +#include static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); @@ -465,8 +466,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, */ if (args->offset > obj->size || args->size > obj->size || args->offset + args->size > obj->size) { - drm_gem_object_unreference_unlocked(obj); - return -EINVAL; + ret = -EINVAL; + goto err; + } + + if (!access_ok(VERIFY_WRITE, + (char __user *)(uintptr_t)args->data_ptr, + args->size)) { + ret = -EFAULT; + goto err; } if (i915_gem_object_needs_bit17_swizzle(obj)) { @@ -478,8 +486,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, file_priv); } +err: drm_gem_object_unreference_unlocked(obj); - return ret; } @@ -568,8 +576,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - if (!access_ok(VERIFY_READ, user_data, remain)) - return -EFAULT; mutex_lock(&dev->struct_mutex); @@ -928,8 +934,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, */ if (args->offset > obj->size || args->size > obj->size || args->offset + args->size > obj->size) { - drm_gem_object_unreference_unlocked(obj); - return -EINVAL; + ret = -EINVAL; + goto err; + } + + if (!access_ok(VERIFY_READ, + (char __user *)(uintptr_t)args->data_ptr, + args->size)) { + ret = -EFAULT; + goto err; } /* We can only do the GTT pwrite on untiled buffers, as otherwise @@ -963,8 +976,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, DRM_INFO("pwrite failed %d\n", ret); #endif +err: drm_gem_object_unreference_unlocked(obj); - return ret; } @@ -3367,6 +3380,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (int) reloc->offset, reloc->read_domains, reloc->write_domain); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); return -EINVAL; } if (reloc->write_domain & I915_GEM_DOMAIN_CPU || @@ -3709,6 +3724,7 @@ 
i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret != 0) { DRM_ERROR("copy %d cliprects failed: %d\n", args->num_cliprects, ret); + ret = -EFAULT; goto pre_mutex_err; } } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cf41c67..140f017 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -312,6 +312,7 @@ #define MI_MODE 0x0209c # define VS_TIMER_DISPATCH (1 << 6) +# define MI_FLUSH_ENABLE (1 << 11) #define SCPD0 0x0209c /* 915+ only */ #define IER 0x020a0 @@ -2199,9 +2200,17 @@ #define WM1_LP_SR_EN (1<<31) #define WM1_LP_LATENCY_SHIFT 24 #define WM1_LP_LATENCY_MASK (0x7f<<24) +#define WM1_LP_FBC_LP1_MASK (0xf<<20) +#define WM1_LP_FBC_LP1_SHIFT 20 #define WM1_LP_SR_MASK (0x1ff<<8) #define WM1_LP_SR_SHIFT 8 #define WM1_LP_CURSOR_MASK (0x3f) +#define WM2_LP_ILK 0x4510c +#define WM2_LP_EN (1<<31) +#define WM3_LP_ILK 0x45110 +#define WM3_LP_EN (1<<31) +#define WM1S_LP_ILK 0x45120 +#define WM1S_LP_EN (1<<31) /* Memory latency timer register */ #define MLTR_ILK 0x11222 @@ -2700,6 +2709,9 @@ #define FDI_RXB_CHICKEN 0xc2010 #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) +#define SOUTH_DSPCLK_GATE_D 0xc2020 +#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) + /* CPU: FDI_TX */ #define FDI_TXA_CTL 0x60100 #define FDI_TXB_CTL 0x61100 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5e21b31..660c67c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -42,6 +42,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type); static void intel_update_watermarks(struct drm_device *dev); static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); +static void intel_crtc_update_cursor(struct drm_crtc *crtc); typedef struct { /* given values */ @@ -1501,6 +1502,7 @@ static void ironlake_enable_pll_edp (struct drm_crtc *crtc) dpa_ctl = I915_READ(DP_A); dpa_ctl |= DP_PLL_ENABLE; I915_WRITE(DP_A, dpa_ctl); + POSTING_READ(DP_A); udelay(200); } @@ -2312,6 +2314,9 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) int pipe = intel_crtc->pipe; bool enabled; + if (intel_crtc->dpms_mode == mode) + return; + dev_priv->display.dpms(crtc, mode); intel_crtc->dpms_mode = mode; @@ -2371,11 +2376,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; + if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ if (mode->clock * 3 > 27000 * 4) return MODE_CLOCK_HIGH; } + + /* XXX some encoders set the crtcinfo, others don't. + * Obviously we need some form of conflict resolution here... + */ + if (adjusted_mode->crtc_htotal == 0) + drm_mode_set_crtcinfo(adjusted_mode, 0); + return true; } @@ -3201,8 +3214,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, reg_value = I915_READ(WM1_LP_ILK); reg_value &= ~(WM1_LP_LATENCY_MASK | WM1_LP_SR_MASK | WM1_LP_CURSOR_MASK); - reg_value |= WM1_LP_SR_EN | - (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | + reg_value |= (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) | (sr_wm << WM1_LP_SR_SHIFT) | cursor_wm; I915_WRITE(WM1_LP_ILK, reg_value); @@ -3403,6 +3415,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; } + /* Ensure that the cursor is valid for the new mode before changing... 
*/ + intel_crtc_update_cursor(crtc); + if (is_lvds && dev_priv->lvds_downclock_avail) { has_reduced_clock = limit->find_pll(limit, crtc, dev_priv->lvds_downclock, @@ -3939,6 +3954,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) } } +/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ +static void intel_crtc_update_cursor(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int x = intel_crtc->cursor_x; + int y = intel_crtc->cursor_y; + uint32_t base, pos; + bool visible; + + pos = 0; + + if (crtc->fb) { + base = intel_crtc->cursor_addr; + if (x > (int) crtc->fb->width) + base = 0; + + if (y > (int) crtc->fb->height) + base = 0; + } else + base = 0; + + if (x < 0) { + if (x + intel_crtc->cursor_width < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + x = -x; + } + pos |= x << CURSOR_X_SHIFT; + + if (y < 0) { + if (y + intel_crtc->cursor_height < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + y = -y; + } + pos |= y << CURSOR_Y_SHIFT; + + visible = base != 0; + if (!visible && !intel_crtc->cursor_visble) + return; + + I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); + if (intel_crtc->cursor_visble != visible) { + uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR); + if (base) { + /* Hooray for CUR*CNTR differences */ + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); + cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + cntl |= pipe << 28; /* Connect to correct pipe */ + } else { + cntl &= ~(CURSOR_FORMAT_MASK); + cntl |= CURSOR_ENABLE; + cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; + } + } else { + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); + cntl |= CURSOR_MODE_DISABLE; + } else { + cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); + } + } + I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); + + intel_crtc->cursor_visble = visible; + } + /* and commit changes on next vblank */ + I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); + + if (visible) + intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); +} + static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, @@ -3949,11 +4043,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_gem_object *bo; struct drm_i915_gem_object *obj_priv; - int pipe = intel_crtc->pipe; - uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; - uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; - uint32_t temp = I915_READ(control); - size_t addr; + uint32_t addr; int ret; DRM_DEBUG_KMS("\n"); @@ -3961,12 +4051,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* if we want to turn off the cursor ignore width and height */ if (!handle) { DRM_DEBUG_KMS("cursor off\n"); - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); - temp |= CURSOR_MODE_DISABLE; - } else { - temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); - } addr = 0; bo = NULL; mutex_lock(&dev->struct_mutex); @@ -4008,7 +4092,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = obj_priv->gtt_offset; } else { - ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); + ret = i915_gem_attach_phys_object(dev, bo, + (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; @@ -4019,21 +4104,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (!IS_I9XX(dev)) I915_WRITE(CURSIZE, (height << 12) | width); - /* Hooray for CUR*CNTR differences */ - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); - temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; - temp |= (pipe << 28); /* Connect to correct pipe */ - } else { - temp &= ~(CURSOR_FORMAT_MASK); - temp |= CURSOR_ENABLE; - temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; - } - finish: - I915_WRITE(control, temp); - I915_WRITE(base, addr); - if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { if (intel_crtc->cursor_bo != bo) @@ -4047,6 +4118,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = bo; + intel_crtc->cursor_width = width; + intel_crtc->cursor_height = height; + + intel_crtc_update_cursor(crtc); return 0; fail_unpin: @@ -4060,34 +4135,12 @@ fail: static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - int pipe = intel_crtc->pipe; - uint32_t temp = 0; - uint32_t adder; - if (crtc->fb) { - intel_fb = to_intel_framebuffer(crtc->fb); - intel_mark_busy(dev, intel_fb->obj); - } + intel_crtc->cursor_x = x; + intel_crtc->cursor_y = y; - if (x < 0) { - temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; - x = -x; - } - if (y < 0) { - temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; - y = -y; - } - - temp |= x << CURSOR_X_SHIFT; - temp |= y << CURSOR_Y_SHIFT; - - adder = intel_crtc->cursor_addr; - I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); - I915_WRITE((pipe == 0) ? 
CURABASE : CURBBASE, adder); + intel_crtc_update_cursor(crtc); return 0; } @@ -4774,14 +4827,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->pending_flip_obj = obj; if (intel_crtc->plane) - flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else - flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - /* Wait for any previous flip to finish */ - if (IS_GEN3(dev)) - while (I915_READ(ISR) & flip_mask) - ; + if (IS_GEN3(dev) || IS_GEN2(dev)) { + BEGIN_LP_RING(2); + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); + OUT_RING(0); + ADVANCE_LP_RING(); + } /* Offset into the new buffer for cases of shared fbs between CRTCs */ offset = obj_priv->gtt_offset; @@ -4795,12 +4850,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, OUT_RING(offset | obj_priv->tiling_mode); pipesrc = I915_READ(pipesrc_reg); OUT_RING(pipesrc & 0x0fff0fff); - } else { + } else if (IS_GEN3(dev)) { OUT_RING(MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); OUT_RING(fb->pitch); OUT_RING(offset); OUT_RING(MI_NOOP); + } else { + OUT_RING(MI_DISPLAY_FLIP | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); + OUT_RING(fb->pitch); + OUT_RING(offset); + OUT_RING(MI_NOOP); } ADVANCE_LP_RING(); @@ -4864,7 +4925,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; intel_crtc->cursor_addr = 0; - intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; + intel_crtc->dpms_mode = -1; drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); intel_crtc->busy = false; @@ -5355,6 +5416,13 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); /* + * On Ibex Peak and Cougar Point, we need to disable clock + * gating for the panel power sequencer or it will fail to + * start up when no ports are active. 
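The intel_crtc_update_cursor() helper added a little earlier packs the cursor coordinates into one position register using sign/magnitude fields: a negative coordinate sets a per-axis sign bit and stores the absolute value, and the base address is forced to zero (cursor hidden) whenever no part of the cursor can be visible. Here is the packing step in isolation; the shift and sign-bit constants below are assumptions made for the sketch, not values taken from hardware documentation.

#include <stdint.h>

/* Illustrative field layout: X in the low half, Y in the high half,
 * each with its own sign bit (values assumed for this sketch). */
#define POS_X_SHIFT   0
#define POS_Y_SHIFT   16
#define POS_SIGN      0x8000u   /* shifted into place per axis */

uint32_t pack_cursor_pos(int x, int y)
{
        uint32_t pos = 0;

        if (x < 0) {
                pos |= POS_SIGN << POS_X_SHIFT;  /* sign bit for X */
                x = -x;                          /* store the magnitude */
        }
        pos |= (uint32_t)x << POS_X_SHIFT;

        if (y < 0) {
                pos |= POS_SIGN << POS_Y_SHIFT;  /* sign bit for Y */
                y = -y;
        }
        pos |= (uint32_t)y << POS_Y_SHIFT;

        return pos;
}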
+ */ + I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + + /* * According to the spec the following bits should be set in * order to enable memory self-refresh * The bit 22/21 of 0x42004 @@ -5371,6 +5439,9 @@ void intel_init_clock_gating(struct drm_device *dev) I915_WRITE(DISP_ARB_CTL, (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); + I915_WRITE(WM3_LP_ILK, 0); + I915_WRITE(WM2_LP_ILK, 0); + I915_WRITE(WM1_LP_ILK, 0); } return; } else if (IS_G4X(dev)) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5dde80f..15ae3b6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -229,7 +229,6 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, uint32_t ch_data = ch_ctl + 4; int i; int recv_bytes; - uint32_t ctl; uint32_t status; uint32_t aux_clock_divider; int try, precharge; @@ -253,41 +252,43 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, else precharge = 5; + if (I915_READ(ch_ctl) & DP_AUX_CH_CTL_SEND_BUSY) { + DRM_ERROR("dp_aux_ch not started status 0x%08x\n", + I915_READ(ch_ctl)); + return -EBUSY; + } + /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ - for (i = 0; i < send_bytes; i += 4) { - uint32_t d = pack_aux(send + i, send_bytes - i); - - I915_WRITE(ch_data + i, d); - } - - ctl = (DP_AUX_CH_CTL_SEND_BUSY | - DP_AUX_CH_CTL_TIME_OUT_400us | - (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | - (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR); + for (i = 0; i < send_bytes; i += 4) + I915_WRITE(ch_data + i, + pack_aux(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ - I915_WRITE(ch_ctl, ctl); - (void) I915_READ(ch_ctl); + I915_WRITE(ch_ctl, + DP_AUX_CH_CTL_SEND_BUSY | + DP_AUX_CH_CTL_TIME_OUT_400us | + (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | + (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | + (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); for (;;) { - udelay(100); status = I915_READ(ch_ctl); if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) break; + udelay(100); } /* Clear done status and any errors */ - I915_WRITE(ch_ctl, (status | - DP_AUX_CH_CTL_DONE | - DP_AUX_CH_CTL_TIME_OUT_ERROR | - DP_AUX_CH_CTL_RECEIVE_ERROR)); - (void) I915_READ(ch_ctl); - if ((status & DP_AUX_CH_CTL_TIME_OUT_ERROR) == 0) + I915_WRITE(ch_ctl, + status | + DP_AUX_CH_CTL_DONE | + DP_AUX_CH_CTL_TIME_OUT_ERROR | + DP_AUX_CH_CTL_RECEIVE_ERROR); + if (status & DP_AUX_CH_CTL_DONE) break; } @@ -314,15 +315,12 @@ intel_dp_aux_ch(struct intel_encoder *intel_encoder, /* Unload any bytes sent back from the other side */ recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >> DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT); - if (recv_bytes > recv_size) recv_bytes = recv_size; - for (i = 0; i < recv_bytes; i += 4) { - uint32_t d = I915_READ(ch_data + i); - - unpack_aux(d, recv + i, recv_bytes - i); - } + for (i = 0; i < recv_bytes; i += 4) + unpack_aux(I915_READ(ch_data + i), + recv + i, recv_bytes - i); return recv_bytes; } @@ -418,6 +416,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, uint16_t address = algo_data->address; uint8_t msg[5]; uint8_t reply[2]; + unsigned retry; int msg_bytes; int reply_bytes; int ret; @@ -452,14 +451,33 @@ 
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, break; } - for (;;) { - ret = intel_dp_aux_ch(intel_encoder, - msg, msg_bytes, - reply, reply_bytes); + for (retry = 0; retry < 5; retry++) { + ret = intel_dp_aux_ch(intel_encoder, + msg, msg_bytes, + reply, reply_bytes); if (ret < 0) { DRM_DEBUG_KMS("aux_ch failed %d\n", ret); return ret; } + + switch (reply[0] & AUX_NATIVE_REPLY_MASK) { + case AUX_NATIVE_REPLY_ACK: + /* I2C-over-AUX Reply field is only valid + * when paired with AUX ACK. + */ + break; + case AUX_NATIVE_REPLY_NACK: + DRM_DEBUG_KMS("aux_ch native nack\n"); + return -EREMOTEIO; + case AUX_NATIVE_REPLY_DEFER: + udelay(100); + continue; + default: + DRM_ERROR("aux_ch invalid native reply 0x%02x\n", + reply[0]); + return -EREMOTEIO; + } + switch (reply[0] & AUX_I2C_REPLY_MASK) { case AUX_I2C_REPLY_ACK: if (mode == MODE_I2C_READ) { @@ -467,17 +485,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, } return reply_bytes - 1; case AUX_I2C_REPLY_NACK: - DRM_DEBUG_KMS("aux_ch nack\n"); + DRM_DEBUG_KMS("aux_i2c nack\n"); return -EREMOTEIO; case AUX_I2C_REPLY_DEFER: - DRM_DEBUG_KMS("aux_ch defer\n"); + DRM_DEBUG_KMS("aux_i2c defer\n"); udelay(100); break; default: - DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); + DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); return -EREMOTEIO; } } + + DRM_ERROR("too many retries, giving up\n"); + return -EREMOTEIO; } static int diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 2f7970b..2702652 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -143,8 +143,6 @@ struct intel_crtc { struct drm_crtc base; enum pipe pipe; enum plane plane; - struct drm_gem_object *cursor_bo; - uint32_t cursor_addr; u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; bool busy; /* is scanout buffer being updated frequently? 
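The intel_dp change above turns an unbounded wait into a bounded retry loop: a native DEFER reply means the sink is busy and the transfer should be retried after a short delay, a NACK or an unrecognized reply is a hard error, and after five attempts the transfer fails with -EREMOTEIO. The same control flow as a stand-alone sketch; do_aux_transaction() is a stub standing in for the real AUX transfer.

#include <errno.h>
#include <unistd.h>

enum aux_reply { AUX_ACK, AUX_NACK, AUX_DEFER, AUX_INVALID };

/* Stub for the sketch: a real version would drive the AUX channel. */
static enum aux_reply do_aux_transaction(void)
{
        return AUX_ACK;
}

int aux_transfer_with_retries(void)
{
        unsigned int retry;

        for (retry = 0; retry < 5; retry++) {
                switch (do_aux_transaction()) {
                case AUX_ACK:
                        return 0;               /* done */
                case AUX_NACK:
                        return -EREMOTEIO;      /* sink refused: hard error */
                case AUX_DEFER:
                        usleep(100);            /* sink busy: wait, then retry */
                        continue;
                default:
                        return -EREMOTEIO;      /* malformed reply */
                }
        }

        return -EREMOTEIO;                      /* too many retries, give up */
}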
*/ @@ -153,6 +151,12 @@ struct intel_crtc { struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; int fdi_lanes; + + struct drm_gem_object *cursor_bo; + uint32_t cursor_addr; + int16_t cursor_x, cursor_y; + int16_t cursor_width, cursor_height; + bool cursor_visble; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0eab8df..1266ab3 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -811,6 +811,14 @@ static const struct dmi_system_id intel_no_lvds[] = { }, { .callback = intel_no_lvds_dmi_callback, + .ident = "AOpen i915GMm-HFS", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"), + DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"), + }, + }, + { + .callback = intel_no_lvds_dmi_callback, .ident = "Aopen i945GTt-VFA", .matches = { DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), @@ -988,8 +996,6 @@ void intel_lvds_init(struct drm_device *dev) intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); intel_encoder->crtc_mask = (1 << 1); - if (IS_I965G(dev)) - intel_encoder->crtc_mask |= (1 << 0); drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d7ad513..3d691c4 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -958,7 +958,7 @@ static int check_overlay_src(struct drm_device *dev, || rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; - /* check alingment constrains */ + /* check alignment constraints */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { case I915_OVERLAY_RGB: /* not implemented */ @@ -990,7 +990,10 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; /* stride checking */ - stride_mask = 63; + if (IS_I830(dev) || IS_845G(dev)) + stride_mask = 255; + else + stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; @@ -1361,6 +1364,12 @@ void intel_setup_overlay(struct drm_device *dev) goto out_free_bo; } overlay->flip_addr = overlay->reg_bo->gtt_offset; + + ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); + if (ret) { + DRM_ERROR("failed to move overlay register bo into the GTT\n"); + goto out_unpin_bo; + } } else { ret = i915_gem_attach_phys_object(dev, reg_bo, I915_GEM_PHYS_OVERLAY_REGS); @@ -1392,6 +1401,8 @@ void intel_setup_overlay(struct drm_device *dev) DRM_INFO("initialized overlay support\n"); return; +out_unpin_bo: + i915_gem_object_unpin(reg_bo); out_free_bo: drm_gem_object_unreference(reg_bo); out_free: diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 26362f8..4d9c5ce 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -203,9 +203,13 @@ static int init_render_ring(struct drm_device *dev, { drm_i915_private_t *dev_priv = dev->dev_private; int ret = init_ring_common(dev, ring); + int mode; + if (IS_I9XX(dev) && !IS_GEN3(dev)) { - I915_WRITE(MI_MODE, - (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH); + mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; + if (IS_GEN6(dev)) + mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; + I915_WRITE(MI_MODE, mode); } return ret; } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 76993ac..b7485d0 100644 --- 
a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1580,10 +1580,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); - if (sdvo_priv->is_tv) { - /* add 30ms delay when the output type is SDVO-TV */ - mdelay(30); - } + /* add 30ms delay when the output type might be TV */ + if (sdvo_priv->caps.output_flags & + (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) + mdelay(30); status = intel_sdvo_read_response(intel_encoder, &response, 2); DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c697191..0e24c80 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -522,6 +522,12 @@ struct drm_nouveau_private { struct work_struct irq_work; struct work_struct hpd_work; + struct { + spinlock_t lock; + uint32_t hpd0_bits; + uint32_t hpd1_bits; + } hpd_state; + struct list_head vbl_waiting; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 53360f1..4a3e366 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -52,6 +52,7 @@ nouveau_irq_preinstall(struct drm_device *dev) if (dev_priv->card_type == NV_50) { INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); + spin_lock_init(&dev_priv->hpd_state.lock); INIT_LIST_HEAD(&dev_priv->vbl_waiting); } } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 580a5d1..1a4408b 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -930,11 +930,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) struct drm_connector *connector; const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; uint32_t unplug_mask, plug_mask, change_mask; - uint32_t hpd0, hpd1 = 0; + uint32_t hpd0, hpd1; - hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); + spin_lock_irq(&dev_priv->hpd_state.lock); + hpd0 = dev_priv->hpd_state.hpd0_bits; + dev_priv->hpd_state.hpd0_bits = 0; + hpd1 = dev_priv->hpd_state.hpd1_bits; + dev_priv->hpd_state.hpd1_bits = 0; + spin_unlock_irq(&dev_priv->hpd_state.lock); + + hpd0 &= nv_rd32(dev, 0xe050); if (dev_priv->chipset >= 0x90) - hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); + hpd1 &= nv_rd32(dev, 0xe070); plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); @@ -976,10 +983,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); } - nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); - if (dev_priv->chipset >= 0x90) - nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); - drm_helper_hpd_irq_event(dev); } @@ -990,8 +993,22 @@ nv50_display_irq_handler(struct drm_device *dev) uint32_t delayed = 0; if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { - if (!work_pending(&dev_priv->hpd_work)) - queue_work(dev_priv->wq, &dev_priv->hpd_work); + uint32_t hpd0_bits, hpd1_bits = 0; + + hpd0_bits = nv_rd32(dev, 0xe054); + nv_wr32(dev, 0xe054, hpd0_bits); + + if (dev_priv->chipset >= 0x90) { + hpd1_bits = nv_rd32(dev, 0xe074); + nv_wr32(dev, 0xe074, hpd1_bits); + } + + spin_lock(&dev_priv->hpd_state.lock); + dev_priv->hpd_state.hpd0_bits |= hpd0_bits; + dev_priv->hpd_state.hpd1_bits |= hpd1_bits; + 
spin_unlock(&dev_priv->hpd_state.lock); + + queue_work(dev_priv->wq, &dev_priv->hpd_work); } while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 5f21df3..3fcae54 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -141,6 +141,8 @@ nv50_instmem_init(struct drm_device *dev) chan->file_priv = (struct drm_file *)-2; dev_priv->fifos[0] = dev_priv->fifos[127] = chan; + INIT_LIST_HEAD(&chan->ramht_refs); + /* Channel's PRAMIN object + heap */ ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0, NULL, &chan->ramin); diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 1d56983..b913bf1 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -131,7 +131,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_INDEX: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((index >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + @@ -141,7 +141,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_DATA: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((data >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + @@ -151,7 +151,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, case ATOM_IIO_MOVE_ATTR: temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << - CU8(base + 2)); + CU8(base + 3)); temp |= ((ctx-> io_attr >> CU8(base + 2)) & (0xFFFFFFFF >> (32 - diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 8c2d647..6e7b039 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -259,7 +259,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); - atombios_blank_crtc(crtc, ATOM_ENABLE); + if (radeon_crtc->enabled) + atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); @@ -527,6 +528,21 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, pll->algo = PLL_ALGO_LEGACY; pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; } + /* There is some evidence (often anecdotal) that RV515/RV620 LVDS + * (on some boards at least) prefers the legacy algo. I'm not + * sure whether this should handled generically or on a + * case-by-case quirk basis. Both algos should work fine in the + * majority of cases. 
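The nouveau hunks above move hotplug handling to a latch-and-drain scheme: the interrupt handler reads and acknowledges the status registers immediately and accumulates the bits under a spinlock, and the workqueue bottom half later takes and clears the accumulated bits under the same lock, so no events are lost between the two. A user-space sketch of that shape, with a pthread mutex standing in for the spinlock and made-up names throughout.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t hpd_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t pending_hpd;              /* bits latched by the "IRQ handler" */

/* Top half: read-and-ack the hardware status, then just accumulate it. */
void hpd_irq(uint32_t hw_status)
{
        pthread_mutex_lock(&hpd_lock);    /* spin_lock() in the kernel version */
        pending_hpd |= hw_status;
        pthread_mutex_unlock(&hpd_lock);
        /* ...queue the bottom half... */
}

/* Bottom half (workqueue): take a snapshot and clear it atomically. */
void hpd_work(void)
{
        uint32_t bits;

        pthread_mutex_lock(&hpd_lock);
        bits = pending_hpd;
        pending_hpd = 0;
        pthread_mutex_unlock(&hpd_lock);

        if (bits)
                printf("handle hotplug bits 0x%08x\n", bits);
}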
+ */ + if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) && + ((rdev->family == CHIP_RV515) || + (rdev->family == CHIP_RV620))) { + /* allow the user to overrride just in case */ + if (radeon_new_pll == 1) + pll->algo = PLL_ALGO_NEW; + else + pll->algo = PLL_ALGO_LEGACY; + } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; @@ -992,11 +1008,11 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) { - WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); - WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); + WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); + WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); } else { - WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, 0); - WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, 0); + WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); + WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(fb_location)); } } WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 1caf625..c500616 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -658,6 +658,43 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev) return 0; } +static int evergreen_cp_start(struct radeon_device *rdev) +{ + int r; + uint32_t cp_me; + + r = radeon_ring_lock(rdev, 7); + if (r) { + DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); + return r; + } + radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); + radeon_ring_write(rdev, 0x1); + radeon_ring_write(rdev, 0x0); + radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); + radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); + radeon_ring_write(rdev, 0); + radeon_ring_write(rdev, 0); + radeon_ring_unlock_commit(rdev); + + cp_me = 0xff; + WREG32(CP_ME_CNTL, cp_me); + + r = radeon_ring_lock(rdev, 4); + if (r) { + DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); + return r; + } + /* init some VGT regs */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); + radeon_ring_write(rdev, (VGT_VERTEX_REUSE_BLOCK_CNTL - PACKET3_SET_CONTEXT_REG_START) >> 2); + radeon_ring_write(rdev, 0xe); + radeon_ring_write(rdev, 0x10); + radeon_ring_unlock_commit(rdev); + + return 0; +} + int evergreen_cp_resume(struct radeon_device *rdev) { u32 tmp; @@ -702,7 +739,7 @@ int evergreen_cp_resume(struct radeon_device *rdev) rdev->cp.rptr = RREG32(CP_RB_RPTR); rdev->cp.wptr = RREG32(CP_RB_WPTR); - r600_cp_start(rdev); + evergreen_cp_start(rdev); rdev->cp.ready = true; r = radeon_ring_test(rdev); if (r) { @@ -1083,7 +1120,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(RCU_IND_INDEX, 0x203); efuse_straps_3 = RREG32(RCU_IND_DATA); - efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; + efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28); switch(efuse_box_bit_127_124) { case 0x0: @@ -1106,14 +1143,25 @@ static void evergreen_gpu_init(struct radeon_device *rdev) EVERGREEN_MAX_BACKENDS_MASK)); break; } - } else - gb_backend_map = - evergreen_get_tile_pipe_to_backend_map(rdev, - rdev->config.evergreen.max_tile_pipes, - rdev->config.evergreen.max_backends, - ((EVERGREEN_MAX_BACKENDS_MASK << - rdev->config.evergreen.max_backends) & - EVERGREEN_MAX_BACKENDS_MASK)); + } else { 
+ switch (rdev->family) { + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + gb_backend_map = 0x66442200; + break; + case CHIP_JUNIPER: + gb_backend_map = 0x00006420; + break; + default: + gb_backend_map = + evergreen_get_tile_pipe_to_backend_map(rdev, + rdev->config.evergreen.max_tile_pipes, + rdev->config.evergreen.max_backends, + ((EVERGREEN_MAX_BACKENDS_MASK << + rdev->config.evergreen.max_backends) & + EVERGREEN_MAX_BACKENDS_MASK)); + } + } WREG32(GB_BACKEND_MAP, gb_backend_map); WREG32(GB_ADDR_CONFIG, gb_addr_config); @@ -1341,6 +1389,8 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.visible_vram_size = rdev->mc.aper_size; + /* limit it to the aperture size for now as there is no blit support in 2.6.35/36*/ + rdev->mc.real_vram_size = rdev->mc.visible_vram_size; r600_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -1356,7 +1406,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev) static int evergreen_gpu_soft_reset(struct radeon_device *rdev) { struct evergreen_mc_save save; - u32 srbm_reset = 0; u32 grbm_reset = 0; dev_info(rdev->dev, "GPU softreset \n"); @@ -1395,16 +1444,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) udelay(50); WREG32(GRBM_SOFT_RESET, 0); (void)RREG32(GRBM_SOFT_RESET); - - /* reset all the system blocks */ - srbm_reset = SRBM_SOFT_RESET_ALL_MASK; - - dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset); - WREG32(SRBM_SOFT_RESET, srbm_reset); - (void)RREG32(SRBM_SOFT_RESET); - udelay(50); - WREG32(SRBM_SOFT_RESET, 0); - (void)RREG32(SRBM_SOFT_RESET); /* Wait a little for things to settle down */ udelay(50); dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", @@ -1415,10 +1454,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev) RREG32(GRBM_STATUS_SE1)); dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", RREG32(SRBM_STATUS)); - /* After reset we need to reinit the asic as GPU often endup in an - * incoherent state. - */ - atom_asic_init(rdev->mode_info.atom_context); evergreen_mc_resume(rdev, &save); return 0; } @@ -2030,6 +2065,11 @@ int evergreen_resume(struct radeon_device *rdev) { int r; + /* reset the asic, the gfx blocks are often in a bad state + * after the driver is unloaded or after a resume + */ + if (radeon_asic_reset(rdev)) + dev_warn(rdev->dev, "GPU reset failed !\n"); /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, * posting will perform necessary task to bring back GPU into good * shape. @@ -2131,6 +2171,11 @@ int evergreen_init(struct radeon_device *rdev) r = radeon_atombios_init(rdev); if (r) return r; + /* reset the asic, the gfx blocks are often in a bad state + * after the driver is unloaded or after a resume + */ + if (radeon_asic_reset(rdev)) + dev_warn(rdev->dev, "GPU reset failed !\n"); /* Post card if necessary */ if (!evergreen_card_posted(rdev)) { if (!rdev->bios) { diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index a89a15a..a3378ba 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2321,6 +2321,9 @@ void r100_vram_init_sizes(struct radeon_device *rdev) /* Fix for RN50, M6, M7 with 8/16/32(??) 
MBs of VRAM - * Novell bug 204882 + along with lots of ubuntu ones */ + if (rdev->mc.aper_size > config_aper_size) + config_aper_size = rdev->mc.aper_size; + if (config_aper_size > rdev->mc.real_vram_size) rdev->mc.mc_vram_size = config_aper_size; else @@ -3229,6 +3232,8 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, for (u = 0; u < track->num_texture; u++) { if (!track->textures[u].enabled) continue; + if (track->textures[u].lookup_disable) + continue; robj = track->textures[u].robj; if (robj == NULL) { DRM_ERROR("No texture bound to unit %u\n", u); @@ -3462,6 +3467,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track track->textures[i].robj = NULL; /* CS IB emission code makes sure texture unit are disabled */ track->textures[i].enabled = false; + track->textures[i].lookup_disable = false; track->textures[i].roundup_w = true; track->textures[i].roundup_h = true; if (track->separate_cube) diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index f47cdca..af65600 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h @@ -46,6 +46,7 @@ struct r100_cs_track_texture { unsigned height_11; bool use_pitch; bool enabled; + bool lookup_disable; bool roundup_w; bool roundup_h; unsigned compress_format; diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 0266d72..d2408c3 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -447,6 +447,8 @@ int r200_packet0_check(struct radeon_cs_parser *p, track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); } + if (idx_value & R200_TXFORMAT_LOOKUP_DISABLE) + track->textures[i].lookup_disable = true; switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) { case R200_TXFORMAT_I8: case R200_TXFORMAT_RGB332: diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index e100f69..abc2a66 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -869,7 +869,20 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) u32 tmp; /* flush hdp cache so updates hit vram */ - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && + !(rdev->flags & RADEON_IS_AGP)) { + void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + u32 tmp; + + /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL + * This seems to cause problems on some AGP cards. Just use the old + * method for them. 
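The r600 hunks around this point, the POSTING_READ added in the intel_display hunk earlier, and the dcdbas outb/inb change near the start of this section all lean on the same property: a read from the device cannot complete until earlier posted writes to it have landed, so "write, then read back" acts as a flush (for r7xx it is used to push the HDP cache out to VRAM). A minimal sketch of the pattern against a memory-mapped register modeled with a volatile pointer; in the kernel the equivalent would be writel()/readl() or outb()/inb().

#include <stdint.h>

void write_then_flush(volatile uint32_t *reg, uint32_t val)
{
        uint32_t tmp;

        *reg = val;     /* posted: may still sit in a write buffer */
        tmp = *reg;     /* read-back forces the write out to the device */
        (void)tmp;
}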
+ */ + WREG32(HDP_DEBUG1, 0); + tmp = readl((void __iomem *)ptr); + } else + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); @@ -1176,8 +1189,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->vram_end, mc->real_vram_size >> 20); } else { u64 base = 0; - if (rdev->flags & RADEON_IS_IGP) - base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + if (rdev->flags & RADEON_IS_IGP) { + base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + } radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, mc); @@ -2094,10 +2109,7 @@ int r600_cp_start(struct radeon_device *rdev) } radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5)); radeon_ring_write(rdev, 0x1); - if (rdev->family >= CHIP_CEDAR) { - radeon_ring_write(rdev, 0x0); - radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1); - } else if (rdev->family >= CHIP_RV770) { + if (rdev->family >= CHIP_RV770) { radeon_ring_write(rdev, 0x0); radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1); } else { @@ -3512,5 +3524,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) */ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) { - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. + * This seems to cause problems on some AGP cards. Just use the old + * method for them. + */ + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && + rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { + void __iomem *ptr = (void *)rdev->vram_scratch.ptr; + u32 tmp; + + WREG32(HDP_DEBUG1, 0); + tmp = readl((void __iomem *)ptr); + } else + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); } diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index d13622a..863e2bf 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -623,8 +623,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, int src_x = src_gpu_addr & 255; int dst_x = dst_gpu_addr & 255; int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); @@ -717,8 +717,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev, int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; - src_gpu_addr = src_gpu_addr & ~255; - dst_gpu_addr = dst_gpu_addr & ~255; + src_gpu_addr = src_gpu_addr & ~255ULL; + dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d84612a..33cda01 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h @@ -86,6 +86,7 @@ #define R600_HDP_NONSURFACE_BASE 0x2c04 #define R600_BUS_CNTL 0x5420 +# define R600_BIOS_ROM_DIS (1 << 1) #define R600_CONFIG_CNTL 0x5424 #define R600_CONFIG_MEMSIZE 0x5428 #define R600_CONFIG_F0_BASE 0x542C diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 59c1f87..84bc28e 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -245,6 +245,7 @@ #define HDP_NONSURFACE_SIZE 0x2C0C 
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_TILING_CONFIG 0x2F3C +#define HDP_DEBUG1 0x2F34 #define MC_VM_AGP_TOP 0x2184 #define MC_VM_AGP_BOT 0x2188 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f94dc6..e0865c7 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -997,6 +997,11 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +/* VRAM scratch page for HDP bug */ +struct r700_vram_scratch { + struct radeon_bo *robj; + volatile uint32_t *ptr; +}; /* * Core structure, functions and helpers. @@ -1060,6 +1065,7 @@ struct radeon_device { const struct firmware *pfp_fw; /* r6/700 PFP firmware */ const struct firmware *rlc_fw; /* r6/700 RLC firmware */ struct r600_blit r600_blit; + struct r700_vram_scratch vram_scratch; int msi_enabled; /* msi enabled */ struct r600_ih ih; /* r6/700 interrupt ring */ struct workqueue_struct *wq; diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 10673ae..f8dc72d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -84,6 +84,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; + /* some DCE3 boards have bad data for this entry */ + if (ASIC_IS_DCE3(rdev)) { + if ((i == 4) && + (gpio->usClkMaskRegisterIndex == 0x1fda) && + (gpio->sucI2cId.ucAccess == 0x94)) + gpio->sucI2cId.ucAccess = 0x14; + } + if (gpio->sucI2cId.ucAccess == id) { i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; @@ -206,6 +214,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, uint16_t *line_mux, struct radeon_hpd *hpd) { + struct radeon_device *rdev = dev->dev_private; /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x791e) && @@ -225,6 +234,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *connector_type = DRM_MODE_CONNECTOR_DVID; } + /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ + if ((dev->pdev->device == 0x796e) && + (dev->pdev->subsystem_vendor == 0x1462) && + (dev->pdev->subsystem_device == 0x7302)) { + if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) + return false; + } + /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x147b) && @@ -271,6 +289,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *line_mux = 0x90; } + /* mac rv630, rv730, others */ + if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) && + (*connector_type == DRM_MODE_CONNECTOR_DVII)) { + *connector_type = DRM_MODE_CONNECTOR_9PinDIN; + *line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1; + } + /* ASUS HD 3600 XT board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && @@ -308,13 +333,22 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } } - /* Acer laptop reports DVI-D as DVI-I */ + /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */ if ((dev->pdev->device == 0x95c4) && (dev->pdev->subsystem_vendor == 0x1025) && (dev->pdev->subsystem_device == 0x013c)) { + struct radeon_gpio_rec gpio; + if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && - 
(supported_device == ATOM_DEVICE_DFP1_SUPPORT)) + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { + gpio = radeon_lookup_gpio(rdev, 6); + *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); *connector_type = DRM_MODE_CONNECTOR_DVID; + } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { + gpio = radeon_lookup_gpio(rdev, 7); + *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); + } } /* XFX Pine Group device rv730 reports no VGA DDC lines @@ -1049,7 +1083,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) } break; case 2: - if (igp_info->info_2.ucMemoryType & 0x0f) + if (igp_info->info_2.ulBootUpSidePortClock) return true; break; default: @@ -2091,7 +2125,7 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; /* tell the bios not to handle mode switching */ - bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE); + bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; if (rdev->family >= CHIP_R600) { WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); @@ -2142,10 +2176,13 @@ void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) else bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); - if (lock) + if (lock) { bios_6_scratch |= ATOM_S6_CRITICAL_STATE; - else + bios_6_scratch &= ~ATOM_S6_ACC_MODE; + } else { bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; + bios_6_scratch |= ATOM_S6_ACC_MODE; + } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 2c92137..89a8eed 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } return true; } + static bool r700_read_disabled_bios(struct radeon_device *rdev) { uint32_t viph_control; @@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) bool r; viph_control = RREG32(RADEON_VIPH_CONTROL); - bus_cntl = RREG32(RADEON_BUS_CNTL); + bus_cntl = RREG32(R600_BUS_CNTL); d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); @@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) /* disable VIP */ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); /* enable the rom */ - WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); /* Disable VGA mode */ WREG32(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | @@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) cg_spll_status = RREG32(R600_CG_SPLL_STATUS); } WREG32(RADEON_VIPH_CONTROL, viph_control); - WREG32(RADEON_BUS_CNTL, bus_cntl); + WREG32(R600_BUS_CNTL, bus_cntl); WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); @@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) bool r; viph_control = RREG32(RADEON_VIPH_CONTROL); - bus_cntl = RREG32(RADEON_BUS_CNTL); + bus_cntl = RREG32(R600_BUS_CNTL); d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); @@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) /* 
disable VIP */ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); /* enable the rom */ - WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); /* Disable VGA mode */ WREG32(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | @@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) /* restore regs */ WREG32(RADEON_VIPH_CONTROL, viph_control); - WREG32(RADEON_BUS_CNTL, bus_cntl); + WREG32(R600_BUS_CNTL, bus_cntl); WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index adccbc2..346dc6d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -1083,6 +1083,8 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.load_detect_property, 1); connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -1096,6 +1098,8 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1122,6 +1126,11 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.load_detect_property, 1); } + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_DVII) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: @@ -1142,6 +1151,11 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.coherent_mode_property, 1); subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_HDMIB) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: @@ -1172,6 +1186,9 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); + connector->interlace_allowed = true; + /* in theory with a DP to VGA converter... 
*/ + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1187,6 +1204,8 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.tv_std_property, radeon_atombios_get_tv_info(rdev)); } + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); @@ -1206,6 +1225,8 @@ radeon_add_atom_connector(struct drm_device *dev, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; } @@ -1277,6 +1298,8 @@ radeon_add_legacy_connector(struct drm_device *dev, rdev->mode_info.load_detect_property, 1); connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -1290,6 +1313,8 @@ radeon_add_legacy_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1307,6 +1332,11 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); } subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_DVII) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1329,6 +1359,8 @@ radeon_add_legacy_connector(struct drm_device *dev, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); } + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); @@ -1342,6 +1374,8 @@ radeon_add_legacy_connector(struct drm_device *dev, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; } diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 4eb67c0..f7ce8e1 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -203,6 +203,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; int xorigin = 0, yorigin = 0; + int w = radeon_crtc->cursor_width; if (x < 0) xorigin = -x + 1; @@ -213,22 +214,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, if (yorigin >= CURSOR_HEIGHT) yorigin = CURSOR_HEIGHT - 1; - radeon_lock_cursor(crtc, true); - if (ASIC_IS_DCE4(rdev)) { - /* cursors are offset into the total surface */ - x += crtc->x; - y += crtc->y; - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); - - /* XXX: check if evergreen has the same issues as avivo chips */ - WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, - ((xorigin ? 0 : x) << 16) | - (yorigin ? 
0 : y)); - WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); - WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, - ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1)); - } else if (ASIC_IS_AVIVO(rdev)) { - int w = radeon_crtc->cursor_width; + if (ASIC_IS_AVIVO(rdev)) { int i = 0; struct drm_crtc *crtc_p; @@ -260,7 +246,17 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, if (w <= 0) w = 1; } + } + radeon_lock_cursor(crtc, true); + if (ASIC_IS_DCE4(rdev)) { + WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, + ((xorigin ? 0 : x) << 16) | + (yorigin ? 0 : y)); + WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, + ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); + } else if (ASIC_IS_AVIVO(rdev)) { WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, ((xorigin ? 0 : x) << 16) | (yorigin ? 0 : y)); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index dd279da..cb5b4ce 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 mc->mc_vram_size = mc->aper_size; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { + if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); mc->real_vram_size = mc->aper_size; mc->mc_vram_size = mc->aper_size; @@ -799,11 +799,6 @@ int radeon_resume_kms(struct drm_device *dev) radeon_pm_resume(rdev); radeon_restore_bios_scratch_regs(rdev); - /* turn on display hw */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - radeon_fbdev_set_suspend(rdev, 0); release_console_sem(); @@ -811,6 +806,10 @@ int radeon_resume_kms(struct drm_device *dev) radeon_hpd_init(rdev); /* blat the mode back in */ drm_helper_resume_force_mode(dev); + /* turn on display hw */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); + } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8154cdf..ce627b2 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP5_SUPPORT) DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); + if (devices & ATOM_DEVICE_DFP6_SUPPORT) + DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_TV1_SUPPORT) DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_CV_SUPPORT) @@ -617,6 +619,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, *frac_fb_div_p = best_frac_feedback_div; *ref_div_p = best_ref_div; *post_div_p = best_post_div; + DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n", + freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div, + best_ref_div, best_post_div); + } static bool diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c 
b/drivers/gpu/drm/radeon/radeon_encoders.c index e0b30b2..d7d06f1 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -626,11 +626,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *dig_connector; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); connector = radeon_get_connector_for_encoder(encoder); - if (!connector) - return 0; - + if (!connector) { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + return ATOM_ENCODER_MODE_DVI; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + default: + return ATOM_ENCODER_MODE_CRT; + } + } radeon_connector = to_radeon_connector(connector); switch (connector->connector_type) { @@ -1545,6 +1557,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; + + /* check for pre-DCE3 cards with shared encoders; + * can't really use the links individually, so don't disable + * the encoder if it's in use by another connector + */ + if (!ASIC_IS_DCE3(rdev)) { + struct drm_encoder *other_encoder; + struct radeon_encoder *other_radeon_encoder; + + list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { + other_radeon_encoder = to_radeon_encoder(other_encoder); + if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && + drm_helper_encoder_in_use(other_encoder)) + goto disable_done; + } + } + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); switch (radeon_encoder->encoder_id) { @@ -1584,6 +1613,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) break; } +disable_done: if (radeon_encoder_is_digital(encoder)) { if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) r600_hdmi_disable(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 5def6f5..5c25846 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -95,6 +95,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) } } + /* switch the pads to ddc mode */ + if (ASIC_IS_DCE3(rdev) && rec->hw_capable) { + temp = RREG32(rec->mask_clk_reg); + temp &= ~(1 << 16); + WREG32(rec->mask_clk_reg, temp); + } + /* clear the output pin values */ temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; WREG32(rec->a_clk_reg, temp); @@ -935,6 +942,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; i2c->dev = dev; + sprintf(i2c->adapter.name, "Radeon aux bus %s", name); i2c_set_adapdata(&i2c->adapter, i2c); i2c->adapter.algo_data = &i2c->algo.dp; i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 059bfa4..a108c7e 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev) * chips. Disable MSI on them for now. 
*/ if ((rdev->family >= CHIP_RV380) && - (!(rdev->flags & RADEON_IS_IGP))) { + (!(rdev->flags & RADEON_IS_IGP)) && + (!(rdev->flags & RADEON_IS_AGP))) { int ret = pci_enable_msi(rdev->pdev); if (!ret) { rdev->msi_enabled = 1; - DRM_INFO("radeon: using MSI.\n"); + dev_info(rdev->dev, "radeon: using MSI.\n"); } } rdev->irq.installed = true; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index ab389f8..b20379e 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -106,7 +106,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) info = data; value_ptr = (uint32_t *)((unsigned long)info->value); - value = *value_ptr; + if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) + return -EFAULT; + switch (info->request) { case RADEON_INFO_DEVICE_ID: value = dev->pci_device; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index e1e5255..cf3a51f 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, if (!ref_div) return 1; - vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; + vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div; /* * This is horribly crude: the VCO frequency range is divided into diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d5b9373..5505125 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -102,6 +102,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, type = ttm_bo_type_device; } *bo_ptr = NULL; + +retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -109,7 +111,6 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, bo->gobj = gobj; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); - radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ mutex_lock(&rdev->vram_mutex); @@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, &radeon_ttm_bo_destroy); mutex_unlock(&rdev->vram_mutex); if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) + if (r != -ERESTARTSYS) { + if (domain == RADEON_GEM_DOMAIN_VRAM) { + domain |= RADEON_GEM_DOMAIN_GTT; + goto retry; + } dev_err(rdev->dev, "object_init failed for (%lu, 0x%08X)\n", size, domain); + } return r; } *bo_ptr = bo; @@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head) { struct radeon_bo_list *lobj; struct radeon_bo *bo; + u32 domain; int r; list_for_each_entry(lobj, head, list) { @@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head) list_for_each_entry(lobj, head, list) { bo = lobj->bo; if (!bo->pin_count) { - if (lobj->wdomain) { - radeon_ttm_placement_from_domain(bo, - lobj->wdomain); - } else { - radeon_ttm_placement_from_domain(bo, - lobj->rdomain); - } + domain = lobj->wdomain ? 
lobj->wdomain : lobj->rdomain; + + retry: + radeon_ttm_placement_from_domain(bo, domain); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false, false); - if (unlikely(r)) + if (unlikely(r)) { + if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { + domain |= RADEON_GEM_DOMAIN_GTT; + goto retry; + } return r; + } } lobj->gpu_offset = radeon_bo_gpu_offset(bo); lobj->tiling_flags = bo->tiling_flags; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 3fa6984..c91b741 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -224,6 +224,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) { int i; + /* no need to take locks, etc. if nothing's going to change */ + if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && + (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) + return; + mutex_lock(&rdev->ddev->struct_mutex); mutex_lock(&rdev->vram_mutex); mutex_lock(&rdev->cp.mutex); diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index c332f46..6492881 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -2836,6 +2836,7 @@ # define R200_TXFORMAT_ST_ROUTE_STQ5 (5 << 24) # define R200_TXFORMAT_ST_ROUTE_MASK (7 << 24) # define R200_TXFORMAT_ST_ROUTE_SHIFT 24 +# define R200_TXFORMAT_LOOKUP_DISABLE (1 << 27) # define R200_TXFORMAT_ALPHA_MASK_ENABLE (1 << 28) # define R200_TXFORMAT_CHROMA_KEY_ENABLE (1 << 29) # define R200_TXFORMAT_CUBIC_MAP_ENABLE (1 << 30) diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index ce4ecbe..76c768b 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -398,7 +398,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev) struct drm_display_mode *mode1 = NULL; struct rs690_watermark wm0; struct rs690_watermark wm1; - u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; + u32 tmp; + u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); + u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); fixed20_12 priority_mark02, priority_mark12, fill_rate; fixed20_12 a, b; @@ -495,10 +497,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev) d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); } - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } else if (mode0) { if (dfixed_trunc(wm0.dbpp) > 64) a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); @@ -528,13 +526,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev) d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); if (rdev->disp_priority == 2) d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, - S_006D48_D2MODE_PRIORITY_A_OFF(1)); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, - S_006D4C_D2MODE_PRIORITY_B_OFF(1)); - } else { + } else if (mode1) { if (dfixed_trunc(wm1.dbpp) > 64) a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); else @@ -563,13 +555,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) d2mode_priority_a_cnt = 
dfixed_trunc(priority_mark12); if (rdev->disp_priority == 2) d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, - S_006548_D1MODE_PRIORITY_A_OFF(1)); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, - S_00654C_D1MODE_PRIORITY_B_OFF(1)); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } + + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 0c9c169..7e4fbdb 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -925,7 +925,9 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) struct drm_display_mode *mode1 = NULL; struct rv515_watermark wm0; struct rv515_watermark wm1; - u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; + u32 tmp; + u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF; + u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF; fixed20_12 priority_mark02, priority_mark12, fill_rate; fixed20_12 a, b; @@ -999,10 +1001,6 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; } - WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } else if (mode0) { if (dfixed_trunc(wm0.dbpp) > 64) a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); @@ -1032,11 +1030,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); if (rdev->disp_priority == 2) d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); - } else { + } else if (mode1) { if (dfixed_trunc(wm1.dbpp) > 64) a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); else @@ -1065,11 +1059,12 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); if (rdev->disp_priority == 2) d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); - WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } + + WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } void rv515_bandwidth_update(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b7fd820..67e8073 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -189,7 +189,10 @@ static void rv770_mc_program(struct radeon_device *rdev) WREG32((0x2c20 + j), 0x00000000); WREG32((0x2c24 + j), 0x00000000); } - WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); + /* r7xx hw bug. 
Read from HDP_DEBUG1 rather + * than writing to HDP_REG_COHERENCY_FLUSH_CNTL + */ + tmp = RREG32(HDP_DEBUG1); rv515_mc_stop(rdev, &save); if (r600_mc_wait_for_idle(rdev)) { @@ -886,6 +889,54 @@ static void rv770_gpu_init(struct radeon_device *rdev) } +static int rv770_vram_scratch_init(struct radeon_device *rdev) +{ + int r; + u64 gpu_addr; + + if (rdev->vram_scratch.robj == NULL) { + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, + true, RADEON_GEM_DOMAIN_VRAM, + &rdev->vram_scratch.robj); + if (r) { + return r; + } + } + + r = radeon_bo_reserve(rdev->vram_scratch.robj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->vram_scratch.robj, + RADEON_GEM_DOMAIN_VRAM, &gpu_addr); + if (r) { + radeon_bo_unreserve(rdev->vram_scratch.robj); + return r; + } + r = radeon_bo_kmap(rdev->vram_scratch.robj, + (void **)&rdev->vram_scratch.ptr); + if (r) + radeon_bo_unpin(rdev->vram_scratch.robj); + radeon_bo_unreserve(rdev->vram_scratch.robj); + + return r; +} + +static void rv770_vram_scratch_fini(struct radeon_device *rdev) +{ + int r; + + if (rdev->vram_scratch.robj == NULL) { + return; + } + r = radeon_bo_reserve(rdev->vram_scratch.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->vram_scratch.robj); + radeon_bo_unpin(rdev->vram_scratch.robj); + radeon_bo_unreserve(rdev->vram_scratch.robj); + } + radeon_bo_unref(&rdev->vram_scratch.robj); +} + int rv770_mc_init(struct radeon_device *rdev) { u32 tmp; @@ -951,6 +1002,9 @@ static int rv770_startup(struct radeon_device *rdev) if (r) return r; } + r = rv770_vram_scratch_init(rdev); + if (r) + return r; rv770_gpu_init(rdev); r = r600_blit_init(rdev); if (r) { @@ -1176,6 +1230,7 @@ void rv770_fini(struct radeon_device *rdev) r600_irq_fini(rdev); radeon_irq_kms_fini(rdev); rv770_pcie_gart_fini(rdev); + rv770_vram_scratch_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9506f8c..6111a02 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -128,6 +128,7 @@ #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_TILING_CONFIG 0x2F3C +#define HDP_DEBUG1 0x2F34 #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c index f44bdc0..5cd004e 100644 --- a/drivers/hid/hid-egalax.c +++ b/drivers/hid/hid-egalax.c @@ -31,7 +31,7 @@ struct egalax_data { bool first; /* is this the first finger in the frame? */ bool valid; /* valid finger data, or just placeholder? */ bool activity; /* at least one active finger previously? 
*/ - __u16 lastx, lasty; /* latest valid (x, y) in the frame */ + __u16 lastx, lasty, lastz; /* latest valid (x, y, z) in the frame */ }; static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi, @@ -79,6 +79,10 @@ static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi, case HID_DG_TIPPRESSURE: hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_PRESSURE); + /* touchscreen emulation */ + input_set_abs_params(hi->input, ABS_PRESSURE, + field->logical_minimum, + field->logical_maximum, 0, 0); return 1; } return 0; @@ -109,8 +113,8 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input) if (td->valid) { /* emit multitouch events */ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id); - input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); - input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); + input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x >> 3); + input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y >> 3); input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z); input_mt_sync(input); @@ -121,6 +125,7 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input) */ td->lastx = td->x; td->lasty = td->y; + td->lastz = td->z; } /* @@ -129,8 +134,9 @@ static void egalax_filter_event(struct egalax_data *td, struct input_dev *input) * the oldest on the panel, the one we want for single touch */ if (!td->first && td->activity) { - input_event(input, EV_ABS, ABS_X, td->lastx); - input_event(input, EV_ABS, ABS_Y, td->lasty); + input_event(input, EV_ABS, ABS_X, td->lastx >> 3); + input_event(input, EV_ABS, ABS_Y, td->lasty >> 3); + input_event(input, EV_ABS, ABS_PRESSURE, td->lastz); } if (!td->valid) { diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 3ccd478..7dca2fa 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -111,6 +111,12 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t int ret = 0; mutex_lock(&minors_lock); + + if (!hidraw_table[minor]) { + ret = -ENODEV; + goto out; + } + dev = hidraw_table[minor]->hid; if (!dev->hid_output_raw_report) { @@ -246,6 +252,10 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, mutex_lock(&minors_lock); dev = hidraw_table[minor]; + if (!dev) { + ret = -ENODEV; + goto out; + } switch (cmd) { case HIDIOCGRDESCSIZE: @@ -319,6 +329,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, ret = -ENOTTY; } +out: mutex_unlock(&minors_lock); return ret; } diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index b729c02..ffd6899 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -828,6 +828,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co } } else { int skipped_report_id = 0; + int report_id = buf[0]; if (buf[0] == 0x0) { /* Don't send the Report ID */ buf++; @@ -837,7 +838,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), HID_REQ_SET_REPORT, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, - ((report_type + 1) << 8) | *buf, + ((report_type + 1) << 8) | report_id, interface->desc.bInterfaceNumber, buf, count, USB_CTRL_SET_TIMEOUT); /* count also the report id, if this was a numbered report. 
*/ diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 5f5aa39..3fe5490 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -33,7 +33,6 @@ static const struct hid_blacklist { { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, - { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index 65335b2..9975bbf 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -916,27 +916,27 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); - int val, orig_div, new_div, shift; + int val, orig_div, new_div; val = simple_strtol(buf, NULL, 10); new_div = DIV_TO_REG(val); - if (new_div == 0) { - return -EINVAL; - } + mutex_lock(&data->update_lock); orig_div = data->fan_div[nr]; data->fan_div[nr] = DIV_FROM_REG(new_div); if (nr < 4) { /* 0 <= nr < 4 */ - shift = 2 * nr; adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3, - ((DIV_TO_REG(orig_div) & (~(0x03 << shift))) | - (new_div << shift))); + (DIV_TO_REG(data->fan_div[0]) << 0) | + (DIV_TO_REG(data->fan_div[1]) << 2) | + (DIV_TO_REG(data->fan_div[2]) << 4) | + (DIV_TO_REG(data->fan_div[3]) << 6)); } else { /* 3 < nr < 8 */ - shift = 2 * (nr - 4); adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7, - ((DIV_TO_REG(orig_div) & (~(0x03 << (2 * shift)))) | - (new_div << shift))); + (DIV_TO_REG(data->fan_div[4]) << 0) | + (DIV_TO_REG(data->fan_div[5]) << 2) | + (DIV_TO_REG(data->fan_div[6]) << 4) | + (DIV_TO_REG(data->fan_div[7]) << 6)); } if (data->fan_div[nr] != orig_div) { diff --git a/drivers/hwmon/ads7871.c b/drivers/hwmon/ads7871.c index b300a20..5231934 100644 --- a/drivers/hwmon/ads7871.c +++ b/drivers/hwmon/ads7871.c @@ -160,30 +160,12 @@ static const struct attribute_group ads7871_group = { static int __devinit ads7871_probe(struct spi_device *spi) { - int status, ret, err = 0; + int ret, err; uint8_t val; struct ads7871_data *pdata; dev_dbg(&spi->dev, "probe\n"); - pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL); - if (!pdata) { - err = -ENOMEM; - goto exit; - } - - status = sysfs_create_group(&spi->dev.kobj, &ads7871_group); - if (status < 0) - goto error_free; - - pdata->hwmon_dev = hwmon_device_register(&spi->dev); - if (IS_ERR(pdata->hwmon_dev)) { - err = PTR_ERR(pdata->hwmon_dev); - goto error_remove; - } - - spi_set_drvdata(spi, pdata); - /* Configure the SPI bus */ spi->mode = (SPI_MODE_0); spi->bits_per_word = 8; @@ -201,6 +183,24 @@ static int __devinit ads7871_probe(struct spi_device *spi) we need to make sure we really have a chip*/ if (val != ret) { err = -ENODEV; + goto exit; + } + + pdata = kzalloc(sizeof(struct ads7871_data), GFP_KERNEL); + if (!pdata) { + err = -ENOMEM; + goto exit; + } + + err = sysfs_create_group(&spi->dev.kobj, &ads7871_group); + if (err < 0) + goto error_free; + + spi_set_drvdata(spi, pdata); + + pdata->hwmon_dev = hwmon_device_register(&spi->dev); + if 
(IS_ERR(pdata->hwmon_dev)) { + err = PTR_ERR(pdata->hwmon_dev); goto error_remove; } diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 0e4b564..3f5a00e 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -275,7 +275,6 @@ static int emc1403_probe(struct i2c_client *client, res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr); if (res) { dev_warn(&client->dev, "create group failed\n"); - hwmon_device_unregister(data->hwmon_dev); goto thermal_error1; } data->hwmon_dev = hwmon_device_register(&client->dev); diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 537841e..fec078d 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -2100,7 +2100,6 @@ static int f71882fg_remove(struct platform_device *pdev) int nr_fans = (data->type == f71882fg) ? 4 : 3; u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); - platform_set_drvdata(pdev, NULL); if (data->hwmon_dev) hwmon_device_unregister(data->hwmon_dev); @@ -2167,6 +2166,7 @@ static int f71882fg_remove(struct platform_device *pdev) } } + platform_set_drvdata(pdev, NULL); kfree(data); return 0; diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c index 0f58ecc..9638d58 100644 --- a/drivers/hwmon/f75375s.c +++ b/drivers/hwmon/f75375s.c @@ -79,7 +79,7 @@ enum chips { f75373, f75375 }; #define F75375_REG_PWM2_DROP_DUTY 0x6C #define FAN_CTRL_LINEAR(nr) (4 + nr) -#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2)) +#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2)) /* * Data structures and manipulation thereof @@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val) return -EINVAL; fanmode = f75375_read8(client, F75375_REG_FAN_TIMER); - fanmode = ~(3 << FAN_CTRL_MODE(nr)); + fanmode &= ~(3 << FAN_CTRL_MODE(nr)); switch (val) { case 0: /* Full speed */ @@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); conf = f75375_read8(client, F75375_REG_CONFIG1); - conf = ~(1 << FAN_CTRL_LINEAR(nr)); + conf &= ~(1 << FAN_CTRL_LINEAR(nr)); if (val == 0) conf |= (1 << FAN_CTRL_LINEAR(nr)) ; diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index 7580f55..36e9575 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c @@ -221,6 +221,8 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), + AXIS_DMI_MATCH("HPB532x", "HP ProBook 532", y_inverted), + AXIS_DMI_MATCH("Mini5102", "HP Mini 5102", xy_rotated_left_usd), { NULL, } /* Laptop models without axis info (yet): * "NC6910" "HP Compaq 6910" diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 8bdf80d..8364932 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -143,6 +143,37 @@ static const struct pci_device_id k8temp_ids[] = { MODULE_DEVICE_TABLE(pci, k8temp_ids); +static int __devinit is_rev_g_desktop(u8 model) +{ + u32 brandidx; + + if (model < 0x69) + return 0; + + if (model == 0xc1 || model == 0x6c || model == 0x7c) + return 0; + + /* + * Differentiate between AM2 and ASB1. + * See "Constructing the processor Name String" in "Revision + * Guide for AMD NPT Family 0Fh Processors" (33610). 
+ */ + brandidx = cpuid_ebx(0x80000001); + brandidx = (brandidx >> 9) & 0x1f; + + /* Single core */ + if ((model == 0x6f || model == 0x7f) && + (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc)) + return 0; + + /* Dual core */ + if (model == 0x6b && + (brandidx == 0xb || brandidx == 0xc)) + return 0; + + return 1; +} + static int __devinit k8temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev, "wrong - check erratum #141\n"); } - if ((model >= 0x69) && - !(model == 0xc1 || model == 0x6c || model == 0x7c || - model == 0x6b || model == 0x6f || model == 0x7f)) { + if (is_rev_g_desktop(model)) { /* * RevG desktop CPUs (i.e. no socket S1G1 or * ASB1 parts) need additional offset, diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index 6138f03..fc591ae 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c @@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) wake_up_interruptible(&lis3_dev.misc_wait); kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); out: - if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && + if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev && lis3_dev.idev->input->users) return IRQ_WAKE_THREAD; return IRQ_HANDLED; @@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) * io-apic is not configurable (and generates a warning) but I keep it * in case of support for other hardware. */ - if (dev->whoami == WAI_8B) + if (dev->pdata && dev->whoami == WAI_8B) thread_fn = lis302dl_interrupt_thread1_8b; else thread_fn = NULL; diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index b3841a6..2e8f0c9 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -1259,6 +1259,7 @@ static int lm85_probe(struct i2c_client *client, switch (data->type) { case adm1027: case adt7463: + case adt7468: case emc6d100: case emc6d102: data->freq_map = adm1027_freq_map; diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c index 4a64b85..68e69a4 100644 --- a/drivers/hwmon/pc87360.c +++ b/drivers/hwmon/pc87360.c @@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_update_device(struct device *dev) static int __init pc87360_device_add(unsigned short address) { - struct resource res = { - .name = "pc87360", - .flags = IORESOURCE_IO, - }; - int err, i; + struct resource res[3]; + int err, i, res_count; pdev = platform_device_alloc("pc87360", address); if (!pdev) { @@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(unsigned short address) goto exit; } + memset(res, 0, 3 * sizeof(struct resource)); + res_count = 0; for (i = 0; i < 3; i++) { if (!extra_isa[i]) continue; - res.start = extra_isa[i]; - res.end = extra_isa[i] + PC87360_EXTENT - 1; + res[res_count].start = extra_isa[i]; + res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1; + res[res_count].name = "pc87360", + res[res_count].flags = IORESOURCE_IO, - err = acpi_check_resource_conflict(&res); + err = acpi_check_resource_conflict(&res[res_count]); if (err) goto exit_device_put; - err = platform_device_add_resources(pdev, &res, 1); - if (err) { - printk(KERN_ERR "pc87360: Device resource[%d] " - "addition failed (%d)\n", i, err); - goto exit_device_put; - } + res_count++; + } + + err = platform_device_add_resources(pdev, res, res_count); + if (err) { + printk(KERN_ERR "pc87360: Device resources addition failed " + "(%d)\n", err); + goto exit_device_put; } err = platform_device_add(pdev); diff --git a/drivers/hwmon/sht15.c 
b/drivers/hwmon/sht15.c index a610e78..38a41d2 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -333,11 +333,11 @@ static inline int sht15_calc_humid(struct sht15_data *data) const int c1 = -4; const int c2 = 40500; /* x 10 ^ -6 */ - const int c3 = -2800; /* x10 ^ -9 */ + const int c3 = -28; /* x 10 ^ -7 */ RHlinear = c1*1000 + c2 * data->val_humid/1000 - + (data->val_humid * data->val_humid * c3)/1000000; + + (data->val_humid * data->val_humid * c3) / 10000; return (temp - 25000) * (10000 + 80 * data->val_humid) / 1000000 + RHlinear; } diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c index f397ce7..b2074e3 100644 --- a/drivers/hwmon/via686a.c +++ b/drivers/hwmon/via686a.c @@ -687,6 +687,13 @@ static int __devexit via686a_remove(struct platform_device *pdev) return 0; } +static void via686a_update_fan_div(struct via686a_data *data) +{ + int reg = via686a_read_value(data, VIA686A_REG_FANDIV); + data->fan_div[0] = (reg >> 4) & 0x03; + data->fan_div[1] = reg >> 6; +} + static void __devinit via686a_init_device(struct via686a_data *data) { u8 reg; @@ -700,6 +707,9 @@ static void __devinit via686a_init_device(struct via686a_data *data) via686a_write_value(data, VIA686A_REG_TEMP_MODE, (reg & ~VIA686A_TEMP_MODE_MASK) | VIA686A_TEMP_MODE_CONTINUOUS); + + /* Pre-read fan clock divisor values */ + via686a_update_fan_div(data); } static struct via686a_data *via686a_update_device(struct device *dev) @@ -751,9 +761,7 @@ static struct via686a_data *via686a_update_device(struct device *dev) (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) & 0xc0) >> 6; - i = via686a_read_value(data, VIA686A_REG_FANDIV); - data->fan_div[0] = (i >> 4) & 0x03; - data->fan_div[1] = i >> 6; + via686a_update_fan_div(data); data->alarms = via686a_read_value(data, VIA686A_REG_ALARM1) | diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 0dcaba9..e01a3e9 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -277,6 +277,11 @@ struct w83627ehf_data { struct device *hwmon_dev; struct mutex lock; + const u8 *REG_FAN_START_OUTPUT; + const u8 *REG_FAN_STOP_OUTPUT; + const u8 *REG_FAN_MAX_OUTPUT; + const u8 *REG_FAN_STEP_OUTPUT; + struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -524,7 +529,10 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) } } - for (i = 0; i < 4; i++) { + for (i = 0; i < data->pwm_num; i++) { + if (!(data->has_fan & (1 << i))) + continue; + /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */ if (i != 1) { pwmcfg = w83627ehf_read_value(data, @@ -546,6 +554,17 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, W83627EHF_REG_FAN_STOP_TIME[i]); + + if (data->REG_FAN_MAX_OUTPUT[i] != 0xff) + data->fan_max_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_MAX_OUTPUT[i]); + + if (data->REG_FAN_STEP_OUTPUT[i] != 0xff) + data->fan_step_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_STEP_OUTPUT[i]); + data->target_temp[i] = w83627ehf_read_value(data, W83627EHF_REG_TARGET[i]) & @@ -1126,7 +1145,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \ + w83627ehf_write_value(data, data->REG_##REG[nr], val); \ 
mutex_unlock(&data->update_lock); \ return count; \ } @@ -1206,12 +1225,26 @@ static struct sensor_device_attribute sda_sf3_arrays[] = { store_fan_stop_output, 1), SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, store_fan_stop_output, 2), +}; - /* pwm1 and pwm3 don't support max and step settings */ + +/* + * pwm1 and pwm3 don't support max and step settings on all chips. + * Need to check support while generating/removing attribute files. + */ +static struct sensor_device_attribute sda_sf3_max_step_arrays[] = { + SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 0), + SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 0), SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, store_fan_max_output, 1), SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, store_fan_step_output, 1), + SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, + store_fan_max_output, 2), + SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, + store_fan_step_output, 2), }; static ssize_t @@ -1235,6 +1268,12 @@ static void w83627ehf_device_remove_files(struct device *dev) for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) device_remove_file(dev, &sda_sf3_arrays[i].dev_attr); + for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { + struct sensor_device_attribute *attr = + &sda_sf3_max_step_arrays[i]; + if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) + device_remove_file(dev, &attr->dev_attr); + } for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr); for (i = 0; i < data->in_num; i++) { @@ -1352,6 +1391,11 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) data->in6_skip = !data->temp3_disable; } + data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; + data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; + data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT; + data->REG_FAN_STEP_OUTPUT = W83627EHF_REG_FAN_STEP_OUTPUT; + /* Initialize the chip */ w83627ehf_init_device(data); @@ -1440,6 +1484,15 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) &sda_sf3_arrays[i].dev_attr))) goto exit_remove; + for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { + struct sensor_device_attribute *attr = + &sda_sf3_max_step_arrays[i]; + if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { + err = device_create_file(dev, &attr->dev_attr); + if (err) + goto exit_remove; + } + } /* if fan4 is enabled create the sf3 files for it */ if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c index bbd7760..29933f8 100644 --- a/drivers/i2c/busses/i2c-pca-isa.c +++ b/drivers/i2c/busses/i2c-pca-isa.c @@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg) static int pca_isa_waitforcompletion(void *pd) { - long ret = ~0; unsigned long timeout; + long ret; if (irq > -1) { ret = wait_event_timeout(pca_wait, @@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd) } else { /* Do polling */ timeout = jiffies + pca_isa_ops.timeout; - while (((pca_isa_readbyte(pd, I2C_PCA_CON) - & I2C_PCA_CON_SI) == 0) - && (ret = time_before(jiffies, timeout))) + do { + ret = time_before(jiffies, timeout); + if (pca_isa_readbyte(pd, I2C_PCA_CON) + & I2C_PCA_CON_SI) + break; udelay(100); + } while (ret); } + return 
ret > 0; } diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index ef5c784..ace6799 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) static int i2c_pca_pf_waitforcompletion(void *pd) { struct i2c_pca_pf_data *i2c = pd; - long ret = ~0; unsigned long timeout; + long ret; if (i2c->irq) { ret = wait_event_timeout(i2c->wait, @@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd) } else { /* Do polling */ timeout = jiffies + i2c->adap.timeout; - while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) - & I2C_PCA_CON_SI) == 0) - && (ret = time_before(jiffies, timeout))) + do { + ret = time_before(jiffies, timeout); + if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON) + & I2C_PCA_CON_SI) + break; udelay(100); + } while (ret); } return ret > 0; @@ -221,7 +224,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev) if (irq) { ret = request_irq(irq, i2c_pca_pf_handler, - IRQF_TRIGGER_FALLING, i2c->adap.name, i2c); + IRQF_TRIGGER_FALLING, pdev->name, i2c); if (ret) goto e_reqirq; } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 0815e10..9e6aa8d 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -923,6 +923,14 @@ static int i2c_do_del_adapter(struct i2c_driver *driver, static int __unregister_client(struct device *dev, void *dummy) { struct i2c_client *client = i2c_verify_client(dev); + if (client && strcmp(client->name, "dummy")) + i2c_unregister_device(client); + return 0; +} + +static int __unregister_dummy(struct device *dev, void *dummy) +{ + struct i2c_client *client = i2c_verify_client(dev); if (client) i2c_unregister_device(client); return 0; @@ -977,8 +985,12 @@ int i2c_del_adapter(struct i2c_adapter *adap) i2c_unlock_adapter(adap); /* Detach any active clients. This can't fail, thus we do not - checking the returned value. */ + * check the returned value. This is a two-pass process, because + * we can't remove the dummy devices during the first pass: they + * could have been instantiated by real devices wishing to clean + * them up properly, so we give them a chance to do that first. */ res = device_for_each_child(&adap->dev, NULL, __unregister_client); + res = device_for_each_child(&adap->dev, NULL, __unregister_dummy); #ifdef CONFIG_I2C_COMPAT class_compat_remove_link(i2c_adapter_compat_class, &adap->dev, diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 64207df..2de76cc 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -506,15 +506,22 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, return (flags & REQ_FAILED) ? 
-EIO : 0; } -static void ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) +/* + * returns true if rq has been completed + */ +static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd) { unsigned int nr_bytes = cmd->nbytes - cmd->nleft; if (cmd->tf_flags & IDE_TFLAG_WRITE) nr_bytes -= cmd->last_xfer_len; - if (nr_bytes > 0) + if (nr_bytes > 0) { ide_complete_rq(drive, 0, nr_bytes); + return true; + } + + return false; } static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) @@ -679,7 +686,8 @@ out_end: } if (uptodate == 0 && rq->bio) - ide_cd_error_cmd(drive, cmd); + if (ide_cd_error_cmd(drive, cmd)) + return ide_stopped; /* make sure it's fully ended */ if (blk_fs_request(rq) == 0) { diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 54f0fb4..ae48da9 100755 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -308,7 +308,7 @@ static int intel_idle_probe(void) break; case 0x1C: /* 28 - Atom Processor */ - lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ + lapic_timer_reliable_states = (1 << 1); /* C1 */ cpuidle_state_table = atom_cstates; choose_substate = choose_zero_substate; break; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index ad63b79..2422945 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -2987,6 +2987,7 @@ static int cm_sidr_req_handler(struct cm_work *work) goto out; /* No match. */ } atomic_inc(&cur_cm_id_priv->refcount); + atomic_inc(&cm_id_priv->refcount); spin_unlock_irq(&cm.lock); cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b930b81..2a072be 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1136,6 +1136,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) cm_id->context = conn_id; cm_id->cm_handler = cma_ib_handler; + /* + * Protect against the user destroying conn_id from another thread + * until we're done accessing it. + */ + atomic_inc(&conn_id->refcount); ret = conn_id->id.event_handler(&conn_id->id, &event); if (!ret) { /* @@ -1148,8 +1153,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); mutex_unlock(&lock); mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); goto out; } + cma_deref_id(conn_id); /* Destroy the CM ID by returning a non-zero value. */ conn_id->cm_id.ib = NULL; @@ -1351,17 +1358,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, event.param.conn.private_data_len = iw_event->private_data_len; event.param.conn.initiator_depth = attr.max_qp_init_rd_atom; event.param.conn.responder_resources = attr.max_qp_rd_atom; + + /* + * Protect against the user destroying conn_id from another thread + * until we're done accessing it. + */ + atomic_inc(&conn_id->refcount); ret = conn_id->id.event_handler(&conn_id->id, &event); if (ret) { /* User wants to destroy the CM ID */ conn_id->cm_id.iw = NULL; cma_exch(conn_id, CMA_DESTROYING); mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); rdma_destroy_id(&conn_id->id); goto out; } mutex_unlock(&conn_id->handler_mutex); + cma_deref_id(conn_id); out: if (dev) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 6fcfbeb..abb8714 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -891,68 +891,81 @@ out: return ret ? 
ret : in_len; } +static int copy_wc_to_user(void __user *dest, struct ib_wc *wc) +{ + struct ib_uverbs_wc tmp; + + tmp.wr_id = wc->wr_id; + tmp.status = wc->status; + tmp.opcode = wc->opcode; + tmp.vendor_err = wc->vendor_err; + tmp.byte_len = wc->byte_len; + tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data; + tmp.qp_num = wc->qp->qp_num; + tmp.src_qp = wc->src_qp; + tmp.wc_flags = wc->wc_flags; + tmp.pkey_index = wc->pkey_index; + tmp.slid = wc->slid; + tmp.sl = wc->sl; + tmp.dlid_path_bits = wc->dlid_path_bits; + tmp.port_num = wc->port_num; + tmp.reserved = 0; + + if (copy_to_user(dest, &tmp, sizeof tmp)) + return -EFAULT; + + return 0; +} + ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) { struct ib_uverbs_poll_cq cmd; - struct ib_uverbs_poll_cq_resp *resp; + struct ib_uverbs_poll_cq_resp resp; + u8 __user *header_ptr; + u8 __user *data_ptr; struct ib_cq *cq; - struct ib_wc *wc; - int ret = 0; - int i; - int rsize; + struct ib_wc wc; + int ret; if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; - wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL); - if (!wc) - return -ENOMEM; - - rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc); - resp = kmalloc(rsize, GFP_KERNEL); - if (!resp) { - ret = -ENOMEM; - goto out_wc; - } - cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); - if (!cq) { - ret = -EINVAL; - goto out; - } + if (!cq) + return -EINVAL; - resp->count = ib_poll_cq(cq, cmd.ne, wc); + /* we copy a struct ib_uverbs_poll_cq_resp to user space */ + header_ptr = (void __user *)(unsigned long) cmd.response; + data_ptr = header_ptr + sizeof resp; - put_cq_read(cq); + memset(&resp, 0, sizeof resp); + while (resp.count < cmd.ne) { + ret = ib_poll_cq(cq, 1, &wc); + if (ret < 0) + goto out_put; + if (!ret) + break; + + ret = copy_wc_to_user(data_ptr, &wc); + if (ret) + goto out_put; - for (i = 0; i < resp->count; i++) { - resp->wc[i].wr_id = wc[i].wr_id; - resp->wc[i].status = wc[i].status; - resp->wc[i].opcode = wc[i].opcode; - resp->wc[i].vendor_err = wc[i].vendor_err; - resp->wc[i].byte_len = wc[i].byte_len; - resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data; - resp->wc[i].qp_num = wc[i].qp->qp_num; - resp->wc[i].src_qp = wc[i].src_qp; - resp->wc[i].wc_flags = wc[i].wc_flags; - resp->wc[i].pkey_index = wc[i].pkey_index; - resp->wc[i].slid = wc[i].slid; - resp->wc[i].sl = wc[i].sl; - resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits; - resp->wc[i].port_num = wc[i].port_num; + data_ptr += sizeof(struct ib_uverbs_wc); + ++resp.count; } - if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize)) + if (copy_to_user(header_ptr, &resp, sizeof resp)) { ret = -EFAULT; + goto out_put; + } -out: - kfree(resp); + ret = in_len; -out_wc: - kfree(wc); - return ret ? 
ret : in_len; +out_put: + put_cq_read(cq); + return ret; } ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 8f0caf7..78fbe9f 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h @@ -53,7 +53,7 @@ #define T3_MAX_PBL_SIZE 256 #define T3_MAX_RQ_SIZE 1024 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) -#define T3_MAX_CQ_DEPTH 262144 +#define T3_MAX_CQ_DEPTH 65536 #define T3_MAX_NUM_STAG (1<<15) #define T3_MAX_MR_SIZE 0x100000000ULL #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index ebfb117..9c05ddd 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep) V_MSS_IDX(mtu_idx) | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | + V_CONG_CONTROL_FLAVOR(cong_flavor); skb->priority = CPL_PRIORITY_SETUP; set_arp_failure_handler(skb, act_open_req_arp_failure); @@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb) V_MSS_IDX(mtu_idx) | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | + V_CONG_CONTROL_FLAVOR(cong_flavor); rpl = cplhdr(skb); rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 34157bb..f5320b5 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, memcpy(joydev->abspam, abspam, len); + for (i = 0; i < joydev->nabs; i++) + joydev->absmap[joydev->abspam[i]] = i; + out: kfree(abspam); return retval; diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index 05edd75..a9cf768 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c @@ -205,8 +205,8 @@ struct atp { bool overflow_warned; int x_old; /* last reported x/y, */ int y_old; /* used for smoothing */ - u8 xy_cur[ATP_XSENSORS + ATP_YSENSORS]; - u8 xy_old[ATP_XSENSORS + ATP_YSENSORS]; + signed char xy_cur[ATP_XSENSORS + ATP_YSENSORS]; + signed char xy_old[ATP_XSENSORS + ATP_YSENSORS]; int xy_acc[ATP_XSENSORS + ATP_YSENSORS]; int idlecount; /* number of empty packets */ struct work_struct work; @@ -531,7 +531,7 @@ static void atp_complete_geyser_1_2(struct urb *urb) for (i = 0; i < ATP_XSENSORS + ATP_YSENSORS; i++) { /* accumulate the change */ - int change = dev->xy_old[i] - dev->xy_cur[i]; + signed char change = dev->xy_old[i] - dev->xy_cur[i]; dev->xy_acc[i] -= change; /* prevent down drifting */ diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 6dedded..3564cda 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -55,6 +55,14 @@ #define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236 #define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237 #define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238 +/* MacbookAir3,2 (unibody), aka wellspring5 */ +#define 
USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI 0x023f +#define USB_DEVICE_ID_APPLE_WELLSPRING4_ISO 0x0240 +#define USB_DEVICE_ID_APPLE_WELLSPRING4_JIS 0x0241 +/* MacbookAir3,1 (unibody), aka wellspring4 */ +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI 0x0242 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO 0x0243 +#define USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS 0x0244 #define BCM5974_DEVICE(prod) { \ .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ @@ -80,6 +88,14 @@ static const struct usb_device_id bcm5974_table[] = { BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_ISO), BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING3_JIS), + /* MacbookAir3,2 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4_JIS), + /* MacbookAir3,1 */ + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS), /* Terminating entry */ {} }; @@ -234,6 +250,30 @@ static const struct bcm5974_config bcm5974_config_table[] = { { DIM_X, DIM_X / SN_COORD, -4460, 5166 }, { DIM_Y, DIM_Y / SN_COORD, -75, 6700 } }, + { + USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING4_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, + { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } + }, + { + USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI, + USB_DEVICE_ID_APPLE_WELLSPRING4A_ISO, + USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, + HAS_INTEGRATED_BUTTON, + 0x84, sizeof(struct bt_data), + 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, + { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, + { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, + { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, + { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } + }, {} }; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 705589d..4009b41 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -712,8 +712,8 @@ static const struct dmi_system_id __initconst toshiba_dmi_table[] = { }, }, - { } #endif + { } }; void __init synaptics_module_init(void) diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index b6aa7d2..298c8e5 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -51,7 +51,8 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) -#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100) +#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ +#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) /* synaptics modes query bits */ diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index ed7ad74..a0730fd 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -333,6 +333,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* Sony Vaio VPCZ122GX */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCZ122GX"), + }, + }, + { /* Sony Vaio FS-115b */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), @@ -413,6 +420,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), }, }, + { + /* Dell Vostro V13 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + }, + }, { } }; @@ -534,6 +548,17 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { }; #endif +static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { + { + /* Dell Vostro V13 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"), + }, + }, + { } +}; + /* * Some Wistron based laptops need us to explicitly enable the 'Dritek * keyboard extension' to make their extra keys start generating scancodes. @@ -886,6 +911,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_nomux_table)) i8042_nomux = true; + if (dmi_check_system(i8042_dmi_notimeout_table)) + i8042_notimeout = true; + if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; #endif /* CONFIG_X86 */ diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 6440a8f..3e8a78f 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -65,6 +65,10 @@ static unsigned int i8042_blink_frequency = 500; module_param_named(panicblink, i8042_blink_frequency, uint, 0600); MODULE_PARM_DESC(panicblink, "Frequency with which keyboard LEDs should blink when kernel panics"); +static bool i8042_notimeout; +module_param_named(notimeout, i8042_notimeout, bool, 0); +MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); + #ifdef CONFIG_X86 static bool i8042_dritek; module_param_named(dritek, i8042_dritek, bool, 0); @@ -507,7 +511,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) } else { dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) | - ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0); + ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0); port_no = (str & I8042_STR_AUXDATA) ? I8042_AUX_PORT_NO : I8042_KBD_PORT_NO; @@ -861,9 +865,6 @@ static int i8042_controller_selftest(void) unsigned char param; int i = 0; - if (!i8042_reset) - return 0; - /* * We try this 5 times; on some really fragile systems this does not * take the first time... @@ -1020,7 +1021,8 @@ static void i8042_controller_reset(void) * Reset the controller if requested. */ - i8042_controller_selftest(); + if (i8042_reset) + i8042_controller_selftest(); /* * Restore the original control register setting. @@ -1094,23 +1096,11 @@ static void i8042_dritek_enable(void) #ifdef CONFIG_PM /* - * Here we try to restore the original BIOS settings to avoid - * upsetting it. - */ - -static int i8042_pm_reset(struct device *dev) -{ - i8042_controller_reset(); - - return 0; -} - -/* * Here we try to reset everything back to a state we had * before suspending. 
*/ -static int i8042_pm_restore(struct device *dev) +static int i8042_controller_resume(bool force_reset) { int error; @@ -1118,9 +1108,11 @@ static int i8042_pm_restore(struct device *dev) if (error) return error; - error = i8042_controller_selftest(); - if (error) - return error; + if (i8042_reset || force_reset) { + error = i8042_controller_selftest(); + if (error) + return error; + } /* * Restore original CTR value and disable all ports @@ -1162,6 +1154,28 @@ static int i8042_pm_restore(struct device *dev) return 0; } +/* + * Here we try to restore the original BIOS settings to avoid + * upsetting it. + */ + +static int i8042_pm_reset(struct device *dev) +{ + i8042_controller_reset(); + + return 0; +} + +static int i8042_pm_resume(struct device *dev) +{ + /* + * On resume from S2R we always try to reset the controller + * to bring it in a sane state. (In case of S2D we expect + * BIOS to reset the controller for us.) + */ + return i8042_controller_resume(true); +} + static int i8042_pm_thaw(struct device *dev) { i8042_interrupt(0, NULL); @@ -1169,9 +1183,14 @@ static int i8042_pm_thaw(struct device *dev) return 0; } +static int i8042_pm_restore(struct device *dev) +{ + return i8042_controller_resume(false); +} + static const struct dev_pm_ops i8042_pm_ops = { .suspend = i8042_pm_reset, - .resume = i8042_pm_restore, + .resume = i8042_pm_resume, .thaw = i8042_pm_thaw, .poweroff = i8042_pm_reset, .restore = i8042_pm_restore, @@ -1389,9 +1408,11 @@ static int __init i8042_probe(struct platform_device *dev) i8042_platform_device = dev; - error = i8042_controller_selftest(); - if (error) - return error; + if (i8042_reset) { + error = i8042_controller_selftest(); + if (error) + return error; + } error = i8042_controller_init(); if (error) @@ -1483,8 +1504,8 @@ static int __init i8042_init(void) static void __exit i8042_exit(void) { - platform_driver_unregister(&i8042_driver); platform_device_unregister(i8042_platform_device); + platform_driver_unregister(&i8042_driver); i8042_platform_exit(); panic_blink = NULL; diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 415f630..8a6a2a1 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -409,8 +409,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom) /* general pen packet */ if ((data[1] & 0xb8) == 0xa0) { t = (data[6] << 2) | ((data[7] >> 6) & 3); - if (features->type >= INTUOS4S && features->type <= INTUOS4L) + if ((features->type >= INTUOS4S && features->type <= INTUOS4L) || + features->type == WACOM_21UX2) { t = (t << 1) | (data[1] & 1); + } input_report_abs(input, ABS_PRESSURE, t); input_report_abs(input, ABS_TILT_X, ((data[7] << 1) & 0x7e) | (data[8] >> 7)); diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c index e140816..6ad8be1 100644 --- a/drivers/input/xen-kbdfront.c +++ b/drivers/input/xen-kbdfront.c @@ -109,7 +109,7 @@ static irqreturn_t input_handler(int rq, void *dev_id) static int __devinit xenkbd_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { - int ret, i; + int ret, i, abs; struct xenkbd_info *info; struct input_dev *kbd, *ptr; @@ -127,6 +127,11 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, if (!info->page) goto error_nomem; + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) + abs = 0; + if (abs) + xenbus_printf(XBT_NIL, dev->nodename, "request-abs-pointer", "1"); + /* keyboard */ kbd = input_allocate_device(); if (!kbd) @@ -136,11 +141,12 @@ static int 
__devinit xenkbd_probe(struct xenbus_device *dev, kbd->id.bustype = BUS_PCI; kbd->id.vendor = 0x5853; kbd->id.product = 0xffff; - kbd->evbit[0] = BIT(EV_KEY); + + __set_bit(EV_KEY, kbd->evbit); for (i = KEY_ESC; i < KEY_UNKNOWN; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); for (i = KEY_OK; i < KEY_MAX; i++) - set_bit(i, kbd->keybit); + __set_bit(i, kbd->keybit); ret = input_register_device(kbd); if (ret) { @@ -159,12 +165,20 @@ static int __devinit xenkbd_probe(struct xenbus_device *dev, ptr->id.bustype = BUS_PCI; ptr->id.vendor = 0x5853; ptr->id.product = 0xfffe; - ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS); + + if (abs) { + __set_bit(EV_ABS, ptr->evbit); + input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); + input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + } else { + input_set_capability(ptr, EV_REL, REL_X); + input_set_capability(ptr, EV_REL, REL_Y); + } + input_set_capability(ptr, EV_REL, REL_WHEEL); + + __set_bit(EV_KEY, ptr->evbit); for (i = BTN_LEFT; i <= BTN_TASK; i++) - set_bit(i, ptr->keybit); - ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL); - input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0); - input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0); + __set_bit(i, ptr->keybit); ret = input_register_device(ptr); if (ret) { @@ -271,7 +285,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, enum xenbus_state backend_state) { struct xenkbd_info *info = dev_get_drvdata(&dev->dev); - int ret, val; + int val; switch (backend_state) { case XenbusStateInitialising: @@ -282,17 +296,6 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, case XenbusStateInitWait: InitWait: - ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, - "feature-abs-pointer", "%d", &val); - if (ret < 0) - val = 0; - if (val) { - ret = xenbus_printf(XBT_NIL, info->xbdev->nodename, - "request-abs-pointer", "1"); - if (ret) - printk(KERN_WARNING - "xenkbd: can't request abs-pointer"); - } xenbus_switch_state(dev, XenbusStateConnected); break; diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c index 47a5ffe..fef5e75 100644 --- a/drivers/isdn/gigaset/bas-gigaset.c +++ b/drivers/isdn/gigaset/bas-gigaset.c @@ -438,23 +438,27 @@ static void cmd_in_timeout(unsigned long data) return; } - if (ucs->retry_cmd_in++ < BAS_RETRY) { - dev_notice(cs->dev, "control read: timeout, retry %d\n", - ucs->retry_cmd_in); - rc = atread_submit(cs, BAS_TIMEOUT); - if (rc >= 0 || rc == -ENODEV) - /* resubmitted or disconnected */ - /* - bypass regular exit block */ - return; - } else { + if (ucs->retry_cmd_in++ >= BAS_RETRY) { dev_err(cs->dev, "control read: timeout, giving up after %d tries\n", ucs->retry_cmd_in); + kfree(ucs->rcvbuf); + ucs->rcvbuf = NULL; + ucs->rcvbuf_size = 0; + error_reset(cs); + return; + } + + gig_dbg(DEBUG_USBREQ, "%s: timeout, retry %d", + __func__, ucs->retry_cmd_in); + rc = atread_submit(cs, BAS_TIMEOUT); + if (rc < 0) { + kfree(ucs->rcvbuf); + ucs->rcvbuf = NULL; + ucs->rcvbuf_size = 0; + if (rc != -ENODEV) + error_reset(cs); } - kfree(ucs->rcvbuf); - ucs->rcvbuf = NULL; - ucs->rcvbuf_size = 0; - error_reset(cs); } /* read_ctrl_callback @@ -470,18 +474,11 @@ static void read_ctrl_callback(struct urb *urb) struct cardstate *cs = inbuf->cs; struct bas_cardstate *ucs = cs->hw.bas; int status = urb->status; - int have_data = 0; unsigned numbytes; int rc; update_basstate(ucs, 0, BS_ATRDPEND); wake_up(&ucs->waitqueue); - - if (!ucs->rcvbuf_size) { - dev_warn(cs->dev, "%s: no receive in progress\n", 
__func__); - return; - } - del_timer(&ucs->timer_cmd_in); switch (status) { @@ -495,19 +492,10 @@ static void read_ctrl_callback(struct urb *urb) numbytes = ucs->rcvbuf_size; } - /* copy received bytes to inbuf */ - have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes); - - if (unlikely(numbytes < ucs->rcvbuf_size)) { - /* incomplete - resubmit for remaining bytes */ - ucs->rcvbuf_size -= numbytes; - ucs->retry_cmd_in = 0; - rc = atread_submit(cs, BAS_TIMEOUT); - if (rc >= 0 || rc == -ENODEV) - /* resubmitted or disconnected */ - /* - bypass regular exit block */ - return; - error_reset(cs); + /* copy received bytes to inbuf, notify event layer */ + if (gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes)) { + gig_dbg(DEBUG_INTR, "%s-->BH", __func__); + gigaset_schedule_event(cs); } break; @@ -516,37 +504,32 @@ static void read_ctrl_callback(struct urb *urb) case -EINPROGRESS: /* pending */ case -ENODEV: /* device removed */ case -ESHUTDOWN: /* device shut down */ - /* no action necessary */ + /* no further action necessary */ gig_dbg(DEBUG_USBREQ, "%s: %s", __func__, get_usb_statmsg(status)); break; - default: /* severe trouble */ - dev_warn(cs->dev, "control read: %s\n", - get_usb_statmsg(status)); + default: /* other errors: retry */ if (ucs->retry_cmd_in++ < BAS_RETRY) { - dev_notice(cs->dev, "control read: retry %d\n", - ucs->retry_cmd_in); + gig_dbg(DEBUG_USBREQ, "%s: %s, retry %d", __func__, + get_usb_statmsg(status), ucs->retry_cmd_in); rc = atread_submit(cs, BAS_TIMEOUT); - if (rc >= 0 || rc == -ENODEV) - /* resubmitted or disconnected */ - /* - bypass regular exit block */ + if (rc >= 0) + /* successfully resubmitted, skip freeing */ return; - } else { - dev_err(cs->dev, - "control read: giving up after %d tries\n", - ucs->retry_cmd_in); + if (rc == -ENODEV) + /* disconnect, no further action necessary */ + break; } + dev_err(cs->dev, "control read: %s, giving up after %d tries\n", + get_usb_statmsg(status), ucs->retry_cmd_in); error_reset(cs); } + /* read finished, free buffer */ kfree(ucs->rcvbuf); ucs->rcvbuf = NULL; ucs->rcvbuf_size = 0; - if (have_data) { - gig_dbg(DEBUG_INTR, "%s-->BH", __func__); - gigaset_schedule_event(cs); - } } /* atread_submit @@ -1616,13 +1599,13 @@ static int gigaset_init_bchannel(struct bc_state *bcs) ret = starturbs(bcs); if (ret < 0) { + spin_unlock_irqrestore(&cs->lock, flags); dev_err(cs->dev, "could not start isochronous I/O for channel B%d: %s\n", bcs->channel + 1, ret == -EFAULT ? 
"null URB" : get_usb_rcmsg(ret)); if (ret != -ENODEV) error_hangup(bcs); - spin_unlock_irqrestore(&cs->lock, flags); return ret; } @@ -1632,11 +1615,11 @@ static int gigaset_init_bchannel(struct bc_state *bcs) dev_err(cs->dev, "could not open channel B%d\n", bcs->channel + 1); stopurbs(bcs->hw.bas); - if (ret != -ENODEV) - error_hangup(bcs); } spin_unlock_irqrestore(&cs->lock, flags); + if (ret < 0 && ret != -ENODEV) + error_hangup(bcs); return ret; } diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c index 6fbe899..05b15ed 100644 --- a/drivers/isdn/gigaset/capi.c +++ b/drivers/isdn/gigaset/capi.c @@ -378,13 +378,13 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb) ++bcs->trans_up; if (!ap) { - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); return; } /* don't send further B3 messages if disconnected */ if (bcs->apconnstate < APCONN_ACTIVE) { - gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack"); + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); return; } @@ -422,13 +422,14 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb) bcs->trans_down++; if (!ap) { - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); + dev_kfree_skb_any(skb); return; } /* don't send further B3 messages if disconnected */ if (bcs->apconnstate < APCONN_ACTIVE) { - gig_dbg(DEBUG_LLDATA, "disconnected, discarding data"); + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); dev_kfree_skb_any(skb); return; } @@ -747,7 +748,7 @@ void gigaset_isdn_connD(struct bc_state *bcs) ap = bcs->ap; if (!ap) { spin_unlock_irqrestore(&bcs->aplock, flags); - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } if (bcs->apconnstate == APCONN_NONE) { @@ -843,7 +844,7 @@ void gigaset_isdn_connB(struct bc_state *bcs) ap = bcs->ap; if (!ap) { spin_unlock_irqrestore(&bcs->aplock, flags); - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } if (!bcs->apconnstate) { @@ -901,13 +902,12 @@ void gigaset_isdn_connB(struct bc_state *bcs) */ void gigaset_isdn_hupB(struct bc_state *bcs) { - struct cardstate *cs = bcs->cs; struct gigaset_capi_appl *ap = bcs->ap; /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */ if (!ap) { - dev_err(cs->dev, "%s: no application\n", __func__); + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); return; } @@ -1044,6 +1044,7 @@ static inline void remove_appl_from_channel(struct bc_state *bcs, do { if (bcap->bcnext == ap) { bcap->bcnext = bcap->bcnext->bcnext; + spin_unlock_irqrestore(&bcs->aplock, flags); return; } bcap = bcap->bcnext; diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c index 2dfd346..f39ccdf 100644 --- a/drivers/isdn/gigaset/isocdata.c +++ b/drivers/isdn/gigaset/isocdata.c @@ -842,13 +842,14 @@ static inline void trans_receive(unsigned char *src, unsigned count, if (unlikely(bcs->ignore)) { bcs->ignore--; - hdlc_flush(bcs); return; } skb = bcs->rx_skb; - if (skb == NULL) + if (skb == NULL) { skb = gigaset_new_rx_skb(bcs); - bcs->hw.bas->goodbytes += skb->len; + if (skb == NULL) + return; + } dobytes = bcs->rx_bufsize - skb->len; while (count > 0) { dst = skb_put(skb, count < dobytes ? 
count : dobytes); @@ -860,6 +861,7 @@ static inline void trans_receive(unsigned char *src, unsigned count, if (dobytes == 0) { dump_bytes(DEBUG_STREAM_DUMP, "rcv data", skb->data, skb->len); + bcs->hw.bas->goodbytes += skb->len; gigaset_skb_rcvd(bcs, skb); skb = gigaset_new_rx_skb(bcs); if (skb == NULL) diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 544cf4b..0a347a8 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -801,6 +801,16 @@ static void closecard(int cardnr) ll_unload(csta); } +static irqreturn_t card_irq(int intno, void *dev_id) +{ + struct IsdnCardState *cs = dev_id; + irqreturn_t ret = cs->irq_func(intno, cs); + + if (ret == IRQ_HANDLED) + cs->irq_cnt++; + return ret; +} + static int init_card(struct IsdnCardState *cs) { int irq_cnt, cnt = 3, ret; @@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs) ret = cs->cardmsg(cs, CARD_INIT, NULL); return(ret); } - irq_cnt = kstat_irqs(cs->irq); + irq_cnt = cs->irq_cnt = 0; printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], cs->irq, irq_cnt); - if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { + if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) { printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", cs->irq); return 1; @@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs) /* Timeout 10ms */ msleep(10); printk(KERN_INFO "%s: IRQ %d count %d\n", - CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); - if (kstat_irqs(cs->irq) == irq_cnt) { + CardType[cs->typ], cs->irq, cs->irq_cnt); + if (cs->irq_cnt == irq_cnt) { printk(KERN_WARNING "%s: IRQ(%d) getting no interrupts during init %d\n", CardType[cs->typ], cs->irq, 4 - cnt); diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 832a878..32ab392 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h @@ -959,6 +959,7 @@ struct IsdnCardState { u_long event; struct work_struct tqueue; struct timer_list dbusytimer; + unsigned int irq_cnt; #ifdef ERROR_STATISTIC int err_crc; int err_tx; diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index fc8454d..51dc60d 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c @@ -2636,12 +2636,6 @@ isdn_tty_modem_result(int code, modem_info * info) if ((info->flags & ISDN_ASYNC_CLOSING) || (!info->tty)) { return; } -#ifdef CONFIG_ISDN_AUDIO - if ( !info->vonline ) - tty_ldisc_flush(info->tty); -#else - tty_ldisc_flush(info->tty); -#endif if ((info->flags & ISDN_ASYNC_CHECK_CD) && (!((info->flags & ISDN_ASYNC_CALLOUT_ACTIVE) && (info->flags & ISDN_ASYNC_CALLOUT_NOHUP)))) { diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c index 1081091..2655e3a 100644 --- a/drivers/isdn/sc/ioctl.c +++ b/drivers/isdn/sc/ioctl.c @@ -174,7 +174,7 @@ int sc_ioctl(int card, scs_ioctl *data) pr_debug("%s: SCIOGETSPID: ioctl received\n", sc_adapter[card]->devicename); - spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); + spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL); if (!spid) { kfree(rcvmsg); return -ENOMEM; @@ -194,7 +194,7 @@ int sc_ioctl(int card, scs_ioctl *data) kfree(rcvmsg); return status; } - strcpy(spid, rcvmsg->msg_data.byte_array); + strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE); /* * Package the switch type and send to user space @@ -272,12 +272,12 @@ int sc_ioctl(int card, scs_ioctl *data) return status; } - dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL); + dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL); if (!dn) { kfree(rcvmsg); return -ENOMEM; } - strcpy(dn, 
rcvmsg->msg_data.byte_array); + strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE); kfree(rcvmsg); /* @@ -348,7 +348,7 @@ int sc_ioctl(int card, scs_ioctl *data) pr_debug("%s: SCIOSTAT: ioctl received\n", sc_adapter[card]->devicename); - bi = kmalloc (sizeof(boardInfo), GFP_KERNEL); + bi = kzalloc(sizeof(boardInfo), GFP_KERNEL); if (!bi) { kfree(rcvmsg); return -ENOMEM; diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index a688293..614ebeb 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") } }, + {} }; /* diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 2b7907b..0bdb201 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, /* Validate the chunk size against the device block size */ if (chunk_size % - (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) { + (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) || + chunk_size % + (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) { *error = "Chunk size is not a multiple of device blocksize"; return -EINVAL; } diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index e8dfa06..0b25362 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -126,8 +126,9 @@ struct dm_exception_store { }; /* - * Obtain the cow device used by a given snapshot. + * Obtain the origin or cow device used by a given snapshot. */ +struct dm_dev *dm_snap_origin(struct dm_snapshot *snap); struct dm_dev *dm_snap_cow(struct dm_snapshot *snap); /* diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index d7500e1..bb6bdc8 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -249,40 +249,50 @@ static void __hash_remove(struct hash_cell *hc) static void dm_hash_remove_all(int keep_open_devices) { - int i, dev_skipped, dev_removed; + int i, dev_skipped; struct hash_cell *hc; - struct list_head *tmp, *n; + struct mapped_device *md; + +retry: + dev_skipped = 0; down_write(&_hash_lock); -retry: - dev_skipped = dev_removed = 0; for (i = 0; i < NUM_BUCKETS; i++) { - list_for_each_safe (tmp, n, _name_buckets + i) { - hc = list_entry(tmp, struct hash_cell, name_list); + list_for_each_entry(hc, _name_buckets + i, name_list) { + md = hc->md; + dm_get(md); - if (keep_open_devices && - dm_lock_for_deletion(hc->md)) { + if (keep_open_devices && dm_lock_for_deletion(md)) { + dm_put(md); dev_skipped++; continue; } + __hash_remove(hc); - dev_removed = 1; - } - } - /* - * Some mapped devices may be using other mapped devices, so if any - * still exist, repeat until we make no further progress. - */ - if (dev_skipped) { - if (dev_removed) - goto retry; + up_write(&_hash_lock); - DMWARN("remove_all left %d open device(s)", dev_skipped); + dm_put(md); + if (likely(keep_open_devices)) + dm_destroy(md); + else + dm_destroy_immediate(md); + + /* + * Some mapped devices may be using other mapped + * devices, so repeat until we make no further + * progress. If a new mapped device is created + * here it will also get removed. 
+ */ + goto retry; + } } up_write(&_hash_lock); + + if (dev_skipped) + DMWARN("remove_all left %d open device(s)", dev_skipped); } static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old, @@ -640,6 +650,7 @@ static int dev_create(struct dm_ioctl *param, size_t param_size) r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md); if (r) { dm_put(md); + dm_destroy(md); return r; } @@ -742,6 +753,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size) param->flags |= DM_UEVENT_GENERATED_FLAG; dm_put(md); + dm_destroy(md); return 0; } diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 826bce7..b294ea6 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -33,7 +33,6 @@ struct pgpath { unsigned fail_count; /* Cumulative failure count */ struct dm_path path; - struct work_struct deactivate_path; struct work_struct activate_path; }; @@ -116,7 +115,6 @@ static struct workqueue_struct *kmultipathd, *kmpath_handlerd; static void process_queued_ios(struct work_struct *work); static void trigger_event(struct work_struct *work); static void activate_path(struct work_struct *work); -static void deactivate_path(struct work_struct *work); /*----------------------------------------------- @@ -129,7 +127,6 @@ static struct pgpath *alloc_pgpath(void) if (pgpath) { pgpath->is_active = 1; - INIT_WORK(&pgpath->deactivate_path, deactivate_path); INIT_WORK(&pgpath->activate_path, activate_path); } @@ -141,14 +138,6 @@ static void free_pgpath(struct pgpath *pgpath) kfree(pgpath); } -static void deactivate_path(struct work_struct *work) -{ - struct pgpath *pgpath = - container_of(work, struct pgpath, deactivate_path); - - blk_abort_queue(pgpath->path.dev->bdev->bd_disk->queue); -} - static struct priority_group *alloc_priority_group(void) { struct priority_group *pg; @@ -993,7 +982,6 @@ static int fail_path(struct pgpath *pgpath) pgpath->path.dev->name, m->nr_valid_paths); schedule_work(&m->trigger_event); - queue_work(kmultipathd, &pgpath->deactivate_path); out: spin_unlock_irqrestore(&m->lock, flags); diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 5485377..a1f2ab5 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -148,6 +148,12 @@ struct dm_snapshot { #define RUNNING_MERGE 0 #define SHUTDOWN_MERGE 1 +struct dm_dev *dm_snap_origin(struct dm_snapshot *s) +{ + return s->origin; +} +EXPORT_SYMBOL(dm_snap_origin); + struct dm_dev *dm_snap_cow(struct dm_snapshot *s) { return s->cow; @@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) origin_mode = FMODE_WRITE; } - origin_path = argv[0]; - argv++; - argc--; - s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) { ti->error = "Cannot allocate snapshot context private " @@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } + origin_path = argv[0]; + argv++; + argc--; + + r = dm_get_device(ti, origin_path, origin_mode, &s->origin); + if (r) { + ti->error = "Cannot get origin device"; + goto bad_origin; + } + cow_path = argv[0]; argv++; argc--; @@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) argv += args_used; argc -= args_used; - r = dm_get_device(ti, origin_path, origin_mode, &s->origin); - if (r) { - ti->error = "Cannot get origin device"; - goto bad_origin; - } - s->ti = ti; s->valid = 1; s->active = 0; @@ -1212,15 +1218,15 @@ bad_kcopyd: dm_exception_table_exit(&s->complete, exception_cache); bad_hash_tables: - 
dm_put_device(ti, s->origin); - -bad_origin: dm_exception_store_destroy(s->store); bad_store: dm_put_device(ti, s->cow); bad_cow: + dm_put_device(ti, s->origin); + +bad_origin: kfree(s); bad: @@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti) mempool_destroy(s->pending_pool); - dm_put_device(ti, s->origin); - dm_exception_store_destroy(s->store); dm_put_device(ti, s->cow); + dm_put_device(ti, s->origin); + kfree(s); } @@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dm_snapshot *snap = ti->private; + int r; + + r = fn(ti, snap->origin, 0, ti->len, data); - return fn(ti, snap->origin, 0, ti->len, data); + if (!r) + r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data); + + return r; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 9924ea2..4a83321 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1081,11 +1081,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, */ q->limits = *limits; - if (limits->no_cluster) - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); - else - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); - dm_table_set_integrity(t); /* diff --git a/drivers/md/dm.c b/drivers/md/dm.c index d21e128..96f01d25 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -2039,13 +2040,14 @@ static void event_callback(void *context) wake_up(&md->eventq); } +/* + * Protected by md->suspend_lock obtained by dm_swap_table(). + */ static void __set_size(struct mapped_device *md, sector_t size) { set_capacity(md->disk, size); - mutex_lock(&md->bdev->bd_inode->i_mutex); i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); - mutex_unlock(&md->bdev->bd_inode->i_mutex); } /* @@ -2141,6 +2143,7 @@ static struct mapped_device *dm_find_md(dev_t dev) md = idr_find(&_minor_idr, minor); if (md && (md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || + dm_deleting_md(md) || test_bit(DMF_FREEING, &md->flags))) { md = NULL; goto out; @@ -2175,6 +2178,7 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr) void dm_get(struct mapped_device *md) { atomic_inc(&md->holders); + BUG_ON(test_bit(DMF_FREEING, &md->flags)); } const char *dm_device_name(struct mapped_device *md) @@ -2183,27 +2187,55 @@ const char *dm_device_name(struct mapped_device *md) } EXPORT_SYMBOL_GPL(dm_device_name); -void dm_put(struct mapped_device *md) +static void __dm_destroy(struct mapped_device *md, bool wait) { struct dm_table *map; - BUG_ON(test_bit(DMF_FREEING, &md->flags)); + might_sleep(); - if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { - map = dm_get_live_table(md); - idr_replace(&_minor_idr, MINOR_ALLOCED, - MINOR(disk_devt(dm_disk(md)))); - set_bit(DMF_FREEING, &md->flags); - spin_unlock(&_minor_lock); - if (!dm_suspended_md(md)) { - dm_table_presuspend_targets(map); - dm_table_postsuspend_targets(map); - } - dm_sysfs_exit(md); - dm_table_put(map); - dm_table_destroy(__unbind(md)); - free_dev(md); + spin_lock(&_minor_lock); + map = dm_get_live_table(md); + idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); + set_bit(DMF_FREEING, &md->flags); + spin_unlock(&_minor_lock); + + if (!dm_suspended_md(md)) { + dm_table_presuspend_targets(map); + dm_table_postsuspend_targets(map); } + + /* + * Rare, but there may be I/O requests still going to complete, + * for example. Wait for all references to disappear. 
+ * No one should increment the reference count of the mapped_device, + * after the mapped_device state becomes DMF_FREEING. + */ + if (wait) + while (atomic_read(&md->holders)) + msleep(1); + else if (atomic_read(&md->holders)) + DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", + dm_device_name(md), atomic_read(&md->holders)); + + dm_sysfs_exit(md); + dm_table_put(map); + dm_table_destroy(__unbind(md)); + free_dev(md); +} + +void dm_destroy(struct mapped_device *md) +{ + __dm_destroy(md, true); +} + +void dm_destroy_immediate(struct mapped_device *md) +{ + __dm_destroy(md, false); +} + +void dm_put(struct mapped_device *md) +{ + atomic_dec(&md->holders); } EXPORT_SYMBOL_GPL(dm_put); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index bad1724..8223671 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -122,6 +122,11 @@ void dm_linear_exit(void); int dm_stripe_init(void); void dm_stripe_exit(void); +/* + * mapped_device operations + */ +void dm_destroy(struct mapped_device *md); +void dm_destroy_immediate(struct mapped_device *md); int dm_open_count(struct mapped_device *md); int dm_lock_for_deletion(struct mapped_device *md); diff --git a/drivers/md/md.c b/drivers/md/md.c index cb20d0b..9ec74cc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -441,6 +441,9 @@ static mddev_t * mddev_find(dev_t unit) { mddev_t *mddev, *new = NULL; + if (unit && MAJOR(unit) != MD_MAJOR) + unit &= ~((1<to_remove or my + * otherwise change the sysfs namespace will fail with + * -EBUSY if sysfs_active is still set. + * We set sysfs_active under reconfig_mutex and elsewhere + * test it under the same mutex to ensure its correct value + * is seen. */ struct attribute_group *to_remove = mddev->to_remove; mddev->to_remove = NULL; - mutex_lock(&mddev->open_mutex); + mddev->sysfs_active = 1; mutex_unlock(&mddev->reconfig_mutex); if (to_remove != &md_redundancy_group) @@ -550,7 +557,7 @@ static void mddev_unlock(mddev_t * mddev) sysfs_put(mddev->sysfs_action); mddev->sysfs_action = NULL; } - mutex_unlock(&mddev->open_mutex); + mddev->sysfs_active = 0; } else mutex_unlock(&mddev->reconfig_mutex); @@ -1274,7 +1281,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); md_super_wait(rdev->mddev); - return num_sectors / 2; /* kB for sysfs */ + return num_sectors; } @@ -1640,7 +1647,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); md_super_wait(rdev->mddev); - return num_sectors / 2; /* kB for sysfs */ + return num_sectors; } static struct super_type super_types[] = { @@ -2960,7 +2967,9 @@ level_store(mddev_t *mddev, const char *buf, size_t len) * - new personality will access other array. 
*/ - if (mddev->sync_thread || mddev->reshape_position != MaxSector) + if (mddev->sync_thread || + mddev->reshape_position != MaxSector || + mddev->sysfs_active) return -EBUSY; if (!mddev->pers->quiesce) { @@ -4242,9 +4251,6 @@ static int md_alloc(dev_t dev, char *name) goto abort; mddev->queue->queuedata = mddev; - /* Can be unlocked because the queue is new: no concurrency */ - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); - blk_queue_make_request(mddev->queue, md_make_request); disk = alloc_disk(1 << shift); @@ -4344,13 +4350,9 @@ static int md_run(mddev_t *mddev) if (mddev->pers) return -EBUSY; - - /* These two calls synchronise us with the - * sysfs_remove_group calls in mddev_unlock, - * so they must have completed. - */ - mutex_lock(&mddev->open_mutex); - mutex_unlock(&mddev->open_mutex); + /* Cannot run until previous stop completes properly */ + if (mddev->sysfs_active) + return -EBUSY; /* * Analyze all RAID superblock(s) @@ -4548,6 +4550,7 @@ static int do_md_run(mddev_t *mddev) set_capacity(mddev->gendisk, mddev->array_sectors); revalidate_disk(mddev->gendisk); + mddev->changed = 1; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); out: return err; @@ -4636,6 +4639,7 @@ static void md_clean(mddev_t *mddev) mddev->sync_speed_min = mddev->sync_speed_max = 0; mddev->recovery = 0; mddev->in_sync = 0; + mddev->changed = 0; mddev->degraded = 0; mddev->barriers_work = 0; mddev->safemode = 0; @@ -4711,12 +4715,13 @@ out: */ static int do_md_stop(mddev_t * mddev, int mode, int is_open) { - int err = 0; + int err = 0, revalidate = 0; struct gendisk *disk = mddev->gendisk; mdk_rdev_t *rdev; mutex_lock(&mddev->open_mutex); - if (atomic_read(&mddev->openers) > is_open) { + if (atomic_read(&mddev->openers) > is_open || + mddev->sysfs_active) { printk("md: %s still in use.\n",mdname(mddev)); err = -EBUSY; } else if (mddev->pers) { @@ -4740,7 +4745,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) } set_capacity(disk, 0); - revalidate_disk(disk); + revalidate = 1; + mddev->changed = 1; if (mddev->ro) mddev->ro = 0; @@ -4748,6 +4754,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) err = 0; } mutex_unlock(&mddev->open_mutex); + if (revalidate) + revalidate_disk(disk); if (err) return err; /* @@ -5104,17 +5112,21 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) PTR_ERR(rdev)); return PTR_ERR(rdev); } - /* set save_raid_disk if appropriate */ + /* set saved_raid_disk if appropriate */ if (!mddev->persistent) { if (info->state & (1<raid_disk < mddev->raid_disks) + info->raid_disk < mddev->raid_disks) { rdev->raid_disk = info->raid_disk; - else + set_bit(In_sync, &rdev->flags); + } else rdev->raid_disk = -1; } else super_types[mddev->major_version]. 
validate_super(mddev, rdev); - rdev->saved_raid_disk = rdev->raid_disk; + if (test_bit(In_sync, &rdev->flags)) + rdev->saved_raid_disk = rdev->raid_disk; + else + rdev->saved_raid_disk = -1; clear_bit(In_sync, &rdev->flags); /* just to be sure */ if (info->state & (1<openers); mutex_unlock(&mddev->open_mutex); - check_disk_size_change(mddev->gendisk, bdev); + check_disk_change(bdev); out: return err; } @@ -5936,6 +5948,21 @@ static int md_release(struct gendisk *disk, fmode_t mode) return 0; } + +static int md_media_changed(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + return mddev->changed; +} + +static int md_revalidate(struct gendisk *disk) +{ + mddev_t *mddev = disk->private_data; + + mddev->changed = 0; + return 0; +} static const struct block_device_operations md_fops = { .owner = THIS_MODULE, @@ -5946,6 +5973,8 @@ static const struct block_device_operations md_fops = .compat_ioctl = md_compat_ioctl, #endif .getgeo = md_getgeo, + .media_changed = md_media_changed, + .revalidate_disk= md_revalidate, }; static int md_thread(void * arg) @@ -5981,9 +6010,8 @@ static int md_thread(void * arg) || kthread_should_stop(), thread->timeout); - clear_bit(THREAD_WAKEUP, &thread->flags); - - thread->run(thread->mddev); + if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags)) + thread->run(thread->mddev); } return 0; diff --git a/drivers/md/md.h b/drivers/md/md.h index 10597bf..2e2a3c2 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -125,6 +125,10 @@ struct mddev_s int suspended; atomic_t active_io; int ro; + int sysfs_active; /* set when sysfs deletes + * are happening, so run/ + * takeover/stop are not safe + */ struct gendisk *gendisk; @@ -246,6 +250,8 @@ struct mddev_s atomic_t active; /* general refcount */ atomic_t openers; /* number of active opens */ + int changed; /* True if we might need to + * reread partition info */ int degraded; /* whether md should consider * adding a spare */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a948da8..2e99c29 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1208,6 +1208,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) * is not possible. */ if (!test_bit(Faulty, &rdev->flags) && + !mddev->recovery_disabled && mddev->degraded < conf->raid_disks) { err = -EBUSY; goto abort; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 42e64e4..66fe487 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -825,11 +825,29 @@ static int make_request(mddev_t *mddev, struct bio * bio) */ bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); + + /* Each of these 'make_request' calls will call 'wait_barrier'. + * If the first succeeds but the second blocks due to the resync + * thread raising the barrier, we will deadlock because the + * IO to the underlying device will be queued in generic_make_request + * and will never complete, so will never reduce nr_pending. + * So increment nr_waiting here so no new raise_barriers will + * succeed, and so the second wait_barrier cannot block. 
+ */ + spin_lock_irq(&conf->resync_lock); + conf->nr_waiting++; + spin_unlock_irq(&conf->resync_lock); + if (make_request(mddev, &bp->bio1)) generic_make_request(&bp->bio1); if (make_request(mddev, &bp->bio2)) generic_make_request(&bp->bio2); + spin_lock_irq(&conf->resync_lock); + conf->nr_waiting--; + wake_up(&conf->wait_barrier); + spin_unlock_irq(&conf->resync_lock); + bio_pair_release(bp); return 0; bad_map: @@ -2375,13 +2393,13 @@ static int run(mddev_t *mddev) return 0; out_free_conf: + md_unregister_thread(mddev->thread); if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); mddev->private = NULL; - md_unregister_thread(mddev->thread); out: return -EIO; } diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index 94a8577..78eb8cb 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c @@ -319,7 +319,7 @@ static void ir_timer_keyup(unsigned long cookie) * a keyup event might follow immediately after the keydown. */ spin_lock_irqsave(&ir->keylock, flags); - if (time_is_after_eq_jiffies(ir->keyup_jiffies)) + if (time_is_before_eq_jiffies(ir->keyup_jiffies)) ir_keyup(ir); spin_unlock_irqrestore(&ir->keylock, flags); } diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index 800800a..33af683 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -1240,6 +1240,57 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) return adap->fe == NULL ? -ENODEV : 0; } +/* STK7770P */ +static struct dib7000p_config dib7770p_dib7000p_config = { + .output_mpeg2_in_188_bytes = 1, + + .agc_config_count = 1, + .agc = &dib7070_agc_config, + .bw = &dib7070_bw_config_12_mhz, + .tuner_is_baseband = 1, + .spur_protect = 1, + + .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, + .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, + .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, + + .hostbus_diversity = 1, + .enable_current_mirror = 1, +}; + +static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) +{ + struct usb_device_descriptor *p = &adap->dev->udev->descriptor; + if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) && + p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E)) + dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); + else + dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); + msleep(10); + dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); + dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); + dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); + dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); + + dib0700_ctrl_clock(adap->dev, 72, 1); + + msleep(10); + dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); + msleep(10); + dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); + + if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, + &dib7770p_dib7000p_config) != 0) { + err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", + __func__); + return -ENODEV; + } + + adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, + &dib7770p_dib7000p_config); + return adap->fe == NULL ? 
-ENODEV : 0; +} + /* DIB807x generic */ static struct dibx000_agc_config dib807x_agc_config[2] = { { @@ -2081,7 +2132,7 @@ struct usb_device_id dib0700_usb_id_table[] = { /* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) }, { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) }, { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) }, - { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) }, + { USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) }, { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) }, /* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) }, { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) }, @@ -2592,7 +2643,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, - .frontend_attach = stk7070p_frontend_attach, + .frontend_attach = stk7770p_frontend_attach, .tuner_attach = dib7770p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c index 2e28b97..73f59ab 100644 --- a/drivers/media/dvb/frontends/dib7000p.c +++ b/drivers/media/dvb/frontends/dib7000p.c @@ -260,6 +260,8 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad // dprintk( "908: %x, 909: %x\n", reg_908, reg_909); + reg_908 |= (state->cfg.enable_current_mirror & 1) << 7; + dib7000p_write_word(state, 908, reg_908); dib7000p_write_word(state, 909, reg_909); } diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h index 805dd13..04a7449 100644 --- a/drivers/media/dvb/frontends/dib7000p.h +++ b/drivers/media/dvb/frontends/dib7000p.h @@ -33,6 +33,9 @@ struct dib7000p_config { int (*agc_control) (struct dvb_frontend *, u8 before); u8 output_mode; + + u8 enable_current_mirror : 1; + }; #define DEFAULT_DIB7000P_I2C_ADDRESS 18 diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c index 4eba35a..c38dd67 100644 --- a/drivers/media/dvb/ttpci/av7110_ca.c +++ b/drivers/media/dvb/ttpci/av7110_ca.c @@ -277,7 +277,7 @@ static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg) { ca_slot_info_t *info=(ca_slot_info_t *)parg; - if (info->num > 1) + if (info->num < 0 || info->num > 1) return -EINVAL; av7110->ci_slot[info->num].num = info->num; av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ? 
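The av7110_ca hunk above is the usual hardening pattern for index values that arrive through an ioctl: ca_slot_info_t.num is signed, so both ends of the range have to be rejected before it is used as an array subscript. A minimal sketch of that pattern, using a hypothetical two-entry slot table rather than the driver's real declarations:

#include <linux/errno.h>

#define MAX_SLOTS 2			/* hypothetical table size */

struct slot { int num; int type; };
static struct slot slots[MAX_SLOTS];

static int set_slot_info(int num, int type)
{
	/* 'num' comes straight from userspace; a signed value can be
	 * negative, so check both bounds before indexing. */
	if (num < 0 || num >= MAX_SLOTS)
		return -EINVAL;

	slots[num].num = num;
	slots[num].type = type;
	return 0;
}
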
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c index 5bf4985..a91642c 100644 --- a/drivers/media/radio/radio-aimslab.c +++ b/drivers/media/radio/radio-aimslab.c @@ -31,7 +31,7 @@ #include /* Modules */ #include /* Initdata */ #include /* request_region */ -#include /* udelay */ +#include /* msleep */ #include /* kernel radio structs */ #include /* for KERNEL_VERSION MACRO */ #include /* outb, outb_p */ @@ -71,27 +71,17 @@ static struct rtrack rtrack_card; /* local things */ -static void sleep_delay(long n) -{ - /* Sleep nicely for 'n' uS */ - int d = n / msecs_to_jiffies(1000); - if (!d) - udelay(n); - else - msleep(jiffies_to_msecs(d)); -} - static void rt_decvol(struct rtrack *rt) { outb(0x58, rt->io); /* volume down + sigstr + on */ - sleep_delay(100000); + msleep(100); outb(0xd8, rt->io); /* volume steady + sigstr + on */ } static void rt_incvol(struct rtrack *rt) { outb(0x98, rt->io); /* volume up + sigstr + on */ - sleep_delay(100000); + msleep(100); outb(0xd8, rt->io); /* volume steady + sigstr + on */ } @@ -120,7 +110,7 @@ static int rt_setvol(struct rtrack *rt, int vol) if (vol == 0) { /* volume = 0 means mute the card */ outb(0x48, rt->io); /* volume down but still "on" */ - sleep_delay(2000000); /* make sure it's totally down */ + msleep(2000); /* make sure it's totally down */ outb(0xd0, rt->io); /* volume steady, off */ rt->curvol = 0; /* track the volume state! */ mutex_unlock(&rt->lock); @@ -155,7 +145,7 @@ static void send_0_byte(struct rtrack *rt) outb_p(128+64+16+8+ 1, rt->io); /* on + wr-enable + data low */ outb_p(128+64+16+8+2+1, rt->io); /* clock */ } - sleep_delay(1000); + msleep(1); } static void send_1_byte(struct rtrack *rt) @@ -169,7 +159,7 @@ static void send_1_byte(struct rtrack *rt) outb_p(128+64+16+8+4+2+1, rt->io); /* clock */ } - sleep_delay(1000); + msleep(1); } static int rt_setfreq(struct rtrack *rt, unsigned long freq) @@ -427,7 +417,7 @@ static int __init rtrack_init(void) /* this ensures that the volume is all the way down */ outb(0x48, rt->io); /* volume down but still "on" */ - sleep_delay(2000000); /* make sure it's totally down */ + msleep(2000); /* make sure it's totally down */ outb(0xc0, rt->io); /* steady volume, mute card */ return 0; diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile index 755dd0c..6f2b573 100644 --- a/drivers/media/video/cx231xx/Makefile +++ b/drivers/media/video/cx231xx/Makefile @@ -11,4 +11,5 @@ EXTRA_CFLAGS += -Idrivers/media/video EXTRA_CFLAGS += -Idrivers/media/common/tuners EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core EXTRA_CFLAGS += -Idrivers/media/dvb/frontends +EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c index 6bdc0ef..f2a4900 100644 --- a/drivers/media/video/cx231xx/cx231xx-cards.c +++ b/drivers/media/video/cx231xx/cx231xx-cards.c @@ -32,6 +32,7 @@ #include #include +#include "dvb-usb-ids.h" #include "xc5000.h" #include "cx231xx.h" @@ -175,6 +176,8 @@ struct usb_device_id cx231xx_id_table[] = { .driver_info = CX231XX_BOARD_CNXT_RDE_250}, {USB_DEVICE(0x0572, 0x58A1), .driver_info = CX231XX_BOARD_CNXT_RDU_250}, + {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff), + .driver_info = CX231XX_BOARD_UNKNOWN}, {}, }; @@ -226,14 +229,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev) dev->board.name, dev->model); /* set the direction for GPIO pins */ - cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); - 
cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); - cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); + if (dev->board.tuner_gpio) { + cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); + cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); + cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); - /* request some modules if any required */ + /* request some modules if any required */ - /* reset the Tuner */ - cx231xx_gpio_set(dev, dev->board.tuner_gpio); + /* reset the Tuner */ + cx231xx_gpio_set(dev, dev->board.tuner_gpio); + } /* set the mode to Analog mode initially */ cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); diff --git a/drivers/media/video/cx23885/cx23885-core.c b/drivers/media/video/cx23885/cx23885-core.c index 0dde57e..fb67433 100644 --- a/drivers/media/video/cx23885/cx23885-core.c +++ b/drivers/media/video/cx23885/cx23885-core.c @@ -737,6 +737,7 @@ static void cx23885_dev_checkrevision(struct cx23885_dev *dev) case 0x0e: /* CX23887-15Z */ dev->hwrevision = 0xc0; + break; case 0x0f: /* CX23887-14Z */ dev->hwrevision = 0xb1; diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 3a4fd85..2fa5bb4 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c @@ -1605,11 +1605,11 @@ struct em28xx_board em28xx_boards[] = { .input = { { .type = EM28XX_VMUX_COMPOSITE1, .vmux = SAA7115_COMPOSITE0, - .amux = EM28XX_AMUX_VIDEO2, + .amux = EM28XX_AMUX_LINE_IN, }, { .type = EM28XX_VMUX_SVIDEO, .vmux = SAA7115_SVIDEO3, - .amux = EM28XX_AMUX_VIDEO2, + .amux = EM28XX_AMUX_LINE_IN, } }, }, [EM2860_BOARD_TERRATEC_AV350] = { diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index 678675b..59aba24 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c @@ -223,6 +223,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev, usb_rcvintpipe(dev, ep->bEndpointAddress), buffer, buffer_len, int_irq, (void *)gspca_dev, interval); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; gspca_dev->int_urb = urb; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret < 0) { diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c index 644a7fd..74a005f 100644 --- a/drivers/media/video/gspca/sn9c20x.c +++ b/drivers/media/video/gspca/sn9c20x.c @@ -2368,8 +2368,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, (data[33] << 10); avg_lum >>= 9; atomic_set(&sd->avg_lum, avg_lum); - gspca_frame_add(gspca_dev, LAST_PACKET, - data, len); + gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); return; } if (gspca_dev->last_packet_type == LAST_PACKET) { diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 176c5b3..da2ae07 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c @@ -57,6 +57,7 @@ struct sd { u8 jpegqual; /* webcam quality */ u8 reg18; + u8 flags; s8 ag_cnt; #define AG_CNT_START 13 @@ -88,6 +89,9 @@ enum { u8 jpeg_hdr[JPEG_HDR_SZ]; }; +/* device flags */ +#define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */ + /* V4L2 controls supported by the driver */ static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val); static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val); @@ -1774,7 +1778,8 @@ static int sd_config(struct gspca_dev *gspca_dev, struct cam *cam; sd->bridge = id->driver_info >> 16; - sd->sensor = id->driver_info; + sd->sensor = id->driver_info >> 8; + sd->flags = 
id->driver_info; cam = &gspca_dev->cam; if (sd->sensor == SENSOR_ADCM1700) { @@ -2488,8 +2493,7 @@ static int sd_start(struct gspca_dev *gspca_dev) reg1 = 0x44; reg17 = 0xa2; break; - default: -/* case SENSOR_SP80708: */ + case SENSOR_SP80708: init = sp80708_sensor_param1; if (mode) { /*?? reg1 = 0x04; * 320 clk 48Mhz */ @@ -2999,14 +3003,18 @@ static const struct sd_desc sd_desc = { /* -- module initialisation -- */ #define BS(bridge, sensor) \ .driver_info = (BRIDGE_ ## bridge << 16) \ - | SENSOR_ ## sensor + | (SENSOR_ ## sensor << 8) +#define BSF(bridge, sensor, flags) \ + .driver_info = (BRIDGE_ ## bridge << 16) \ + | (SENSOR_ ## sensor << 8) \ + | (flags) static const __devinitdata struct usb_device_id device_table[] = { #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)}, {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)}, #endif - {USB_DEVICE(0x045e, 0x00f5), BS(SN9C105, OV7660)}, - {USB_DEVICE(0x045e, 0x00f7), BS(SN9C105, OV7660)}, + {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)}, + {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)}, {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)}, diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c index c338f3f..9cccb74 100644 --- a/drivers/media/video/hdpvr/hdpvr-video.c +++ b/drivers/media/video/hdpvr/hdpvr-video.c @@ -157,6 +157,7 @@ int hdpvr_alloc_buffers(struct hdpvr_device *dev, uint count) mem, dev->bulk_in_size, hdpvr_read_bulk_callback, buf); + buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; buf->status = BUFSTAT_AVAILABLE; list_add_tail(&buf->buff_list, &dev->free_buff_list); } diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 07f6bb8..f55e67a 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -6631,6 +6631,18 @@ struct pci_device_id saa7134_pci_tbl[] = { .subdevice = 0x6655, .driver_data = SAA7134_BOARD_LEADTEK_WINFAST_DTV1000S, }, { + .vendor = PCI_VENDOR_ID_PHILIPS, + .device = PCI_DEVICE_ID_PHILIPS_SAA7133, + .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ + .subdevice = 0x7190, + .driver_data = SAA7134_BOARD_BEHOLD_H7, + }, { + .vendor = PCI_VENDOR_ID_PHILIPS, + .device = PCI_DEVICE_ID_PHILIPS_SAA7133, + .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ + .subdevice = 0x7090, + .driver_data = SAA7134_BOARD_BEHOLD_A7, + }, { /* --- boards without eeprom + subsystem ID --- */ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, @@ -6668,18 +6680,6 @@ struct pci_device_id saa7134_pci_tbl[] = { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = SAA7134_BOARD_UNKNOWN, - }, { - .vendor = PCI_VENDOR_ID_PHILIPS, - .device = PCI_DEVICE_ID_PHILIPS_SAA7133, - .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ - .subdevice = 0x7190, - .driver_data = SAA7134_BOARD_BEHOLD_H7, - }, { - .vendor = PCI_VENDOR_ID_PHILIPS, - .device = PCI_DEVICE_ID_PHILIPS_SAA7133, - .subvendor = 0x5ace, /* Beholder Intl. Ltd. 
*/ - .subdevice = 0x7090, - .driver_data = SAA7134_BOARD_BEHOLD_A7, },{ /* --- end of list --- */ } diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c index 28e19da..5c9587a 100644 --- a/drivers/media/video/sn9c102/sn9c102_core.c +++ b/drivers/media/video/sn9c102/sn9c102_core.c @@ -1430,9 +1430,9 @@ static DEVICE_ATTR(i2c_reg, S_IRUGO | S_IWUSR, sn9c102_show_i2c_reg, sn9c102_store_i2c_reg); static DEVICE_ATTR(i2c_val, S_IRUGO | S_IWUSR, sn9c102_show_i2c_val, sn9c102_store_i2c_val); -static DEVICE_ATTR(green, S_IWUGO, NULL, sn9c102_store_green); -static DEVICE_ATTR(blue, S_IWUGO, NULL, sn9c102_store_blue); -static DEVICE_ATTR(red, S_IWUGO, NULL, sn9c102_store_red); +static DEVICE_ATTR(green, S_IWUSR, NULL, sn9c102_store_green); +static DEVICE_ATTR(blue, S_IWUSR, NULL, sn9c102_store_blue); +static DEVICE_ATTR(red, S_IWUSR, NULL, sn9c102_store_red); static DEVICE_ATTR(frame_header, S_IRUGO, sn9c102_show_frame_header, NULL); diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index 838b56f..e24f913 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -1261,6 +1261,14 @@ static int uvc_scan_chain_entity(struct uvc_video_chain *chain, break; + case UVC_OTT_VENDOR_SPECIFIC: + case UVC_OTT_DISPLAY: + case UVC_OTT_MEDIA_TRANSPORT_OUTPUT: + if (uvc_trace_param & UVC_TRACE_PROBE) + printk(" OT %d", entity->id); + + break; + case UVC_TT_STREAMING: if (UVC_ENTITY_IS_ITERM(entity)) { if (uvc_trace_param & UVC_TRACE_PROBE) diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 53f3ef4..4b0870c 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -65,15 +65,19 @@ int uvc_query_ctrl(struct uvc_device *dev, __u8 query, __u8 unit, static void uvc_fixup_video_ctrl(struct uvc_streaming *stream, struct uvc_streaming_control *ctrl) { - struct uvc_format *format; + struct uvc_format *format = NULL; struct uvc_frame *frame = NULL; unsigned int i; - if (ctrl->bFormatIndex <= 0 || - ctrl->bFormatIndex > stream->nformats) - return; + for (i = 0; i < stream->nformats; ++i) { + if (stream->format[i].index == ctrl->bFormatIndex) { + format = &stream->format[i]; + break; + } + } - format = &stream->format[ctrl->bFormatIndex - 1]; + if (format == NULL) + return; for (i = 0; i < format->nframes; ++i) { if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) { diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c index 9004a5f..85b175d 100644 --- a/drivers/media/video/v4l2-compat-ioctl32.c +++ b/drivers/media/video/v4l2-compat-ioctl32.c @@ -193,17 +193,24 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u struct video_code32 { char loadwhat[16]; /* name or tag of file being passed */ compat_int_t datasize; - unsigned char *data; + compat_uptr_t data; }; -static int get_microcode32(struct video_code *kp, struct video_code32 __user *up) +static struct video_code __user *get_microcode32(struct video_code32 *kp) { - if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) || - copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) || - get_user(kp->datasize, &up->datasize) || - copy_from_user(kp->data, up->data, up->datasize)) - return -EFAULT; - return 0; + struct video_code __user *up; + + up = compat_alloc_user_space(sizeof(*up)); + + /* + * NOTE! We don't actually care if these fail. 
If the + * user address is invalid, the native ioctl will do + * the error handling for us + */ + (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat)); + (void) put_user(kp->datasize, &up->datasize); + (void) put_user(compat_ptr(kp->data), &up->data); + return up; } #define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32) @@ -744,7 +751,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar struct video_tuner vt; struct video_buffer vb; struct video_window vw; - struct video_code vc; + struct video_code32 vc; struct video_audio va; #endif struct v4l2_format v2f; @@ -823,8 +830,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar break; case VIDIOCSMICROCODE: - err = get_microcode32(&karg.vc, up); - compatible_arg = 0; + /* Copy the 32-bit "video_code32" to kernel space */ + if (copy_from_user(&karg.vc, up, sizeof(karg.vc))) + return -EFAULT; + /* Convert the 32-bit version to a 64-bit version in user space */ + up = get_microcode32(&karg.vc); break; case VIDIOCSFREQ: diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index 8327e24..300ec15 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -1040,6 +1040,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card) snprintf(s_attr->name, sizeof(s_attr->name), "attr_x%02x", attr->entries[cnt].id); + sysfs_attr_init(&s_attr->dev_attr.attr); s_attr->dev_attr.attr.name = s_attr->name; s_attr->dev_attr.attr.mode = S_IRUGO; s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id); @@ -1330,13 +1331,14 @@ static void mspro_block_remove(struct memstick_dev *card) struct mspro_block_data *msb = memstick_get_drvdata(card); unsigned long flags; - del_gendisk(msb->disk); - dev_dbg(&card->dev, "mspro block remove\n"); spin_lock_irqsave(&msb->q_lock, flags); msb->eject = 1; blk_start_queue(msb->queue); spin_unlock_irqrestore(&msb->q_lock, flags); + del_gendisk(msb->disk); + dev_dbg(&card->dev, "mspro block remove\n"); + blk_cleanup_queue(msb->queue); msb->queue = NULL; diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index f06b291..4dbe546 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -588,6 +588,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) } static int +mptctl_release(struct inode *inode, struct file *filep) +{ + fasync_helper(-1, filep, 0, &async_queue); + return 0; +} + +static int mptctl_fasync(int fd, struct file *filep, int mode) { MPT_ADAPTER *ioc; @@ -2796,6 +2803,7 @@ static const struct file_operations mptctl_fops = { .llseek = no_llseek, .fasync = mptctl_fasync, .unlocked_ioctl = mptctl_ioctl, + .release = mptctl_release, #ifdef CONFIG_COMPAT .compat_ioctl = compat_mpctl_ioctl, #endif diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 5c53624..ab783a3 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -1848,8 +1848,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) } out: - printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", - ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); + printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", + ioc->name, ((retval == SUCCESS) ? 
"SUCCESS" : "FAILED"), retval, + SCpnt, SCpnt->serial_number); return retval; } @@ -1886,7 +1887,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { - retval = SUCCESS; + retval = 0; goto out; } @@ -2459,6 +2460,8 @@ mptscsih_slave_configure(struct scsi_device *sdev) ioc->name,sdev->tagged_supported, sdev->simple_tags, sdev->ordered_tags)); + blk_queue_dma_alignment (sdev->request_queue, 512 - 1); + return 0; } diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 66379b4..c55e0a3 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -611,7 +611,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) ab3100_get_priv.ab3100 = ab3100; ab3100_get_priv.mode = false; ab3100_get_reg_file = debugfs_create_file("get_reg", - S_IWUGO, ab3100_dir, &ab3100_get_priv, + S_IWUSR, ab3100_dir, &ab3100_get_priv, &ab3100_get_set_reg_fops); if (!ab3100_get_reg_file) { err = -ENOMEM; @@ -621,7 +621,7 @@ static void ab3100_setup_debugfs(struct ab3100 *ab3100) ab3100_set_priv.ab3100 = ab3100; ab3100_set_priv.mode = true; ab3100_set_reg_file = debugfs_create_file("set_reg", - S_IWUGO, ab3100_dir, &ab3100_set_priv, + S_IWUSR, ab3100_dir, &ab3100_set_priv, &ab3100_get_set_reg_fops); if (!ab3100_set_reg_file) { err = -ENOMEM; diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 000cb41..92b85e2 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c @@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) idev->close = ucb1x00_ts_close; __set_bit(EV_ABS, idev->evbit); - __set_bit(ABS_X, idev->absbit); - __set_bit(ABS_Y, idev->absbit); - __set_bit(ABS_PRESSURE, idev->absbit); input_set_drvdata(idev, ts); + ucb1x00_adc_enable(ts->ucb); + ts->x_res = ucb1x00_ts_read_xres(ts); + ts->y_res = ucb1x00_ts_read_yres(ts); + ucb1x00_adc_disable(ts->ucb); + + input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); + input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); + input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); + err = input_register_device(idev); if (err) goto fail; diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 1a968f3..0f87572 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -1463,7 +1463,11 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret); goto err; } - if (ret != 0x6204) { + switch (ret) { + case 0x6204: + case 0x6246: + break; + default: dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret); ret = -EINVAL; goto err; @@ -1604,7 +1608,7 @@ static int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8320: ret = mfd_add_devices(wm831x->dev, -1, wm8320_devs, ARRAY_SIZE(wm8320_devs), - NULL, 0); + NULL, wm831x->irq_base); break; default: diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 7dabe4d..294183b 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type) irq = irq - wm831x->irq_base; - if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) - return -EINVAL; + if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { + /* Ignore internal-only IRQs */ + if (irq >= 0 && irq < WM831X_NUM_IRQS) + return 0; + else + return -EINVAL; + } switch (type) { case IRQ_TYPE_EDGE_BOTH: diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c index 
b8c6df9..6cfcb63 100644 --- a/drivers/misc/ad525x_dpot-spi.c +++ b/drivers/misc/ad525x_dpot-spi.c @@ -53,13 +53,13 @@ static int write8(void *client, u8 val) static int write16(void *client, u8 reg, u8 val) { u8 data[2] = {reg, val}; - return spi_write(client, data, 1); + return spi_write(client, data, 2); } static int write24(void *client, u8 reg, u16 val) { u8 data[3] = {reg, val >> 8, val}; - return spi_write(client, data, 1); + return spi_write(client, data, 3); } static int read8(void *client) diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index 48c84a5..00e5fcac8 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -285,8 +285,11 @@ enclosure_component_register(struct enclosure_device *edev, cdev->groups = enclosure_groups; err = device_register(cdev); - if (err) - ERR_PTR(err); + if (err) { + ecomp->number = -1; + put_device(cdev); + return ERR_PTR(err); + } return ecomp; } diff --git a/drivers/misc/ep93xx_pwm.c b/drivers/misc/ep93xx_pwm.c index 46b3439..16d7179 100644 --- a/drivers/misc/ep93xx_pwm.c +++ b/drivers/misc/ep93xx_pwm.c @@ -249,11 +249,11 @@ static ssize_t ep93xx_pwm_set_invert(struct device *dev, static DEVICE_ATTR(min_freq, S_IRUGO, ep93xx_pwm_get_min_freq, NULL); static DEVICE_ATTR(max_freq, S_IRUGO, ep93xx_pwm_get_max_freq, NULL); -static DEVICE_ATTR(freq, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(freq, S_IWUSR | S_IRUGO, ep93xx_pwm_get_freq, ep93xx_pwm_set_freq); -static DEVICE_ATTR(duty_percent, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(duty_percent, S_IWUSR | S_IRUGO, ep93xx_pwm_get_duty_percent, ep93xx_pwm_set_duty_percent); -static DEVICE_ATTR(invert, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(invert, S_IWUSR | S_IRUGO, ep93xx_pwm_get_invert, ep93xx_pwm_set_invert); static struct attribute *ep93xx_pwm_attrs[] = { diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index d551f09..6956f7e 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -439,18 +439,23 @@ xpc_discovery(void) * nodes that can comprise an access protection grouping. The access * protection is in regards to memory, IOI and IPI. 
*/ - max_regions = 64; region_size = xp_region_size; - switch (region_size) { - case 128: - max_regions *= 2; - case 64: - max_regions *= 2; - case 32: - max_regions *= 2; - region_size = 16; - DBUG_ON(!is_shub2()); + if (is_uv()) + max_regions = 256; + else { + max_regions = 64; + + switch (region_size) { + case 128: + max_regions *= 2; + case 64: + max_regions *= 2; + case 32: + max_regions *= 2; + region_size = 16; + DBUG_ON(!is_shub2()); + } } for (region = 0; region < max_regions; region++) { diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 1f59ee2..17bbacb 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -417,6 +417,7 @@ xpc_process_activate_IRQ_rcvd_uv(void) static void xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, struct xpc_activate_mq_msghdr_uv *msg_hdr, + int part_setup, int *wakeup_hb_checker) { unsigned long irq_flags; @@ -481,6 +482,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_closerequest_uv, hdr); @@ -497,6 +501,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { struct xpc_activate_mq_msg_chctl_closereply_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_closereply_uv, hdr); @@ -511,6 +518,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_openrequest_uv, hdr); @@ -528,6 +538,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { struct xpc_activate_mq_msg_chctl_openreply_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_openreply_uv, hdr); args = &part->remote_openclose_args[msg->ch_number]; @@ -545,6 +558,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: { struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; + if (!part_setup) + break; + msg = container_of(msg_hdr, struct xpc_activate_mq_msg_chctl_opencomplete_uv, hdr); spin_lock_irqsave(&part->chctl_lock, irq_flags); @@ -621,6 +637,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id) part_referenced = xpc_part_ref(part); xpc_handle_activate_mq_msg_uv(part, msg_hdr, + part_referenced, &wakeup_hb_checker); if (part_referenced) xpc_part_deref(part); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 569e94d..c9a8766 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1057,6 +1057,17 @@ void mmc_rescan(struct work_struct *work) container_of(work, struct mmc_host, detect.work); u32 ocr; int err; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (host->rescan_disable) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + spin_unlock_irqrestore(&host->lock, flags); + mmc_bus_get(host); @@ -1149,7 +1160,7 @@ void mmc_stop_host(struct mmc_host *host) if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); - cancel_delayed_work(&host->detect); + cancel_delayed_work_sync(&host->detect); mmc_flush_scheduled_work(); /* clear pm flags now and let card drivers set them as needed */ @@ 
-1310,28 +1321,62 @@ int mmc_resume_host(struct mmc_host *host) printk(KERN_WARNING "%s: error %d during resume " "(card was removed?)\n", mmc_hostname(host), err); - if (host->bus_ops->remove) - host->bus_ops->remove(host); - mmc_claim_host(host); - mmc_detach_bus(host); - mmc_release_host(host); - /* no need to bother upper layers */ err = 0; } } mmc_bus_put(host); - /* - * We add a slight delay here so that resume can progress - * in parallel. - */ - mmc_detect_change(host, 1); - return err; } - EXPORT_SYMBOL(mmc_resume_host); +/* Do the card removal on suspend if card is assumed removeable + * Do that in pm notifier while userspace isn't yet frozen, so we will be able + to sync the card. +*/ +int mmc_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused) +{ + struct mmc_host *host = container_of( + notify_block, struct mmc_host, pm_notify); + unsigned long flags; + + + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + + spin_lock_irqsave(&host->lock, flags); + host->rescan_disable = 1; + spin_unlock_irqrestore(&host->lock, flags); + cancel_delayed_work_sync(&host->detect); + + if (!host->bus_ops || host->bus_ops->suspend) + break; + + mmc_claim_host(host); + + if (host->bus_ops->remove) + host->bus_ops->remove(host); + + mmc_detach_bus(host); + mmc_release_host(host); + host->pm_flags = 0; + break; + + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + + spin_lock_irqsave(&host->lock, flags); + host->rescan_disable = 0; + spin_unlock_irqrestore(&host->lock, flags); + mmc_detect_change(host, 0); + + } + + return 0; +} #endif static int __init mmc_init(void) diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 4735390..d80cfdc 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -17,6 +17,7 @@ #include #include #include +#include #include @@ -85,6 +86,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); +#ifdef CONFIG_PM + host->pm_notify.notifier_call = mmc_pm_notify; +#endif /* * By default, hosts do not support SGIO or large requests. @@ -133,6 +137,7 @@ int mmc_add_host(struct mmc_host *host) #endif mmc_start_host(host); + register_pm_notifier(&host->pm_notify); return 0; } @@ -149,6 +154,7 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { + unregister_pm_notifier(&host->pm_notify); mmc_stop_host(host); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index b9dee28..ac3f2c0 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -309,6 +309,14 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, if (err) goto remove; + /* + * Update oldcard with the new RCA received from the SDIO + * device -- we're doing this so that it's updated in the + * "card" struct when oldcard overwrites that later. 
+ */ + if (oldcard) + oldcard->rca = card->rca; + mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); } diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index 5f3a599..e29a5fb 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c @@ -68,6 +68,7 @@ #include #include +#include #include #include @@ -492,10 +493,14 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command else if (data->flags & MMC_DATA_WRITE) cmdr |= AT91_MCI_TRCMD_START; - if (data->flags & MMC_DATA_STREAM) - cmdr |= AT91_MCI_TRTYP_STREAM; - if (data->blocks > 1) - cmdr |= AT91_MCI_TRTYP_MULTIPLE; + if (cmd->opcode == SD_IO_RW_EXTENDED) { + cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK; + } else { + if (data->flags & MMC_DATA_STREAM) + cmdr |= AT91_MCI_TRTYP_STREAM; + if (data->blocks > 1) + cmdr |= AT91_MCI_TRTYP_MULTIPLE; + } } else { block_length = 0; diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 95ef864..3a569bf 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -26,6 +26,7 @@ #include #include +#include #include #include @@ -532,12 +533,17 @@ static u32 atmci_prepare_command(struct mmc_host *mmc, data = cmd->data; if (data) { cmdr |= MCI_CMDR_START_XFER; - if (data->flags & MMC_DATA_STREAM) - cmdr |= MCI_CMDR_STREAM; - else if (data->blocks > 1) - cmdr |= MCI_CMDR_MULTI_BLOCK; - else - cmdr |= MCI_CMDR_BLOCK; + + if (cmd->opcode == SD_IO_RW_EXTENDED) { + cmdr |= MCI_CMDR_SDIO_BLOCK; + } else { + if (data->flags & MMC_DATA_STREAM) + cmdr |= MCI_CMDR_STREAM; + else if (data->blocks > 1) + cmdr |= MCI_CMDR_MULTI_BLOCK; + else + cmdr |= MCI_CMDR_BLOCK; + } if (data->flags & MMC_DATA_READ) cmdr |= MCI_CMDR_TRDIR_READ; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index ad30f07..76c0ec3 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -372,8 +372,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) sdhci_remove_host(host, 1); for (ptr = 0; ptr < 3; ptr++) { - clk_disable(sc->clk_bus[ptr]); - clk_put(sc->clk_bus[ptr]); + if (sc->clk_bus[ptr]) { + clk_disable(sc->clk_bus[ptr]); + clk_put(sc->clk_bus[ptr]); + } } clk_disable(sc->clk_io); clk_put(sc->clk_io); diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index ee7d0a5..69d98e3 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -164,6 +164,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) { struct mmc_data *data = host->data; + void *sg_virt; unsigned short *buf; unsigned int count; unsigned long flags; @@ -173,8 +174,8 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) return; } - buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) + - host->sg_off); + sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); + buf = (unsigned short *)(sg_virt + host->sg_off); count = host->sg_ptr->length - host->sg_off; if (count > data->blksz) @@ -191,7 +192,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) host->sg_off += count; - tmio_mmc_kunmap_atomic(host, &flags); + tmio_mmc_kunmap_atomic(sg_virt, &flags); if (host->sg_off == host->sg_ptr->length) tmio_mmc_next_sg(host); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 64f7d5d..0fedc78 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -82,10 +82,7 @@ #define ack_mmc_irqs(host, i) \ do { \ - u32 mask;\ - mask = sd_ctrl_read32((host), 
CTL_STATUS); \ - mask &= ~((i) & TMIO_MASK_IRQ); \ - sd_ctrl_write32((host), CTL_STATUS, mask); \ + sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ } while (0) @@ -177,19 +174,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host) return --host->sg_len; } -static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host, +static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) { - struct scatterlist *sg = host->sg_ptr; - local_irq_save(*flags); return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; } -static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host, +static inline void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags) { - kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ); + kunmap_atomic(virt, KM_BIO_SRC_IRQ); local_irq_restore(*flags); } diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 62f3ea9..3364061 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -717,7 +717,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, chip = &newcfi->chips[0]; for (i = 0; i < cfi->numchips; i++) { shared[i].writing = shared[i].erasing = NULL; - spin_lock_init(&shared[i].lock); + mutex_init(&shared[i].lock); for (j = 0; j < numparts; j++) { *chip = cfi->chips[i]; chip->start += j << partshift; @@ -886,7 +886,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr */ struct flchip_shared *shared = chip->priv; struct flchip *contender; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); contender = shared->writing; if (contender && contender != chip) { /* @@ -899,7 +899,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr * get_chip returns success we're clear to go ahead. */ ret = mutex_trylock(&contender->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); if (!ret) goto retry; mutex_unlock(&chip->mutex); @@ -914,7 +914,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr mutex_unlock(&contender->mutex); return ret; } - spin_lock(&shared->lock); + mutex_lock(&shared->lock); /* We should not own chip if it is already * in FL_SYNCING state. Put contender and retry. */ @@ -930,7 +930,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr * on this chip. Sleep. 
*/ if (mode == FL_ERASING && shared->erasing && shared->erasing->oldstate == FL_ERASING) { - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); @@ -944,7 +944,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr shared->writing = chip; if (mode == FL_ERASING) shared->erasing = chip; - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } ret = chip_ready(map, chip, adr, mode); if (ret == -EAGAIN) @@ -959,7 +959,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad if (chip->priv) { struct flchip_shared *shared = chip->priv; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); if (shared->writing == chip && chip->oldstate == FL_READY) { /* We own the ability to write, but we're done */ shared->writing = shared->erasing; @@ -967,7 +967,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad /* give back ownership to who we loaned it from */ struct flchip *loaner = shared->writing; mutex_lock(&loaner->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); mutex_unlock(&chip->mutex); put_chip(map, loaner, loaner->start); mutex_lock(&chip->mutex); @@ -985,11 +985,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad * Don't let the switch below mess things up since * we don't have ownership to resume anything. */ - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); wake_up(&chip->wq); return; } - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } switch(chip->oldstate) { diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index fece5be..04fdfcc 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c @@ -98,7 +98,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum; for (i = 0; i < numchips; i++) { shared[i].writing = shared[i].erasing = NULL; - spin_lock_init(&shared[i].lock); + mutex_init(&shared[i].lock); for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) { *chip = lpddr->chips[i]; chip->start += j << lpddr->chipshift; @@ -217,7 +217,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) */ struct flchip_shared *shared = chip->priv; struct flchip *contender; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); contender = shared->writing; if (contender && contender != chip) { /* @@ -230,7 +230,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) * get_chip returns success we're clear to go ahead. */ ret = mutex_trylock(&contender->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); if (!ret) goto retry; mutex_unlock(&chip->mutex); @@ -245,7 +245,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) mutex_unlock(&contender->mutex); return ret; } - spin_lock(&shared->lock); + mutex_lock(&shared->lock); /* We should not own chip if it is already in FL_SYNCING * state. Put contender and retry. */ @@ -261,7 +261,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) Must sleep in such a case. 
*/ if (mode == FL_ERASING && shared->erasing && shared->erasing->oldstate == FL_ERASING) { - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); set_current_state(TASK_UNINTERRUPTIBLE); add_wait_queue(&chip->wq, &wait); mutex_unlock(&chip->mutex); @@ -275,7 +275,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) shared->writing = chip; if (mode == FL_ERASING) shared->erasing = chip; - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } ret = chip_ready(map, chip, mode); @@ -348,7 +348,7 @@ static void put_chip(struct map_info *map, struct flchip *chip) { if (chip->priv) { struct flchip_shared *shared = chip->priv; - spin_lock(&shared->lock); + mutex_lock(&shared->lock); if (shared->writing == chip && chip->oldstate == FL_READY) { /* We own the ability to write, but we're done */ shared->writing = shared->erasing; @@ -356,7 +356,7 @@ static void put_chip(struct map_info *map, struct flchip *chip) /* give back the ownership */ struct flchip *loaner = shared->writing; mutex_lock(&loaner->mutex); - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); mutex_unlock(&chip->mutex); put_chip(map, loaner); mutex_lock(&chip->mutex); @@ -374,11 +374,11 @@ static void put_chip(struct map_info *map, struct flchip *chip) * Don't let the switch below mess things up since * we don't have ownership to resume anything. */ - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); wake_up(&chip->wq); return; } - spin_unlock(&shared->lock); + mutex_unlock(&shared->lock); } switch (chip->oldstate) { diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 82e9438..8878503 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -604,8 +604,8 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, /* Command pre-processing step */ switch (command) { case NAND_CMD_RESET: - send_cmd(host, command, false); preset(mtd); + send_cmd(host, command, false); break; case NAND_CMD_STATUS: diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 4a7b864..5bcc34a 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2852,6 +2852,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, */ if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && id_data[0] == NAND_MFR_SAMSUNG && + (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && id_data[5] != 0x00) { /* Calc pagesize */ mtd->writesize = 2048 << (extid & 0x03); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index ee87325..f613eb2 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1077,6 +1077,6 @@ static void __exit omap_nand_exit(void) module_init(omap_nand_init); module_exit(omap_nand_exit); -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 8d46731..317aff4 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c @@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) struct resource *res; int err = 0; + if (pdata->chip.nr_chips < 1) { + dev_err(&pdev->dev, "invalid number of chips specified\n"); + return -EINVAL; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; @@ -91,7 +96,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) } /* Scan to find existance of the device */ - if 
(nand_scan(&data->mtd, 1)) { + if (nand_scan(&data->mtd, pdata->chip.nr_chips)) { err = -ENXIO; goto out; } diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index e02fa4f..4d89f37 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = { #define tAR_NDTR1(r) (((r) >> 0) & 0xf) /* convert nano-seconds to nand flash controller clock cycles */ -#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1) +#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) /* convert nand flash controller clock cycles to nano-seconds */ #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000)) diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index e789149..ac08750 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c @@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = { .remove = __devexit_p(generic_onenand_remove), }; -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); static int __init generic_onenand_init(void) { diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 9f322f1..348ce71 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -815,7 +815,7 @@ static void __exit omap2_onenand_exit(void) module_init(omap2_onenand_init); module_exit(omap2_onenand_exit); -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jarkko Lavinen "); MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index 284a5f4..7b1b256 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status) { unsigned int protocol = (status >> 16) & 0x3; - if (likely((protocol == RxProtoTCP) && (!(status & TCPFail)))) + if (((protocol == RxProtoTCP) && !(status & TCPFail)) || + ((protocol == RxProtoUDP) && !(status & UDPFail))) return 1; - else if ((protocol == RxProtoUDP) && (!(status & UDPFail))) - return 1; - else if ((protocol == RxProtoIP) && (!(status & IPFail))) - return 1; - return 0; + else + return 0; } static int cp_rx_poll(struct napi_struct *napi, int budget) diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 63b9ba0..bbd6e30 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -2847,10 +2847,11 @@ static int atl1_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3cold, 0); atl1_reset_hw(&adapter->hw); - adapter->cmb.cmb->int_stats = 0; - if (netif_running(netdev)) + if (netif_running(netdev)) { + adapter->cmb.cmb->int_stats = 0; atl1_up(adapter); + } netif_device_attach(netdev); return 0; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 293f9c1..4173759 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -2168,8 +2168,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev, dev->irq = sdev->irq; SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); - netif_carrier_off(dev); - err = ssb_bus_powerup(sdev->bus, 0); if (err) { dev_err(sdev->dev, @@ -2209,6 +2207,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev, goto err_out_powerdown; } + netif_carrier_off(dev); + ssb_set_drvdata(sdev, dev); /* Chip reset provides power to the b44 MAC & PCI cores, which diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index b9ad799..2d6b77c 100644 --- 
a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1176,7 +1176,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, i = 0; netdev_for_each_mc_addr(ha, netdev) - memcpy(req->mac[i].byte, ha->addr, ETH_ALEN); + memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN); } else { req->promiscuous = 1; } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 822f586..0ddf4c6 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac if (!(dev->flags & IFF_MASTER)) goto out; + if (!pskb_may_pull(skb, sizeof(struct lacpdu))) + goto out; + read_lock(&bond->lock); slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), orig_dev); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 8d7dfd2..0cb1ded 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -369,6 +369,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct goto out; } + if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) + goto out; + if (skb->len < sizeof(struct arp_pkt)) { pr_debug("Packet is too small to be an ARP\n"); goto out; diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c index 969ffed..07fa65f 100644 --- a/drivers/net/bonding/bond_ipv6.c +++ b/drivers/net/bonding/bond_ipv6.c @@ -70,6 +70,13 @@ static void bond_na_send(struct net_device *slave_dev, }; struct sk_buff *skb; + /* The Ethernet header is built in ndisc_send_skb(), not + * ndisc_build_skb(), so we cannot insert a VLAN tag. Only an + * out-of-line tag inserted by the hardware will work. + */ + if (vlan_id && !(slave_dev->features & NETIF_F_HW_VLAN_TX)) + return; + icmp6h.icmp6_router = router; icmp6h.icmp6_solicited = 0; icmp6h.icmp6_override = 1; @@ -88,7 +95,7 @@ static void bond_na_send(struct net_device *slave_dev, } if (vlan_id) { - skb = vlan_put_tag(skb, vlan_id); + skb = __vlan_hwaccel_put_tag(skb, vlan_id); if (!skb) { pr_err("failed to insert VLAN tag\n"); return; diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 2aa3367..35cf491 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -235,11 +235,11 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n bond_for_each_slave(bond, slave, i) { if (slave->dev == slave_dev) { - break; + return slave; } } - return slave; + return 0; } static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index e3f1b85..3e0f19f 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) case CHELSIO_GET_QSET_NUM:{ struct ch_reg edata; + memset(&edata, 0, sizeof(struct ch_reg)); + edata.cmd = CHELSIO_GET_QSET_NUM; edata.val = pi->nqsets; if (copy_to_user(useraddr, &edata, sizeof(edata))) diff --git a/drivers/net/e100.c b/drivers/net/e100.c index b194bad..8e2eab4 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1779,6 +1779,7 @@ static int e100_tx_clean(struct nic *nic) for (cb = nic->cb_to_clean; cb->status & cpu_to_le16(cb_complete); cb = nic->cb_to_clean = cb->next) { + rmb(); /* read skb after status */ netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev, "cb[%d]->status = 0x%04X\n", (int)(((void*)cb - 
(void*)nic->cbs)/sizeof(struct cb)), @@ -1927,6 +1928,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx, netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev, "status=0x%04X\n", rfd_status); + rmb(); /* read size after status bit */ /* If data isn't ready, nothing to indicate */ if (unlikely(!(rfd_status & cb_complete))) { diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 68a8089..a7f39ea 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -31,7 +31,7 @@ char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.21-k6-NAPI" +#define DRV_VERSION "7.3.21-k8-NAPI" const char e1000_driver_version[] = DRV_VERSION; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -483,9 +483,6 @@ void e1000_down(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; u32 rctl, tctl; - /* signal that we're down so the interrupt handler does not - * reschedule our watchdog timer */ - set_bit(__E1000_DOWN, &adapter->flags); /* disable receives in the hardware */ rctl = er32(RCTL); @@ -506,6 +503,13 @@ void e1000_down(struct e1000_adapter *adapter) e1000_irq_disable(adapter); + /* + * Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * timers and tasks from rescheduling. + */ + set_bit(__E1000_DOWN, &adapter->flags); + del_timer_sync(&adapter->tx_fifo_stall_timer); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); @@ -3448,6 +3452,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -3637,6 +3642,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; @@ -3843,6 +3849,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c index f654db9..d206f21 100644 --- a/drivers/net/e1000e/82571.c +++ b/drivers/net/e1000e/82571.c @@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(IMC, 0xffffffff); icr = er32(ICR); - /* Install any alternate MAC address into RAR0 */ - ret_val = e1000_check_alt_mac_addr_generic(hw); - if (ret_val) - return ret_val; + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; - e1000e_set_laa_state_82571(hw, true); + e1000e_set_laa_state_82571(hw, true); + } /* Reinitialize the 82571 serdes link state machine */ if (hw->phy.media_type == e1000_media_type_internal_serdes) @@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) { s32 ret_val = 0; - /* - * If there's an alternate MAC address place it in RAR0 - * so that it will override the Si installed default perm - * address. 
- */ - ret_val = e1000_check_alt_mac_addr_generic(hw); - if (ret_val) - goto out; + if (hw->mac.type == e1000_82571) { + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + } ret_val = e1000_read_mac_addr_generic(hw); @@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = { | FLAG_HAS_SMART_POWER_DOWN | FLAG_HAS_AMT | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1, .pba = 20, .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index 4dc02c7..75289ca 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -620,6 +620,7 @@ #define E1000_FLASH_UPDATES 2000 /* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 #define NVM_ID_LED_SETTINGS 0x0004 #define NVM_INIT_CONTROL2_REG 0x000F #define NVM_INIT_CONTROL3_PORT_B 0x0014 @@ -642,6 +643,9 @@ /* Mask bits for fields in Word 0x1a of the NVM */ #define NVM_WORD1A_ASPM_MASK 0x000C +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 5d1220d..664ed58 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -308,7 +308,7 @@ enum e1e_registers { #define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ #define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 -#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 #define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400 #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c index a968e3a..768c105 100644 --- a/drivers/net/e1000e/lib.c +++ b/drivers/net/e1000e/lib.c @@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) u16 offset, nvm_alt_mac_addr_offset, nvm_data; u8 alt_mac_addr[ETH_ALEN]; + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + goto out; + + /* Check for LOM (vs. 
NIC) or one of two valid mezzanine cards */ + if (!((nvm_data & NVM_COMPAT_LOM) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) + goto out; + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, &nvm_alt_mac_addr_offset); if (ret_val) { diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 57a7e41..513a4b8 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -774,6 +774,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; @@ -984,6 +985,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ for (; !cleaned; count++) { tx_desc = E1000_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1080,6 +1082,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, break; (*work_done)++; skb = buffer_info->skb; + rmb(); /* read descriptor and rx_buffer_info after status DD */ /* in the packet split case this is header only */ prefetch(skb->data - NET_IP_ALIGN); @@ -1279,6 +1282,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; @@ -3419,13 +3423,18 @@ static int e1000_test_msi(struct e1000_adapter *adapter) /* disable SERR in case the MSI write causes a master abort */ pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); - pci_write_config_word(adapter->pdev, PCI_COMMAND, - pci_cmd & ~PCI_COMMAND_SERR); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); err = e1000_test_msi_interrupt(adapter); - /* restore previous setting of command word */ - pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } /* success ! 
*/ if (!err) @@ -5669,7 +5678,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = er32(WUC); eeprom_apme_mask = E1000_WUC_APME; - if (eeprom_data & E1000_WUC_PHY_WAKE) + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && diff --git a/drivers/net/eql.c b/drivers/net/eql.c index dda2c79..0cb1cf9 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) equalizer_t *eql; master_config_t mc; + memset(&mc, 0, sizeof(master_config_t)); + if (eql_is_master(dev)) { eql = netdev_priv(dev); mc.max_slaves = eql->max_slaves; diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 28b53d1..26a1aca 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -2427,7 +2427,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size && skb_recycle_check(skb, priv->rx_buffer_size + RXBUF_ALIGNMENT)) - __skb_queue_head(&priv->rx_recycle, skb); + skb_queue_head(&priv->rx_recycle, skb); else dev_kfree_skb_any(skb); @@ -2498,7 +2498,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev) struct gfar_private *priv = netdev_priv(dev); struct sk_buff *skb = NULL; - skb = __skb_dequeue(&priv->rx_recycle); + skb = skb_dequeue(&priv->rx_recycle); if (!skb) skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT); @@ -2675,7 +2675,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit) * recycle list. */ skb_reserve(skb, -GFAR_CB(skb)->alignamount); - __skb_queue_head(&priv->rx_recycle, skb); + skb_queue_head(&priv->rx_recycle, skb); } } else { /* Increment the number of packets */ diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 9bda023..ddab703 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -538,7 +538,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data) unlock_tx_qs(priv); unlock_rx_qs(priv); - local_irq_save(flags); + local_irq_restore(flags); for (i = 0; i < priv->num_rx_queues; i++) gfar_clean_rx_ring(priv->rx_queue[i], diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index ab9f675..fe337bd 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -104,6 +104,8 @@ static void ri_tasklet(unsigned long dev) rcu_read_unlock(); dev_kfree_skb(skb); stats->tx_dropped++; + if (skb_queue_len(&dp->tq) != 0) + goto resched; break; } rcu_read_unlock(); diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index cea37e0..c9cb9c4 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -630,9 +630,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) for (; i < adapter->rss_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + Q_IDX_82576(i); - for (; j < adapter->rss_queues; j++) - adapter->tx_ring[j]->reg_idx = rbase_offset + - Q_IDX_82576(j); } case e1000_82575: case e1000_82580: @@ -996,7 +993,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) /* Number of supported queues. 
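Note on the memset() added to eql_g_master_cfg() above: it is the usual guard against copying uninitialized kernel stack bytes out through a get-config ioctl; every byte of a structure handed back to user space must be initialized, including padding and fields the code might not fill on every path. A small sketch of the pattern under assumed names (my_cfg and my_get_cfg_ioctl are hypothetical, not from the eql driver).

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct my_cfg {
	u32 max_slaves;
	u32 min_slaves;
	u32 unused[4];		/* padding/unused fields reach user space too */
};

static int my_get_cfg_ioctl(void __user *arg, u32 max, u32 min)
{
	struct my_cfg cfg;

	memset(&cfg, 0, sizeof(cfg));	/* no stale stack bytes leak out */
	cfg.max_slaves = max;
	cfg.min_slaves = min;

	return copy_to_user(arg, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}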
*/ adapter->num_rx_queues = adapter->rss_queues; - adapter->num_tx_queues = adapter->rss_queues; + if (adapter->vfs_allocated_count) + adapter->num_tx_queues = 1; + else + adapter->num_tx_queues = adapter->rss_queues; /* start with one vector for every rx queue */ numvecs = adapter->num_rx_queues; @@ -5344,6 +5344,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { + rmb(); /* read buffer_info after eop_desc status */ for (cleaned = false; !cleaned; count++) { tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -5549,6 +5550,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector, if (*work_done >= budget) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ skb = buffer_info->skb; prefetch(skb->data - NET_IP_ALIGN); diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h index debeee2..f7ae284 100644 --- a/drivers/net/igbvf/igbvf.h +++ b/drivers/net/igbvf/igbvf.h @@ -126,7 +126,6 @@ struct igbvf_buffer { unsigned int page_offset; }; }; - struct page *page; }; union igbvf_desc { diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c index 5e2b2a8..57b5fee 100644 --- a/drivers/net/igbvf/netdev.c +++ b/drivers/net/igbvf/netdev.c @@ -248,6 +248,7 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, if (*work_done >= work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ buffer_info = &rx_ring->buffer_info[i]; @@ -780,6 +781,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && (count < tx_ring->count)) { + rmb(); /* read buffer_info after eop_desc status */ for (cleaned = false; !cleaned; count++) { tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index c6b75c8..45fc89b 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1816,6 +1816,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter) while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) { + rmb(); /* read buffer_info after eop_desc */ for (cleaned = false; !cleaned; ) { tx_desc = IXGB_TX_DESC(*tx_ring, i); buffer_info = &tx_ring->buffer_info[i]; @@ -1976,6 +1977,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ status = rx_desc->status; skb = buffer_info->skb; buffer_info->skb = NULL; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 74d9b6d..7782437 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -748,6 +748,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && (count < tx_ring->work_limit)) { bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { struct sk_buff *skb; tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); @@ -2657,6 +2658,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) if (!adapter->num_vfs) adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; + /* Disable packet split due to 82599 erratum #45 */ + if (hw->mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; + /* Set the RX buffer length according to the mode */ if 
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { rx_buf_len = IXGBE_RX_HDR_SIZE; diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index a16cff7..3ea59f1 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -231,6 +231,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && (count < tx_ring->work_limit)) { bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ for ( ; !cleaned; count++) { struct sk_buff *skb; tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); @@ -518,6 +519,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, break; (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> diff --git a/drivers/net/jme.c b/drivers/net/jme.c index 99f24f5..f0643ac 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -1575,6 +1575,16 @@ jme_free_irq(struct jme_adapter *jme) } } +static inline void +jme_phy_on(struct jme_adapter *jme) +{ + u32 bmcr; + + bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR); + bmcr &= ~BMCR_PDOWN; + jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr); +} + static int jme_open(struct net_device *netdev) { @@ -1595,10 +1605,12 @@ jme_open(struct net_device *netdev) jme_start_irq(jme); - if (test_bit(JME_FLAG_SSET, &jme->flags)) + if (test_bit(JME_FLAG_SSET, &jme->flags)) { + jme_phy_on(jme); jme_set_settings(netdev, &jme->old_ecmd); - else + } else { jme_reset_phy_processor(jme); + } jme_reset_link(jme); @@ -3006,10 +3018,12 @@ jme_resume(struct pci_dev *pdev) jme_clear_pm(jme); pci_restore_state(pdev); - if (test_bit(JME_FLAG_SSET, &jme->flags)) + if (test_bit(JME_FLAG_SSET, &jme->flags)) { + jme_phy_on(jme); jme_set_settings(netdev, &jme->old_ecmd); - else + } else { jme_reset_phy_processor(jme); + } jme_start_irq(jme); netif_device_attach(netdev); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e0b47cc..2437c7e 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -3600,6 +3600,7 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp) dma_free_coherent(&pdev->dev, bytes, ss->fw_stats, ss->fw_stats_bus); ss->fw_stats = NULL; + netif_napi_del(&ss->napi); } } kfree(mgp->ss); diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index c865dda..81f982a 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter, if (pkt_offset) skb_pull(skb, pkt_offset); - skb->truesize = skb->len + sizeof(struct sk_buff); skb->protocol = eth_type_trans(skb, netdev); napi_gro_receive(&sds_ring->napi, skb); @@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter, skb_put(skb, lro_length + data_offset); - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); - skb_pull(skb, l2_hdr_offset); skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index bfdef72..c5f1f25 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1602,6 +1602,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 
0x3953d9b9), PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), + PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 71a4e66..a4c69d5 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c @@ -1363,7 +1363,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, if (pkt_offset) skb_pull(skb, pkt_offset); - skb->truesize = skb->len + sizeof(struct sk_buff); skb->protocol = eth_type_trans(skb, netdev); napi_gro_receive(&sds_ring->napi, skb); @@ -1425,8 +1424,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, skb_put(skb, lro_length + data_offset); - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); - skb_pull(skb, l2_hdr_offset); skb->protocol = eth_type_trans(skb, netdev); @@ -1659,8 +1656,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, if (pkt_offset) skb_pull(skb, pkt_offset); - skb->truesize = skb->len + sizeof(struct sk_buff); - if (!qlcnic_check_loopback_buff(skb->data)) adapter->diag_cnt++; diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 9a251ac..b7758d3 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -988,16 +988,18 @@ static void r6040_multicast_list(struct net_device *dev) /* Multicast Address 1~4 case */ i = 0; netdev_for_each_mc_addr(ha, dev) { - if (i < MCAST_MAX) { - adrp = (u16 *) ha->addr; - iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); - iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); - iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); - } else { - iowrite16(0xffff, ioaddr + MID_1L + 8 * i); - iowrite16(0xffff, ioaddr + MID_1M + 8 * i); - iowrite16(0xffff, ioaddr + MID_1H + 8 * i); - } + if (i >= MCAST_MAX) + break; + adrp = (u16 *) ha->addr; + iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); + iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); + iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); + i++; + } + while (i < MCAST_MAX) { + iowrite16(0xffff, ioaddr + MID_1L + 8 * i); + iowrite16(0xffff, ioaddr + MID_1M + 8 * i); + iowrite16(0xffff, ioaddr + MID_1H + 8 * i); i++; } } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index cdc6a5c..48ed375 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -855,10 +856,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) else tp->features &= ~RTL_FEATURE_WOL; __rtl8169_set_wol(tp, wol->wolopts); - device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); - spin_unlock_irq(&tp->lock); + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + return 0; } @@ -3040,6 +3041,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) mii->reg_num_mask = 0x1f; mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ rc = pci_enable_device(pdev); if (rc < 0) { @@ -3744,7 +3750,8 @@ static void rtl_hw_start_8168(struct net_device *dev) RTL_W16(IntrMitigate, 0x5151); /* Work around for RxFIFO overflow. */ - if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + if (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22) { tp->intr_event |= RxFIFOOver | PCSTimeout; tp->intr_event &= ~RxOverflow; } @@ -4008,7 +4015,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct net_device *dev, struct RxDesc *desc, int rx_buf_sz, - unsigned int align) + unsigned int align, gfp_t gfp) { struct sk_buff *skb; dma_addr_t mapping; @@ -4016,7 +4023,7 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, pad = align ? align : NET_IP_ALIGN; - skb = netdev_alloc_skb(dev, rx_buf_sz + pad); + skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp); if (!skb) goto err_out; @@ -4047,7 +4054,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp) } static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, - u32 start, u32 end) + u32 start, u32 end, gfp_t gfp) { u32 cur; @@ -4062,7 +4069,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, tp->RxDescArray + i, - tp->rx_buf_sz, tp->align); + tp->rx_buf_sz, tp->align, gfp); if (!skb) break; @@ -4090,7 +4097,7 @@ static int rtl8169_init_ring(struct net_device *dev) memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); - if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) + if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC) goto err_out; rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); @@ -4458,14 +4465,12 @@ static inline int rtl8169_fragmented_frame(u32 status) return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); } -static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc) +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) { - u32 opts1 = le32_to_cpu(desc->opts1); u32 status = opts1 & RxProtoMask; if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || - ((status == RxProtoUDP) && !(opts1 & UDPFail)) || - ((status == RxProtoIP) && !(opts1 & IPFail))) + ((status == RxProtoUDP) && !(opts1 & UDPFail))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; @@ -4554,8 +4559,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev, continue; } - rtl8169_rx_csum(skb, desc); - if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { pci_dma_sync_single_for_device(pdev, addr, pkt_size, PCI_DMA_FROMDEVICE); @@ -4566,6 +4569,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, tp->Rx_skbuff[entry] = NULL; } + rtl8169_rx_csum(skb, status); skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); @@ -4591,7 +4595,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; - delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); + delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC); if (!delta && count) netif_info(tp, intr, dev, "no Rx buffer allocated\n"); tp->dirty_rx += delta; @@ -4634,7 +4638,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) /* Work around for rx fifo overflow */ if (unlikely(status & RxFIFOOver) && 
- (tp->mac_version == RTL_GIGA_MAC_VER_11)) { + (tp->mac_version == RTL_GIGA_MAC_VER_11 || + tp->mac_version == RTL_GIGA_MAC_VER_22)) { netif_stop_queue(dev); rtl8169_tx_timeout(dev); break; @@ -4895,6 +4900,9 @@ static int rtl8169_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_phy(dev, tp); if (netif_running(dev)) __rtl8169_resume(dev); @@ -4935,6 +4943,8 @@ static int rtl8169_runtime_resume(struct device *device) tp->saved_wolopts = 0; spin_unlock_irq(&tp->lock); + rtl8169_init_phy(dev, tp); + __rtl8169_resume(dev); return 0; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 40e5c46..465ae7e 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include "skge.h" @@ -3868,6 +3869,8 @@ static void __devinit skge_show_addr(struct net_device *dev) netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); } +static int only_32bit_dma; + static int __devinit skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -3889,7 +3892,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, pci_set_master(pdev); - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { @@ -4147,8 +4150,21 @@ static struct pci_driver skge_driver = { .shutdown = skge_shutdown, }; +static struct dmi_system_id skge_32bit_dma_boards[] = { + { + .ident = "Gigabyte nForce boards", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), + DMI_MATCH(DMI_BOARD_NAME, "nForce"), + }, + }, + {} +}; + static int __init skge_init_module(void) { + if (dmi_check_system(skge_32bit_dma_boards)) + only_32bit_dma = 1; skge_debug_init(); return pci_register_driver(&skge_driver); } diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index cc55974..7a7b01a 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -84,8 +84,7 @@ struct smsc911x_data { */ spinlock_t mac_lock; - /* spinlock to ensure 16-bit accesses are serialised. - * unused with a 32-bit bus */ + /* spinlock to ensure register accesses are serialised */ spinlock_t dev_lock; struct phy_device *phy_dev; @@ -118,37 +117,33 @@ struct smsc911x_data { unsigned int hashlo; }; -/* The 16-bit access functions are significantly slower, due to the locking - * necessary. If your bus hardware can be configured to do this for you - * (in response to a single 32-bit operation from software), you should use - * the 32-bit access functions instead. 
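Note on the skge change above: it is a board-level quirk, a DMI match table identifies the affected Gigabyte/nForce boards at module init and forces 32-bit DMA before any device is probed. A generic sketch of that mechanism with made-up vendor/board strings; my_use_32bit_dma and my_init are hypothetical.

#include <linux/dmi.h>
#include <linux/init.h>

static int my_use_32bit_dma;

static const struct dmi_system_id my_dma_quirks[] = {
	{
		.ident = "Example board with unreliable 64-bit DMA",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_BOARD_NAME, "Example Board"),
		},
	},
	{ }	/* terminating entry */
};

static int __init my_init(void)
{
	if (dmi_check_system(my_dma_quirks))
		my_use_32bit_dma = 1;	/* checked later in the PCI probe path */
	return 0;
}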
*/ - -static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) { if (pdata->config.flags & SMSC911X_USE_32BIT) return readl(pdata->ioaddr + reg); - if (pdata->config.flags & SMSC911X_USE_16BIT) { - u32 data; - unsigned long flags; - - /* these two 16-bit reads must be performed consecutively, so - * must not be interrupted by our own ISR (which would start - * another read operation) */ - spin_lock_irqsave(&pdata->dev_lock, flags); - data = ((readw(pdata->ioaddr + reg) & 0xFFFF) | + if (pdata->config.flags & SMSC911X_USE_16BIT) + return ((readw(pdata->ioaddr + reg) & 0xFFFF) | ((readw(pdata->ioaddr + reg + 2) & 0xFFFF) << 16)); - spin_unlock_irqrestore(&pdata->dev_lock, flags); - - return data; - } BUG(); return 0; } -static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, - u32 val) +static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) +{ + u32 data; + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + data = __smsc911x_reg_read(pdata, reg); + spin_unlock_irqrestore(&pdata->dev_lock, flags); + + return data; +} + +static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) { if (pdata->config.flags & SMSC911X_USE_32BIT) { writel(val, pdata->ioaddr + reg); @@ -156,44 +151,54 @@ static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, } if (pdata->config.flags & SMSC911X_USE_16BIT) { - unsigned long flags; - - /* these two 16-bit writes must be performed consecutively, so - * must not be interrupted by our own ISR (which would start - * another read operation) */ - spin_lock_irqsave(&pdata->dev_lock, flags); writew(val & 0xFFFF, pdata->ioaddr + reg); writew((val >> 16) & 0xFFFF, pdata->ioaddr + reg + 2); - spin_unlock_irqrestore(&pdata->dev_lock, flags); return; } BUG(); } +static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, + u32 val) +{ + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + __smsc911x_reg_write(pdata, reg, val); + spin_unlock_irqrestore(&pdata->dev_lock, flags); +} + /* Writes a packet to the TX_DATA_FIFO */ static inline void smsc911x_tx_writefifo(struct smsc911x_data *pdata, unsigned int *buf, unsigned int wordcount) { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { while (wordcount--) - smsc911x_reg_write(pdata, TX_DATA_FIFO, swab32(*buf++)); - return; + __smsc911x_reg_write(pdata, TX_DATA_FIFO, + swab32(*buf++)); + goto out; } if (pdata->config.flags & SMSC911X_USE_32BIT) { writesl(pdata->ioaddr + TX_DATA_FIFO, buf, wordcount); - return; + goto out; } if (pdata->config.flags & SMSC911X_USE_16BIT) { while (wordcount--) - smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); - return; + __smsc911x_reg_write(pdata, TX_DATA_FIFO, *buf++); + goto out; } BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); } /* Reads a packet out of the RX_DATA_FIFO */ @@ -201,24 +206,31 @@ static inline void smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf, unsigned int wordcount) { + unsigned long flags; + + spin_lock_irqsave(&pdata->dev_lock, flags); + if (pdata->config.flags & SMSC911X_SWAP_FIFO) { while (wordcount--) - *buf++ = swab32(smsc911x_reg_read(pdata, RX_DATA_FIFO)); - return; + *buf++ = swab32(__smsc911x_reg_read(pdata, + RX_DATA_FIFO)); + goto out; } if (pdata->config.flags & SMSC911X_USE_32BIT) { readsl(pdata->ioaddr + RX_DATA_FIFO, buf, 
wordcount); - return; + goto out; } if (pdata->config.flags & SMSC911X_USE_16BIT) { while (wordcount--) - *buf++ = smsc911x_reg_read(pdata, RX_DATA_FIFO); - return; + *buf++ = __smsc911x_reg_read(pdata, RX_DATA_FIFO); + goto out; } BUG(); +out: + spin_unlock_irqrestore(&pdata->dev_lock, flags); } /* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index 737df60..2ce585a 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -324,7 +324,7 @@ static int bdx_fw_load(struct bdx_priv *priv) ENTER; master = READ_REG(priv, regINIT_SEMAPHORE); if (!READ_REG(priv, regINIT_STATUS) && master) { - rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev); + rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev); if (rc) goto out; bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size); @@ -2516,4 +2516,4 @@ module_exit(bdx_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(BDX_DRV_DESC); -MODULE_FIRMWARE("tehuti/firmware.bin"); +MODULE_FIRMWARE("tehuti/bdx.bin"); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index dc94445..b18cda4 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -326,13 +326,13 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) { static const char ifname[] = "usbpn%d"; const struct usb_cdc_union_desc *union_header = NULL; - const struct usb_cdc_header_desc *phonet_header = NULL; const struct usb_host_interface *data_desc; struct usb_interface *data_intf; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *dev; struct usbpn_dev *pnd; u8 *data; + int phonet = 0; int len, err; data = intf->altsetting->extra; @@ -353,10 +353,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) (struct usb_cdc_union_desc *)data; break; case 0xAB: - if (phonet_header || dlen < 5) - break; - phonet_header = - (struct usb_cdc_header_desc *)data; + phonet = 1; break; } } @@ -364,7 +361,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id) len -= dlen; } - if (!union_header || !phonet_header) + if (!union_header || !phonet) return -EINVAL; data_intf = usb_ifnum_to_if(usbdev, union_header->bSlaveInterface0); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 4dd2351..e254274 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1653,6 +1653,8 @@ static int hso_get_count(struct hso_serial *serial, struct uart_icount cnow; struct hso_tiocmget *tiocmget = serial->tiocmget; + memset(&icount, 0, sizeof(struct serial_icounter_struct)); + if (!tiocmget) return -ENOENT; spin_lock_irq(&serial->serial_lock); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 81c76ad..2793cff 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -44,6 +44,7 @@ #include #include #include +#include #define DRIVER_VERSION "22-Aug-2005" @@ -1272,6 +1273,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) struct usb_device *xdev; int status; const char *name; + struct usb_driver *driver = to_usb_driver(udev->dev.driver); + + /* usbnet already took usb runtime pm, so have to enable the feature + * for usb interface, otherwise usb_autopm_get_interface may return + * failure if USB_SUSPEND(RUNTIME_PM) is enabled. 
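Note on the smsc911x rework above: each register accessor is split into an unlocked __smsc911x_* helper and a locked wrapper, so the FIFO routines can take dev_lock once per burst instead of once per word while the paired 16-bit accesses stay serialised. A condensed sketch of that structure with hypothetical names (my_lock, my_base, MY_RX_FIFO are placeholders).

#include <linux/types.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#define MY_RX_FIFO	0x40		/* hypothetical register offset */

static DEFINE_SPINLOCK(my_lock);
static void __iomem *my_base;		/* ioremap()ed elsewhere */

/* caller holds my_lock; the two 16-bit halves must not be interleaved */
static inline u32 __my_reg_read(u32 reg)
{
	return (readw(my_base + reg) & 0xFFFF) |
	       ((readw(my_base + reg + 2) & 0xFFFF) << 16);
}

static inline u32 my_reg_read(u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&my_lock, flags);
	val = __my_reg_read(reg);
	spin_unlock_irqrestore(&my_lock, flags);
	return val;
}

static void my_rx_readfifo(u32 *buf, unsigned int words)
{
	unsigned long flags;

	/* one lock acquisition per burst, not one per word */
	spin_lock_irqsave(&my_lock, flags);
	while (words--)
		*buf++ = __my_reg_read(MY_RX_FIFO);
	spin_unlock_irqrestore(&my_lock, flags);
}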
+ */ + if (!driver->supports_autosuspend) { + driver->supports_autosuspend = 1; + pm_runtime_enable(&udev->dev); + } name = udev->dev.driver->name; info = (struct driver_info *) prod->driver_info; diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 648972d..f1e3469 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -472,6 +473,26 @@ ath5k_pci_probe(struct pci_dev *pdev, int ret; u8 csz; + /* + * L0s needs to be disabled on all ath5k cards. + * + * For distributions shipping with CONFIG_PCIEASPM (this will be enabled + * by default in the future in 2.6.36) this will also mean both L1 and + * L0s will be disabled when a pre 1.1 PCIe device is detected. We do + * know L1 works correctly even for all ath5k pre 1.1 PCIe devices + * though but cannot currently undue the effect of a blacklist, for + * details you can read pcie_aspm_sanity_check() and see how it adjusts + * the device link capability. + * + * It may be possible in the future to implement some PCI API to allow + * drivers to override blacklists for pre 1.1 PCIe but for now it is + * best to accept that both L0s and L1 will be disabled completely for + * distributions shipping with CONFIG_PCIEASPM rather than having this + * issue present. Motivation for adding this new API will be to help + * with power consumption for some of these devices. + */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); + ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "can't enable device\n"); @@ -1297,6 +1318,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, PCI_DMA_TODEVICE); rate = ieee80211_get_tx_rate(sc->hw, info); + if (!rate) { + ret = -EINVAL; + goto err_unmap; + } if (info->flags & IEEE80211_TX_CTL_NO_ACK) flags |= AR5K_TXDESC_NOACK; diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index 5fdbb53..dabafb8 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -239,7 +239,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) if (qCoff > 15) qCoff = 15; else if (qCoff <= -16) - qCoff = 16; + qCoff = -16; ath_print(common, ATH_DBG_CALIBRATE, "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 23eb60e..5543465 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -768,7 +768,7 @@ static bool ar9300_uncompress_block(struct ath_hw *ah, length = block[it+1]; length &= 0xff; - if (length > 0 && spot >= 0 && spot+length < mdataSize) { + if (length > 0 && spot >= 0 && spot+length <= mdataSize) { ath_print(common, ATH_DBG_EEPROM, "Restore at %d: spot=%d " "offset=%d length=%d\n", @@ -944,7 +944,7 @@ static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah, return 1; } -static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { return -EINVAL; diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 5ea8773..dcfe6e1 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -174,8 +174,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, 
/* returns delimiter padding required given the packet length */ #define ATH_AGGR_GET_NDELIM(_len) \ - (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \ - (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2) + (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \ + DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ)) #define BAW_WITHIN(_start, _bawsz, _seqno) \ ((((_seqno) - (_start)) & 4095) < (_bawsz)) diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 21354c1..0e8489d 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -62,7 +62,7 @@ #define SD_NO_CTL 0xE0 #define NO_CTL 0xff -#define CTL_MODE_M 7 +#define CTL_MODE_M 0xf #define CTL_11A 0 #define CTL_11B 1 #define CTL_11G 2 @@ -669,7 +669,7 @@ struct eeprom_ops { int (*get_eeprom_ver)(struct ath_hw *hw); int (*get_eeprom_rev)(struct ath_hw *hw); u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band); - u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, + u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw, struct ath9k_channel *chan); void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 41a77d1..1576bbbd 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -1149,13 +1149,13 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah, } } -static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; - return pModal->antCtrlCommon & 0xFFFF; + return pModal->antCtrlCommon; } static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index b471db5..2705eb0 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -1131,13 +1131,13 @@ static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah, return 1; } -static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar9287_eeprom *eep = &ah->eeprom.map9287; struct modal_eep_ar9287_header *pModal = &eep->modalHeader; - return pModal->antCtrlCommon & 0xFFFF; + return pModal->antCtrlCommon; } static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 7e1ed78..8f57ebf 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -729,7 +729,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah, vpdTableI[i][sizeCurrVpdTable - 2]); vpdStep = (int16_t)((vpdStep < 1) ? 
1 : vpdStep); - if (tgtIndex > maxIndex) { + if (tgtIndex >= maxIndex) { while ((ss <= tgtIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] + @@ -1064,15 +1064,19 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, case 1: break; case 2: - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; + if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; + else + scaledPower = 0; break; case 3: - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; + if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; + else + scaledPower = 0; break; } - scaledPower = max((u16)0, scaledPower); - if (IS_CHAN_2GHZ(chan)) { numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; @@ -1437,14 +1441,14 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah, return num_ant_config; } -static u16 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah, +static u32 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah, struct ath9k_channel *chan) { struct ar5416_eeprom_def *eep = &ah->eeprom.def; struct modal_eep_header *pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); - return pModal->antCtrlCommon & 0xFFFF; + return pModal->antCtrlCommon; } static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9d371c1..5f3c47d 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -370,7 +370,8 @@ static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv, priv->tgt_rate.rates.ht_rates.rs_nrates = j; caps = WLAN_RC_HT_FLAG; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) + if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) && + (conf_is_ht40(&priv->hw->conf))) caps |= WLAN_RC_40_FLAG; if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) caps |= WLAN_RC_SGI_FLAG; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 2571b44..5fcbc2f 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -68,18 +68,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_sta *sta = tx_info->control.sta; struct ath9k_htc_sta *ista; - struct ath9k_htc_vif *avp; struct ath9k_htc_tx_ctl tx_ctl; enum htc_endpoint_id epid; u16 qnum, hw_qnum; __le16 fc; u8 *tx_fhdr; - u8 sta_idx; + u8 sta_idx, vif_idx; hdr = (struct ieee80211_hdr *) skb->data; fc = hdr->frame_control; - avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv; + if (tx_info->control.vif && + (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv) + vif_idx = ((struct ath9k_htc_vif *) + tx_info->control.vif->drv_priv)->index; + else + vif_idx = priv->nvifs; + if (sta) { ista = (struct ath9k_htc_sta *) sta->drv_priv; sta_idx = ista->index; @@ -96,7 +101,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); tx_hdr.node_idx = sta_idx; - tx_hdr.vif_idx = avp->index; + tx_hdr.vif_idx = vif_idx; if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { tx_ctl.type = ATH9K_HTC_AMPDU; @@ -156,7 +161,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) tx_ctl.type = ATH9K_HTC_NORMAL; mgmt_hdr.node_idx = 
sta_idx; - mgmt_hdr.vif_idx = avp->index; + mgmt_hdr.vif_idx = vif_idx; mgmt_hdr.tidno = 0; mgmt_hdr.flags = 0; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index c33f17d..4b9378f 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -519,6 +519,8 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->hw_version.devid == AR5416_AR9100_DEVID) ah->hw_version.macVersion = AR_SREV_VERSION_9100; + ath9k_hw_read_revisions(ah); + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { ath_print(common, ATH_DBG_FATAL, "Couldn't reset chip\n"); @@ -537,7 +539,8 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) { if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI || - (AR_SREV_9280(ah) && !ah->is_pciexpress)) { + ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) && + !ah->is_pciexpress)) { ah->config.serialize_regmode = SER_REG_MODE_ON; } else { @@ -1095,8 +1098,6 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) return false; } - ath9k_hw_read_revisions(ah); - return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); } @@ -1232,9 +1233,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (!ah->chip_fullsleep) { ath9k_hw_abortpcurecv(ah); - if (!ath9k_hw_stopdmarecv(ah)) + if (!ath9k_hw_stopdmarecv(ah)) { ath_print(common, ATH_DBG_XMIT, "Failed to stop receive dma\n"); + bChannelChange = false; + } } if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) @@ -1265,7 +1268,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; /* For chips on which RTC reset is done, save TSF before it gets cleared */ - if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + if (AR_SREV_9100(ah) || + (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))) tsf = ath9k_hw_gettsf64(ah); saveLedState = REG_READ(ah, AR_CFG_LED) & @@ -1297,7 +1301,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, } /* Restore TSF */ - if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)) + if (tsf) ath9k_hw_settsf64(ah, tsf); if (AR_SREV_9280_10_OR_LATER(ah)) @@ -1307,6 +1311,17 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, if (r) return r; + /* + * Some AR91xx SoC devices frequently fail to accept TSF writes + * right after the chip reset. 
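Note on the ath9k_htc_tx_start() change above: the code no longer assumes tx_info->control.vif is set; frames without an attached vif fall back to a catch-all index instead of dereferencing a NULL pointer. A bare sketch of that guard, with a hypothetical my_vif_priv structure standing in for the driver's per-vif data.

#include <net/mac80211.h>

struct my_vif_priv {
	u8 index;
};

static u8 my_tx_vif_idx(struct ieee80211_tx_info *tx_info, u8 catch_all_idx)
{
	struct my_vif_priv *vp;

	if (!tx_info->control.vif)
		return catch_all_idx;	/* frame sent with no vif attached */

	vp = (struct my_vif_priv *)tx_info->control.vif->drv_priv;
	return vp->index;
}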
When that happens, write a new + * value after the initvals have been applied, with an offset + * based on measured time difference + */ + if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) { + tsf += 1500; + ath9k_hw_settsf64(ah, tsf); + } + /* Setup MFP options for CCMP */ if (AR_SREV_9280_20_OR_LATER(ah)) { /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1e2a68e..2b8c6e8 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1124,6 +1124,8 @@ static int ath9k_start(struct ieee80211_hw *hw) "Starting driver with initial channel: %d MHz\n", curchan->center_freq); + ath9k_ps_wakeup(sc); + mutex_lock(&sc->mutex); if (ath9k_wiphy_started(sc)) { @@ -1238,6 +1240,8 @@ static int ath9k_start(struct ieee80211_hw *hw) mutex_unlock: mutex_unlock(&sc->mutex); + ath9k_ps_restore(sc); + return r; } @@ -1501,6 +1505,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, struct ath_softc *sc = aphy->sc; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; + bool bs_valid = false; int i; ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); @@ -1529,7 +1534,15 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, "slot\n", __func__); sc->beacon.bslot[i] = NULL; sc->beacon.bslot_aphy[i] = NULL; - } + } else if (sc->beacon.bslot[i]) + bs_valid = true; + } + if (!bs_valid && (sc->sc_ah->imask & ATH9K_INT_SWBA)) { + /* Disable SWBA interrupt */ + sc->sc_ah->imask &= ~ATH9K_INT_SWBA; + ath9k_ps_wakeup(sc); + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); + ath9k_ps_restore(sc); } sc->nvifs--; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 859aa4a..d8dd503 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -328,6 +328,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, u32 ba[WME_BA_BMP_SIZE >> 5]; int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; bool rc_update = true; + struct ieee80211_tx_rate rates[4]; skb = bf->bf_mpdu; hdr = (struct ieee80211_hdr *)skb->data; @@ -335,12 +336,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, tx_info = IEEE80211_SKB_CB(skb); hw = bf->aphy->hw; + memcpy(rates, tx_info->control.rates, sizeof(rates)); + rcu_read_lock(); /* XXX: use ieee80211_find_sta! 
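Note on the ath9k_start() hunk above: the whole start path is now bracketed by ath9k_ps_wakeup()/ath9k_ps_restore(), and the SWBA path in ath9k_remove_interface() takes the same pair around ath9k_hw_set_interrupts(), the usual pattern for touching registers while the chip may otherwise be in network sleep. A trimmed sketch of the bracketing; the elided body is a placeholder, not driver code.

static int my_start(struct ath_softc *sc)
{
	int r = 0;

	ath9k_ps_wakeup(sc);		/* keep the chip awake while programming it */
	mutex_lock(&sc->mutex);

	/* ... radio and queue bring-up elided ... */

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);		/* balance the wakeup reference */

	return r;
}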
*/ sta = ieee80211_find_sta_by_hw(hw, hdr->addr1); if (!sta) { rcu_read_unlock(); + + INIT_LIST_HEAD(&bf_head); + while (bf) { + bf_next = bf->bf_next; + + bf->bf_state.bf_type |= BUF_XRETRY; + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) || + !bf->bf_stale || bf_next != NULL) + list_move_tail(&bf->list, &bf_head); + + ath_tx_rc_status(bf, ts, 0, 0, false); + ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, + 0, 0); + + bf = bf_next; + } return; } @@ -375,6 +394,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, txfail = txpending = 0; bf_next = bf->bf_next; + skb = bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { /* transmit completion, subframe is * acked by block ack */ @@ -428,6 +450,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, spin_unlock_bh(&txq->axq_lock); if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { + memcpy(tx_info->control.rates, rates, sizeof(rates)); ath_tx_rc_status(bf, ts, nbad, txok, true); rc_update = false; } else { @@ -487,6 +510,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, bf = bf_next; } + /* prepend un-acked frames to the beginning of the pending frame queue */ + if (!list_empty(&bf_pending)) { + spin_lock_bh(&txq->axq_lock); + list_splice(&bf_pending, &tid->buf_q); + ath_tx_queue_tid(txq, tid); + spin_unlock_bh(&txq->axq_lock); + } + if (tid->state & AGGR_CLEANUP) { if (tid->baw_head == tid->baw_tail) { tid->state &= ~AGGR_ADDBA_COMPLETE; @@ -499,14 +530,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, return; } - /* prepend un-acked frames to the beginning of the pending frame queue */ - if (!list_empty(&bf_pending)) { - spin_lock_bh(&txq->axq_lock); - list_splice(&bf_pending, &tid->buf_q); - ath_tx_queue_tid(txq, tid); - spin_unlock_bh(&txq->axq_lock); - } - rcu_read_unlock(); if (needreset) @@ -2050,7 +2073,7 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, tx_info->status.rates[i].idx = -1; } - tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1; + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; } static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) @@ -2161,7 +2184,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) * This frame is sent out as a single frame. * Use hardware retry status for this frame. 
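Note on ath_tx_complete_aggr() above: the control rates are copied out of the tx_info before per-subframe status handling can overwrite them, then copied back just before the single rate-control update. Reduced to its core the pattern looks like the sketch below; my_complete_aggr and the elided middle are placeholders.

#include <net/mac80211.h>
#include <linux/string.h>

static void my_complete_aggr(struct ieee80211_tx_info *tx_info)
{
	struct ieee80211_tx_rate rates[4];

	/* the status handling below reuses tx_info and clobbers the rates */
	memcpy(rates, tx_info->control.rates, sizeof(rates));

	/* ... walk the subframes, fill tx_info->status for each ... */

	/* restore so the rate-control update sees the transmit rates */
	memcpy(tx_info->control.rates, rates, sizeof(rates));
}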
*/ - bf->bf_retries = ts.ts_longretry; if (ts.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; ath_tx_rc_status(bf, &ts, 0, txok, true); @@ -2280,7 +2302,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc) txok = !(txs.ts_status & ATH9K_TXERR_MASK); if (!bf_isampdu(bf)) { - bf->bf_retries = txs.ts_longretry; if (txs.ts_status & ATH9K_TXERR_XRETRY) bf->bf_state.bf_type |= BUF_XRETRY; ath_tx_rc_status(bf, &txs, 0, txok, true); @@ -2449,37 +2470,37 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) { - int i; - struct ath_atx_ac *ac, *ac_tmp; - struct ath_atx_tid *tid, *tid_tmp; + struct ath_atx_ac *ac; + struct ath_atx_tid *tid; struct ath_txq *txq; + int i, tidno; - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { - if (ATH_TXQ_SETUP(sc, i)) { - txq = &sc->tx.txq[i]; + for (tidno = 0, tid = &an->tid[tidno]; + tidno < WME_NUM_TID; tidno++, tid++) { + i = tid->ac->qnum; - spin_lock_bh(&txq->axq_lock); + if (!ATH_TXQ_SETUP(sc, i)) + continue; - list_for_each_entry_safe(ac, - ac_tmp, &txq->axq_acq, list) { - tid = list_first_entry(&ac->tid_q, - struct ath_atx_tid, list); - if (tid && tid->an != an) - continue; - list_del(&ac->list); - ac->sched = false; - - list_for_each_entry_safe(tid, - tid_tmp, &ac->tid_q, list) { - list_del(&tid->list); - tid->sched = false; - ath_tid_drain(sc, txq, tid); - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; - } - } + txq = &sc->tx.txq[i]; + ac = tid->ac; - spin_unlock_bh(&txq->axq_lock); + spin_lock_bh(&txq->axq_lock); + + if (tid->sched) { + list_del(&tid->list); + tid->sched = false; + } + + if (ac->sched) { + list_del(&ac->list); + tid->ac->sched = false; } + + ath_tid_drain(sc, txq, tid); + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + + spin_unlock_bh(&txq->axq_lock); } } diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h index a1c3952..345dd97 100644 --- a/drivers/net/wireless/ath/regd.h +++ b/drivers/net/wireless/ath/regd.h @@ -31,7 +31,6 @@ enum ctl_group { #define NO_CTL 0xff #define SD_NO_CTL 0xE0 #define NO_CTL 0xff -#define CTL_MODE_M 7 #define CTL_11A 0 #define CTL_11B 1 #define CTL_11G 2 diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h index 248c670..5c2cfe6 100644 --- a/drivers/net/wireless/ath/regd_common.h +++ b/drivers/net/wireless/ath/regd_common.h @@ -195,6 +195,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { {APL9_WORLD, CTL_ETSI, CTL_ETSI}, {APL3_FCCA, CTL_FCC, CTL_FCC}, + {APL7_FCCA, CTL_FCC, CTL_FCC}, {APL1_ETSIC, CTL_FCC, CTL_ETSI}, {APL2_ETSIC, CTL_FCC, CTL_ETSI}, {APL2_APLD, CTL_FCC, NO_CTL}, diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index fa40fdf..b8900f0 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1538,7 +1538,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) dmaaddr = meta->dmaaddr; goto drop_recycle_buffer; } - if (unlikely(len > ring->rx_buffersize)) { + if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. 
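Note on the corrected b43 bound check above: the device writes the frame at a fixed header offset into the RX buffer, so the overflow test has to compare offset plus length against the buffer size rather than the length alone. A one-function sketch under hypothetical names (MY_FRAMEOFFSET, MY_RX_BUFSIZE).

#include <linux/types.h>

#define MY_FRAMEOFFSET	30			/* hypothetical RX header offset */
#define MY_RX_BUFSIZE	(MY_FRAMEOFFSET + 1500)	/* must cover offset + max frame */

static bool my_rx_frame_fits(unsigned int len)
{
	/* the frame data starts at +MY_FRAMEOFFSET inside the buffer */
	return MY_FRAMEOFFSET + len <= MY_RX_BUFSIZE;
}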
* This should never happen, as we try to allocate buffers diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h index dc91944..a9282d7 100644 --- a/drivers/net/wireless/b43/dma.h +++ b/drivers/net/wireless/b43/dma.h @@ -163,7 +163,7 @@ struct b43_dmadesc_generic { /* DMA engine tuning knobs */ #define B43_TXRING_SLOTS 256 #define B43_RXRING_SLOTS 64 -#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN +#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN) /* Pointer poison */ #define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM)) diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 4e56b7b..4808dc7 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/b43/sdio.c @@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func, err_free_ssb: kfree(sdio); err_disable_func: + sdio_claim_host(func); sdio_disable_func(func); err_release_host: sdio_release_host(func); @@ -175,7 +176,9 @@ static void b43_sdio_remove(struct sdio_func *func) struct b43_sdio *sdio = sdio_get_drvdata(func); ssb_bus_unregister(&sdio->ssb); + sdio_claim_host(func); sdio_disable_func(func); + sdio_release_host(func); kfree(sdio); sdio_set_drvdata(func, NULL); } diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index 29b31a6..357da81 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -626,15 +626,9 @@ static int prism2_config(struct pcmcia_device *link) local->hw_priv = hw_priv; hw_priv->link = link; - /* - * Make sure the IRQ handler cannot proceed until at least - * dev->base_addr is initialized. - */ - spin_lock_irqsave(&local->irq_init_lock, flags); - ret = pcmcia_request_irq(link, prism2_interrupt); if (ret) - goto failed_unlock; + goto failed; /* * This actually configures the PCMCIA socket -- setting up @@ -643,11 +637,12 @@ static int prism2_config(struct pcmcia_device *link) */ ret = pcmcia_request_configuration(link, &link->conf); if (ret) - goto failed_unlock; + goto failed; + /* IRQ handler cannot proceed until at dev->base_addr is initialized */ + spin_lock_irqsave(&local->irq_init_lock, flags); dev->irq = link->irq; dev->base_addr = link->io.BasePort1; - spin_unlock_irqrestore(&local->irq_init_lock, flags); /* Finally, report what we've done */ @@ -676,8 +671,6 @@ static int prism2_config(struct pcmcia_device *link) return ret; - failed_unlock: - spin_unlock_irqrestore(&local->irq_init_lock, flags); failed: kfree(hw_priv); prism2_release((u_long)link); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index c44a303..7b4fc94 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -915,22 +915,6 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, rts_retry_limit = data_retry_limit; tx_cmd->rts_retry_limit = rts_retry_limit; - if (ieee80211_is_mgmt(fc)) { - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case cpu_to_le16(IEEE80211_STYPE_AUTH): - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - if (tx_flags & TX_CMD_FLG_RTS_MSK) { - tx_flags &= ~TX_CMD_FLG_RTS_MSK; - tx_flags |= TX_CMD_FLG_CTS_MSK; - } - break; - default: - break; - } - } - tx_cmd->rate = rate; tx_cmd->tx_flags = tx_flags; @@ -2853,7 +2837,6 @@ static struct iwl_lib_ops iwl3945_lib = { .config_ap = iwl3945_config_ap, .manage_ibss_station = 
iwl3945_manage_ibss_station, .recover_from_tx_stall = iwl_bg_monitor_recover, - .check_plcp_health = iwl3945_good_plcp_health, .debugfs_ops = { .rx_stats_read = iwl3945_ucode_rx_stats_read, diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index a28af7e..0a67558 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -51,7 +51,7 @@ #include "iwl-agn-debugfs.h" /* Highest firmware API version supported */ -#define IWL5000_UCODE_API_MAX 2 +#define IWL5000_UCODE_API_MAX 5 #define IWL5150_UCODE_API_MAX 2 /* Lowest firmware API version supported */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c index 01658cf..2a30397 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c @@ -209,10 +209,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv) } } -static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info, - __le32 *tx_flags) +static void iwlagn_rts_tx_cmd_flag(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) { - *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || + info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { + *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; + return; + } + + if (priv->cfg->use_rts_for_ht && + info->flags & IEEE80211_TX_CTL_AMPDU) { + *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; + return; + } } /* Calc max signal level (dBm) among 3 possible receivers */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index cf4a95b..ca46831 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -325,18 +325,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, struct iwl_lq_sta *lq_data, struct ieee80211_sta *sta) { - if ((tid < TID_MAX_LOAD_COUNT) && - !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) { - if (priv->cfg->use_rts_for_ht) { - /* - * switch to RTS/CTS if it is the prefer protection - * method for HT traffic - */ - IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n"); - priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN; - iwlcore_commit_rxon(priv); - } - } + if (tid < TID_MAX_LOAD_COUNT) + rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); + else + IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", + tid, TID_MAX_LOAD_COUNT); } static inline int get_num_of_ant_from_rate(u32 rate_n_flags) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 7d614c4..cf64575 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -376,10 +376,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } - priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); - - if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) - tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags); tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); if (ieee80211_is_mgmt(fc)) { @@ -453,21 +450,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) rate_flags |= RATE_MCS_CCK_MSK; - /* Set up RTS and CTS flags for certain packets */ - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { - case 
cpu_to_le16(IEEE80211_STYPE_AUTH): - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): - if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { - tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; - tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; - } - break; - default: - break; - } - /* Set up antennas */ priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); @@ -1159,11 +1141,14 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { tx_info = &txq->txb[txq->q.read_ptr]; - iwlagn_tx_status(priv, tx_info->skb[0]); + + if (WARN_ON_ONCE(tx_info->skb[0] == NULL)) + continue; hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data; - if (hdr && ieee80211_is_data_qos(hdr->frame_control)) + if (ieee80211_is_data_qos(hdr->frame_control)) nfreed++; + iwlagn_tx_status(priv, tx_info->skb[0]); tx_info->skb[0] = NULL; if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 24aff65..8a2c4d7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -200,13 +200,6 @@ int iwl_commit_rxon(struct iwl_priv *priv) priv->start_calib = 0; if (new_assoc) { - /* - * allow CTS-to-self if possible for new association. - * this is relevant only for 5000 series and up, - * but will not damage 4965 - */ - priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; - /* Apply the new configuration * RXON assoc doesn't clear the station table in uCode, */ @@ -1212,6 +1205,9 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv) /* only Re-enable if diabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); #ifdef CONFIG_IWLWIFI_DEBUG if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { @@ -1427,6 +1423,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv) /* only Re-enable if diabled by irq */ if (test_bit(STATUS_INT_ENABLED, &priv->status)) iwl_enable_interrupts(priv); + /* Re-enable RF_KILL if it occurred */ + else if (handled & CSR_INT_BIT_RF_KILL) + iwl_enable_rfkill_int(priv); } /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ @@ -3119,9 +3118,10 @@ static void iwl_mac_stop(struct ieee80211_hw *hw) flush_workqueue(priv->workqueue); - /* enable interrupts again in order to receive rfkill changes */ + /* User space software may expect getting rfkill changes + * even if interface is down */ iwl_write32(priv, CSR_INT, 0xFFFFFFFF); - iwl_enable_interrupts(priv); + iwl_enable_rfkill_int(priv); IWL_DEBUG_MAC80211(priv, "leave\n"); } @@ -3336,13 +3336,40 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", priv->_agn.agg_tids_count); } + if (priv->cfg->use_rts_for_ht) { + struct iwl_station_priv *sta_priv = + (void *) sta->drv_priv; + /* + * switch off RTS/CTS if it was previously enabled + */ + + sta_priv->lq_sta.lq.general_params.flags &= + ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, + CMD_ASYNC, false); + } + break; if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return 0; else return ret; case IEEE80211_AMPDU_TX_OPERATIONAL: - /* do nothing */ - return -EOPNOTSUPP; + if (priv->cfg->use_rts_for_ht) { + struct 
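/*
 * Illustrative sketch (not part of the patch): the iwlagn_tx_queue_reclaim
 * fix above reorders the loop so the skb pointer is validated before its
 * 802.11 header is dereferenced.  The general shape of that defensive
 * pattern, with hypothetical names ("my_slot", "my_reclaim_one"):
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>

struct my_slot {
        struct sk_buff *skb;
};

static int my_reclaim_one(struct my_slot *slot)
{
        struct ieee80211_hdr *hdr;

        /* Warn loudly (once) on inconsistent bookkeeping, never crash. */
        if (WARN_ON_ONCE(slot->skb == NULL))
                return 0;

        hdr = (struct ieee80211_hdr *)slot->skb->data;
        /* Only QoS data frames count against the caller's free budget. */
        return ieee80211_is_data_qos(hdr->frame_control) ? 1 : 0;
}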
iwl_station_priv *sta_priv = + (void *) sta->drv_priv; + + /* + * switch to RTS/CTS if it is the prefer protection + * method for HT traffic + */ + + sta_priv->lq_sta.lq.general_params.flags |= + LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; + iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, + CMD_ASYNC, false); + } + ret = 0; + break; default: IWL_DEBUG_HT(priv, "unknown\n"); return -EINVAL; @@ -3423,6 +3450,49 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, return 0; } +static void iwlagn_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->mutex); + + priv->staging_rxon.filter_flags &= ~filter_nand; + priv->staging_rxon.filter_flags |= filter_or; + + iwlcore_commit_rxon(priv); + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. + */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + /***************************************************************************** * * driver setup and teardown @@ -3583,7 +3653,7 @@ static struct ieee80211_ops iwl_hw_ops = { .add_interface = iwl_mac_add_interface, .remove_interface = iwl_mac_remove_interface, .config = iwl_mac_config, - .configure_filter = iwl_configure_filter, + .configure_filter = iwlagn_configure_filter, .set_key = iwl_mac_set_key, .update_tkip_key = iwl_mac_update_tkip_key, .conf_tx = iwl_mac_conf_tx, @@ -3769,14 +3839,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * 8. Enable interrupts and read RFKILL state *********************************************/ - /* enable interrupts if needed: hw bug w/a */ + /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); } - iwl_enable_interrupts(priv); + iwl_enable_rfkill_int(priv); /* If platform's RF_KILL switch is NOT set to KILL */ if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 5bbc529..cd5b664 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -403,19 +403,36 @@ EXPORT_SYMBOL(iwlcore_free_geos); * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this * function. 
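/*
 * Illustrative sketch (not part of the patch): the configure_filter
 * callbacks added above all use the same CHK() idiom -- translate the
 * mac80211 filter flags into two masks, bits to set and bits to clear,
 * then apply them to the device state in one place.  A generic,
 * hypothetical version of the idiom (MY_HW_* bits are made up):
 */
#include <linux/types.h>
#include <net/mac80211.h>

#define MY_HW_PROMISC   0x1     /* hypothetical device filter bit */
#define MY_HW_CTL2HOST  0x2     /* hypothetical device filter bit */

static void my_build_filter_masks(unsigned int total_flags,
                                  u32 *set_mask, u32 *clear_mask)
{
        *set_mask = 0;
        *clear_mask = 0;

#define CHK(test, flag) do {                    \
        if (total_flags & (test))               \
                *set_mask |= (flag);            \
        else                                    \
                *clear_mask |= (flag);          \
} while (0)

        CHK(FIF_PROMISC_IN_BSS, MY_HW_PROMISC);
        CHK(FIF_CONTROL, MY_HW_CTL2HOST);

#undef CHK
}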
*/ -void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, - __le32 *tx_flags) +void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags) { if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { *tx_flags |= TX_CMD_FLG_RTS_MSK; *tx_flags &= ~TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + + if (!ieee80211_is_mgmt(fc)) + return; + + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { + case cpu_to_le16(IEEE80211_STYPE_AUTH): + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; + *tx_flags |= TX_CMD_FLG_CTS_MSK; + break; + } } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { *tx_flags &= ~TX_CMD_FLG_RTS_MSK; *tx_flags |= TX_CMD_FLG_CTS_MSK; + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; } } EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag); + static bool is_single_rx_stream(struct iwl_priv *priv) { return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || @@ -1294,51 +1311,6 @@ out: EXPORT_SYMBOL(iwl_apm_init); - -void iwl_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - u64 multicast) -{ - struct iwl_priv *priv = hw->priv; - __le32 filter_or = 0, filter_nand = 0; - -#define CHK(test, flag) do { \ - if (*total_flags & (test)) \ - filter_or |= (flag); \ - else \ - filter_nand |= (flag); \ - } while (0) - - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", - changed_flags, *total_flags); - - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); - -#undef CHK - - mutex_lock(&priv->mutex); - - priv->staging_rxon.filter_flags &= ~filter_nand; - priv->staging_rxon.filter_flags |= filter_or; - - iwlcore_commit_rxon(priv); - - mutex_unlock(&priv->mutex); - - /* - * Receiving all multicast frames is always enabled by the - * default flags setup in iwl_connection_init_rx_config() - * since we currently do not support programming multicast - * filters into the device. 
- */ - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; -} -EXPORT_SYMBOL(iwl_configure_filter); - int iwl_set_hw_params(struct iwl_priv *priv) { priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; @@ -1936,6 +1908,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK; else priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK; + if (bss_conf->use_cts_prot) + priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; + else + priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN; } if (changes & BSS_CHANGED_BASIC_RATES) { diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 31775bd..e8ef317 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -102,8 +102,9 @@ struct iwl_hcmd_utils_ops { u32 min_average_noise, u8 default_chain); void (*chain_noise_reset)(struct iwl_priv *priv); - void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, - __le32 *tx_flags); + void (*rts_tx_cmd_flag)(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags); int (*calc_rssi)(struct iwl_priv *priv, struct iwl_rx_phy_res *rx_resp); void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); @@ -355,9 +356,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv, u32 decrypt_res, struct ieee80211_rx_status *stats); void iwl_irq_handle_error(struct iwl_priv *priv); -void iwl_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, u64 multicast); int iwl_set_hw_params(struct iwl_priv *priv); void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif); void iwl_bss_info_changed(struct ieee80211_hw *hw, @@ -375,8 +373,9 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif); void iwl_mac_reset_tsf(struct ieee80211_hw *hw); int iwl_alloc_txq_mem(struct iwl_priv *priv); void iwl_free_txq_mem(struct iwl_priv *priv); -void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, - __le32 *tx_flags); +void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv, + struct ieee80211_tx_info *info, + __le16 fc, __le32 *tx_flags); #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_alloc_traffic_mem(struct iwl_priv *priv); void iwl_free_traffic_mem(struct iwl_priv *priv); diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index ae7319b..4cf864c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -193,7 +193,7 @@ TRACE_EVENT(iwlwifi_dev_tx, __entry->framelen = buf0_len + buf1_len; memcpy(__get_dynamic_array(tfd), tfd, tfdlen); memcpy(__get_dynamic_array(buf0), buf0, buf0_len); - memcpy(__get_dynamic_array(buf1), buf1, buf0_len); + memcpy(__get_dynamic_array(buf1), buf1, buf1_len); ), TP_printk("[%p] TX %.2x (%zu bytes)", __entry->priv, diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h index 3ff6b9d..2f7ea66 100644 --- a/drivers/net/wireless/iwlwifi/iwl-helpers.h +++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h @@ -163,6 +163,12 @@ static inline void iwl_disable_interrupts(struct iwl_priv *priv) IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); } +static inline void iwl_enable_rfkill_int(struct iwl_priv *priv) +{ + IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n"); + iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); +} + static inline void iwl_enable_interrupts(struct iwl_priv *priv) { 
IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index 386c5f9..e1af9fd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c @@ -420,11 +420,10 @@ void iwl_bg_scan_check(struct work_struct *data) return; mutex_lock(&priv->mutex); - if (test_bit(STATUS_SCANNING, &priv->status) || - test_bit(STATUS_SCAN_ABORTING, &priv->status)) { - IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting " - "adapter (%dms)\n", - jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); + if (test_bit(STATUS_SCANNING, &priv->status) && + !test_bit(STATUS_SCAN_ABORTING, &priv->status)) { + IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n", + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) iwl_send_scan_abort(priv); @@ -489,12 +488,11 @@ void iwl_bg_abort_scan(struct work_struct *work) !test_bit(STATUS_GEO_CONFIGURED, &priv->status)) return; - mutex_lock(&priv->mutex); - - cancel_delayed_work_sync(&priv->scan_check); - set_bit(STATUS_SCAN_ABORTING, &priv->status); - iwl_send_scan_abort(priv); + cancel_delayed_work(&priv->scan_check); + mutex_lock(&priv->mutex); + if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) + iwl_send_scan_abort(priv); mutex_unlock(&priv->mutex); } EXPORT_SYMBOL(iwl_bg_abort_scan); diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index a27872d..39c0d2d 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -434,10 +434,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; } - priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); - - if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) - tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; + priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags); tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); if (ieee80211_is_mgmt(fc)) { @@ -3465,6 +3462,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, return 0; } + +static void iwl3945_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + u64 multicast) +{ + struct iwl_priv *priv = hw->priv; + __le32 filter_or = 0, filter_nand = 0; + +#define CHK(test, flag) do { \ + if (*total_flags & (test)) \ + filter_or |= (flag); \ + else \ + filter_nand |= (flag); \ + } while (0) + + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", + changed_flags, *total_flags); + + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); + +#undef CHK + + mutex_lock(&priv->mutex); + + priv->staging_rxon.filter_flags &= ~filter_nand; + priv->staging_rxon.filter_flags |= filter_or; + + /* + * Committing directly here breaks for some reason, + * but we'll eventually commit the filter flags + * change anyway. + */ + + mutex_unlock(&priv->mutex); + + /* + * Receiving all multicast frames is always enabled by the + * default flags setup in iwl_connection_init_rx_config() + * since we currently do not support programming multicast + * filters into the device. 
+ */ + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; +} + + /***************************************************************************** * * sysfs attributes @@ -3870,7 +3916,7 @@ static struct ieee80211_ops iwl3945_hw_ops = { .add_interface = iwl_mac_add_interface, .remove_interface = iwl_mac_remove_interface, .config = iwl_mac_config, - .configure_filter = iwl_configure_filter, + .configure_filter = iwl3945_configure_filter, .set_key = iwl3945_mac_set_key, .conf_tx = iwl_mac_conf_tx, .reset_tsf = iwl_mac_reset_tsf, diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index ca71f08..87c7aca 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -1813,6 +1813,12 @@ static int __orinoco_commit(struct orinoco_private *priv) struct net_device *dev = priv->ndev; int err = 0; + /* If we've called commit, we are reconfiguring or bringing the + * interface up. Maintaining countermeasures across this would + * be confusing, so note that we've disabled them. The port will + * be enabled later in orinoco_commit or __orinoco_up. */ + priv->tkip_cm_active = 0; + err = orinoco_hw_program_rids(priv); /* FIXME: what about netif_tx_lock */ diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 5775124..77b2d7b 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c @@ -904,10 +904,10 @@ static int orinoco_ioctl_set_auth(struct net_device *dev, */ if (param->value) { priv->tkip_cm_active = 1; - ret = hermes_enable_port(hw, 0); + ret = hermes_disable_port(hw, 0); } else { priv->tkip_cm_active = 0; - ret = hermes_disable_port(hw, 0); + ret = hermes_enable_port(hw, 0); } break; diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c index 187e263..53d0f20 100644 --- a/drivers/net/wireless/p54/eeprom.c +++ b/drivers/net/wireless/p54/eeprom.c @@ -262,8 +262,10 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev) list->max_entries = max_channel_num; list->channels = kzalloc(sizeof(struct p54_channel_entry) * max_channel_num, GFP_KERNEL); - if (!list->channels) + if (!list->channels) { + ret = -ENOMEM; goto free; + } for (i = 0; i < max_channel_num; i++) { if (i < priv->iq_autocal_len) { diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index a5ea89c..a66b39b 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, while (i != idx) { u16 len; struct sk_buff *skb; + dma_addr_t dma_addr; desc = &ring[i]; len = le16_to_cpu(desc->len); skb = rx_buf[i]; @@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, len = priv->common.rx_mtu; } + dma_addr = le32_to_cpu(desc->host_addr); + pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { - pci_unmap_single(priv->pdev, - le32_to_cpu(desc->host_addr), - priv->common.rx_mtu + 32, - PCI_DMA_FROMDEVICE); + pci_unmap_single(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); rx_buf[i] = NULL; - desc->host_addr = 0; + desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); + pci_dma_sync_single_for_device(priv->pdev, dma_addr, + priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } diff --git 
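/*
 * Illustrative sketch (not part of the patch): the p54pci RX hunk above
 * follows the streaming-DMA ownership rules -- sync the buffer to the CPU
 * before inspecting it, then either unmap it (if the skb is handed up the
 * stack) or sync it back to the device before reuse.  A minimal outline
 * of that pattern, with a hypothetical name ("my_rx_one"):
 */
#include <linux/pci.h>
#include <linux/skbuff.h>

static bool my_rx_one(struct pci_dev *pdev, struct sk_buff *skb,
                      dma_addr_t dma_addr, size_t buf_len, u16 frame_len,
                      bool (*deliver)(struct sk_buff *))
{
        /* Give the CPU a coherent view of what the device wrote. */
        pci_dma_sync_single_for_cpu(pdev, dma_addr, buf_len,
                                    PCI_DMA_FROMDEVICE);
        skb_put(skb, frame_len);

        if (deliver(skb)) {
                /* skb now belongs to the stack: tear the mapping down. */
                pci_unmap_single(pdev, dma_addr, buf_len,
                                 PCI_DMA_FROMDEVICE);
                return true;
        }

        /* Buffer is recycled: hand ownership back to the device. */
        skb_trim(skb, 0);
        pci_dma_sync_single_for_device(pdev, dma_addr, buf_len,
                                       PCI_DMA_FROMDEVICE);
        return false;
}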
a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 7307325..e3924a9 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -33,8 +33,18 @@ MODULE_ALIAS("prism54usb"); MODULE_FIRMWARE("isl3886usb"); MODULE_FIRMWARE("isl3887usb"); +/* + * Note: + * + * Always update our wiki's device list (located at: + * http://wireless.kernel.org/en/users/Drivers/p54/devices ), + * whenever you add a new device. + */ + static struct usb_device_id p54u_table[] __devinitdata = { /* Version 1 devices (pci chip + net2280) */ + {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ + {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ @@ -46,12 +56,20 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ + {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */ {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ + {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ + {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ + {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */ + {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */ + {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ + {USB_DEVICE(0x182d, 0x096b)}, /* Sitecom WL-107 */ {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ + {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */ {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ @@ -60,6 +78,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ + {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ @@ -79,7 +98,10 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ + {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ + {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ + {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ @@ -930,8 +952,8 @@ static int __devinit p54u_probe(struct usb_interface *intf, #ifdef CONFIG_PM /* ISL3887 needs a full reset on resume */ udev->reset_resume = 1; +#endif /* CONFIG_PM */ err = p54u_device_reset(dev); -#endif priv->hw_type = P54U_3887; dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr); diff --git 
a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c index 4e68910..7ef1e61 100644 --- a/drivers/net/wireless/p54/txrx.c +++ b/drivers/net/wireless/p54/txrx.c @@ -444,7 +444,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && - (!payload->status)) + !(payload->status & P54_TX_FAILED)) info->flags |= IEEE80211_TX_STAT_ACK; if (payload->status & P54_TX_PSM_CANCELLED) info->flags |= IEEE80211_TX_STAT_TX_FILTERED; @@ -616,7 +616,7 @@ static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, else *burst_possible = false; - if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; if (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE) @@ -702,7 +702,7 @@ int p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb) struct p54_tx_info *p54info; struct p54_hdr *hdr; struct p54_tx_data *txhdr; - unsigned int padding, len, extra_len; + unsigned int padding, len, extra_len = 0; int i, j, ridx; u16 hdr_flags = 0, aid = 0; u8 rate, queue = 0, crypt_offset = 0; diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index ad2c98a..f57f0df 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1462,8 +1462,10 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); - for (i = 0; i < 14; i++) - info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + for (i = 0; i < 14; i++) { + info[i].max_power = TXPOWER_FROM_DEV(MAX_TXPOWER); + info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } return 0; } diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 41da3d2..fec3d9e 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1780,12 +1780,16 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); - for (i = 0; i < 14; i++) - info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + for (i = 0; i < 14; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } if (spec->num_channels > 14) { - for (i = 14; i < spec->num_channels; i++) - info[i].tx_power1 = DEFAULT_TXPOWER; + for (i = 14; i < spec->num_channels; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = DEFAULT_TXPOWER; + } } return 0; diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c index 9ae96a6..9e8b33d 100644 --- a/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/rt2x00/rt2500usb.c @@ -1690,12 +1690,16 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); - for (i = 0; i < 14; i++) - info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + for (i = 0; i < 14; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } if (spec->num_channels > 14) { - for (i = 14; i < spec->num_channels; i++) - info[i].tx_power1 = DEFAULT_TXPOWER; + for (i = 14; i < spec->num_channels; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = DEFAULT_TXPOWER; + } } return 0; diff --git 
a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h index 2aa0375..b72161c 100644 --- a/drivers/net/wireless/rt2x00/rt2800.h +++ b/drivers/net/wireless/rt2x00/rt2800.h @@ -1770,6 +1770,13 @@ struct mac_iveiv_entry { #define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00) /* + * EEPROM Maximum TX power values + */ +#define EEPROM_MAX_TX_POWER 0x0027 +#define EEPROM_MAX_TX_POWER_24GHZ FIELD16(0x00ff) +#define EEPROM_MAX_TX_POWER_5GHZ FIELD16(0xff00) + +/* * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. * This is delta in 40MHZ. * VALUE: Tx Power dalta value (MAX=4) diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index db4250d..27c13d7 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -840,27 +840,27 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev, * double meaning, and we should set a 7DBm boost flag. */ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST, - (info->tx_power1 >= 0)); + (info->default_power1 >= 0)); - if (info->tx_power1 < 0) - info->tx_power1 += 7; + if (info->default_power1 < 0) + info->default_power1 += 7; rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, - TXPOWER_A_TO_DEV(info->tx_power1)); + TXPOWER_A_TO_DEV(info->default_power1)); rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST, - (info->tx_power2 >= 0)); + (info->default_power2 >= 0)); - if (info->tx_power2 < 0) - info->tx_power2 += 7; + if (info->default_power2 < 0) + info->default_power2 += 7; rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, - TXPOWER_A_TO_DEV(info->tx_power2)); + TXPOWER_A_TO_DEV(info->default_power2)); } else { rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, - TXPOWER_G_TO_DEV(info->tx_power1)); + TXPOWER_G_TO_DEV(info->default_power1)); rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G, - TXPOWER_G_TO_DEV(info->tx_power2)); + TXPOWER_G_TO_DEV(info->default_power2)); } rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf)); @@ -900,13 +900,11 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, - TXPOWER_G_TO_DEV(info->tx_power1)); + rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1); rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr); - rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, - TXPOWER_G_TO_DEV(info->tx_power2)); + rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2); rt2800_rfcsr_write(rt2x00dev, 13, rfcsr); rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); @@ -2226,6 +2224,13 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) default_lna_gain); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); + rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &word); + if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_24GHZ) == 0xff) + rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_24GHZ, MAX_G_TXPOWER); + if (rt2x00_get_field16(word, EEPROM_MAX_TX_POWER_5GHZ) == 0xff) + rt2x00_set_field16(&word, EEPROM_MAX_TX_POWER_5GHZ, MAX_A_TXPOWER); + rt2x00_eeprom_write(rt2x00dev, EEPROM_MAX_TX_POWER, word); + return 0; } EXPORT_SYMBOL_GPL(rt2800_validate_eeprom); @@ -2466,9 +2471,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; - char *tx_power1; - char *tx_power2; + char *default_power1; + char *default_power2; unsigned int i; + unsigned short max_power; u16 eeprom; /* @@ 
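/*
 * Illustrative sketch (not part of the patch): the rt2800_validate_eeprom
 * hunk above uses a common defaulting idiom -- an erased EEPROM reads back
 * 0xff, so each byte-wide field is compared against 0xff and replaced with
 * a compiled-in default before use.  In plain C, with hypothetical default
 * parameters, the idea is simply:
 */
#include <linux/types.h>

static u16 my_fixup_max_power(u16 word, u8 def_24ghz, u8 def_5ghz)
{
        u8 bg = word & 0xff;    /* 2.4 GHz field, low byte  */
        u8 a  = word >> 8;      /* 5 GHz field, high byte   */

        if (bg == 0xff)         /* unprogrammed: fall back to default */
                bg = def_24ghz;
        if (a == 0xff)
                a = def_5ghz;

        return bg | (a << 8);
}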
-2566,21 +2572,26 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; - tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); - tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); + rt2x00_eeprom_read(rt2x00dev, EEPROM_MAX_TX_POWER, &eeprom); + max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_24GHZ); + default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); + default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); for (i = 0; i < 14; i++) { - info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]); - info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]); + info[i].max_power = max_power; + info[i].default_power1 = TXPOWER_G_FROM_DEV(default_power1[i]); + info[i].default_power2 = TXPOWER_G_FROM_DEV(default_power2[i]); } if (spec->num_channels > 14) { - tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); - tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); + max_power = rt2x00_get_field16(eeprom, EEPROM_MAX_TX_POWER_5GHZ); + default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); + default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); for (i = 14; i < spec->num_channels; i++) { - info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]); - info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]); + info[i].max_power = max_power; + info[i].default_power1 = TXPOWER_A_FROM_DEV(default_power1[i]); + info[i].default_power2 = TXPOWER_A_FROM_DEV(default_power2[i]); } } diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h index 6c1ff4c..0d1ff9d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00.h +++ b/drivers/net/wireless/rt2x00/rt2x00.h @@ -211,8 +211,9 @@ struct channel_info { unsigned int flags; #define GEOGRAPHY_ALLOWED 0x00000001 - short tx_power1; - short tx_power2; + short max_power; + short default_power1; + short default_power2; }; /* diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c index f20d3ee..f444f0b 100644 --- a/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -605,7 +605,7 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, for (i = 0; i < spec->num_channels; i++) { rt2x00lib_channel(&channels[i], spec->channels[i].channel, - spec->channels_info[i].tx_power1, i); + spec->channels_info[i].max_power, i); } /* diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 6a74baf..766032e 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2605,13 +2605,17 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); - for (i = 0; i < 14; i++) - info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + for (i = 0; i < 14; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } if (spec->num_channels > 14) { tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); - for (i = 14; i < spec->num_channels; i++) - info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + for (i = 14; i < spec->num_channels; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } } return 0; diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 6e0d82e..47a795c 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ 
b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2092,13 +2092,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); - for (i = 0; i < 14; i++) - info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + for (i = 0; i < 14; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } if (spec->num_channels > 14) { tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); - for (i = 14; i < spec->num_channels; i++) - info[i].tx_power1 = TXPOWER_FROM_DEV(tx_power[i]); + for (i = 14; i < spec->num_channels; i++) { + info[i].max_power = MAX_TXPOWER; + info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + } } return 0; @@ -2388,6 +2392,7 @@ static struct usb_device_id rt73usb_device_table[] = { { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x148f, 0x2671), USB_DEVICE_DATA(&rt73usb_ops) }, + { USB_DEVICE(0x0812, 0x3101), USB_DEVICE_DATA(&rt73usb_ops) }, /* Qcom */ { USB_DEVICE(0x18e8, 0x6196), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x18e8, 0x6229), USB_DEVICE_DATA(&rt73usb_ops) }, diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c index 515817d..15f09e8 100644 --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c @@ -688,6 +688,8 @@ void rtl8180_beacon_work(struct work_struct *work) /* grab a fresh beacon */ skb = ieee80211_beacon_get(dev, vif); + if (!skb) + goto resched; /* * update beacon timestamp w/ TSF value diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c index a37b30c..ce3722f 100644 --- a/drivers/net/wireless/wl12xx/wl1251_cmd.c +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c @@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout) cmd->timeout = timeout; - ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); + ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd)); if (ret < 0) { wl1251_error("cmd trigger scan to failed: %d", ret); goto out; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d504e2b..42dad59 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -66,8 +66,8 @@ struct netfront_cb { #define GRANT_INVALID_REF 0 -#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) -#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) +#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) +#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) struct netfront_info { @@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev, if (xennet_connect(netdev) != 0) break; xenbus_switch_state(dev, XenbusStateConnected); + netif_notify_peers(netdev); break; case XenbusStateClosing: diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c index a9352b2..b7e755f 100644 --- a/drivers/oprofile/buffer_sync.c +++ b/drivers/oprofile/buffer_sync.c @@ -141,16 +141,6 @@ static struct notifier_block module_load_nb = { .notifier_call = module_load_notify, }; - -static void end_sync(void) -{ - end_cpu_work(); - /* make sure we don't leak task structs */ - process_task_mortuary(); - process_task_mortuary(); -} - - int sync_start(void) { int err; @@ -158,7 +148,7 @@ int 
sync_start(void) if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) return -ENOMEM; - start_cpu_work(); + mutex_lock(&buffer_mutex); err = task_handoff_register(&task_free_nb); if (err) @@ -173,7 +163,10 @@ int sync_start(void) if (err) goto out4; + start_cpu_work(); + out: + mutex_unlock(&buffer_mutex); return err; out4: profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); @@ -182,7 +175,6 @@ out3: out2: task_handoff_unregister(&task_free_nb); out1: - end_sync(); free_cpumask_var(marked_cpus); goto out; } @@ -190,11 +182,20 @@ out1: void sync_stop(void) { + /* flush buffers */ + mutex_lock(&buffer_mutex); + end_cpu_work(); unregister_module_notifier(&module_load_nb); profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); task_handoff_unregister(&task_free_nb); - end_sync(); + mutex_unlock(&buffer_mutex); + flush_scheduled_work(); + + /* make sure we don't leak task structs */ + process_task_mortuary(); + process_task_mortuary(); + free_cpumask_var(marked_cpus); } diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 219f79e..f179ac2 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -120,8 +120,6 @@ void end_cpu_work(void) cancel_delayed_work(&b->work); } - - flush_scheduled_work(); } /* diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index dc0ae4d..0107251 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c @@ -21,6 +21,7 @@ #include "oprof.h" static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer); +static int ctr_running; static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer) { @@ -33,6 +34,9 @@ static void __oprofile_hrtimer_start(void *unused) { struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer); + if (!ctr_running) + return; + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = oprofile_hrtimer_notify; @@ -42,7 +46,10 @@ static void __oprofile_hrtimer_start(void *unused) static int oprofile_hrtimer_start(void) { + get_online_cpus(); + ctr_running = 1; on_each_cpu(__oprofile_hrtimer_start, NULL, 1); + put_online_cpus(); return 0; } @@ -50,6 +57,9 @@ static void __oprofile_hrtimer_stop(int cpu) { struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu); + if (!ctr_running) + return; + hrtimer_cancel(hrtimer); } @@ -57,8 +67,11 @@ static void oprofile_hrtimer_stop(void) { int cpu; + get_online_cpus(); for_each_online_cpu(cpu) __oprofile_hrtimer_stop(cpu); + ctr_running = 0; + put_online_cpus(); } static int __cpuinit oprofile_cpu_notify(struct notifier_block *self, diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 188bc84..d02be78 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -176,16 +176,18 @@ static ssize_t led_proc_write(struct file *file, const char *buf, size_t count, loff_t *pos) { void *data = PDE(file->f_path.dentry->d_inode)->data; - char *cur, lbuf[count + 1]; + char *cur, lbuf[32]; int d; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - memset(lbuf, 0, count + 1); + if (count >= sizeof(lbuf)) + count = sizeof(lbuf)-1; if (copy_from_user(lbuf, buf, count)) return -EFAULT; + lbuf[count] = 0; cur = lbuf; diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0a19708..a286959 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -1414,6 +1414,11 @@ int __init enable_drhd_fault_handling(void) (unsigned long long)drhd->reg_base_addr, ret); return -1; } + + /* + * Clear any previous faults. 
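/*
 * Illustrative sketch (not part of the patch): the led_proc_write fix above
 * replaces an unbounded on-stack VLA with a small fixed buffer, clamps the
 * user-supplied length, and NUL-terminates after the copy.  The general
 * shape of that pattern (hypothetical "my_proc_write"):
 */
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t my_proc_write(const char __user *buf, size_t count)
{
        char lbuf[32];

        /* Never let user space dictate how much stack we consume. */
        if (count >= sizeof(lbuf))
                count = sizeof(lbuf) - 1;

        if (copy_from_user(lbuf, buf, count))
                return -EFAULT;
        lbuf[count] = '\0';     /* terminate before any string parsing */

        /* ... parse lbuf here ... */
        return count;
}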
+ */ + dmar_fault(iommu->irq, iommu); } return 0; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index cb23aa2..e610cfe 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -212,6 +212,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) pdev = pci_get_slot(pbus, PCI_DEVFN(device, function)); if (pdev) { + pdev->current_state = PCI_D0; slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); pci_dev_put(pdev); } diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index c9171be..46fcc59 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -71,6 +71,49 @@ #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) +/* page table handling */ +#define LEVEL_STRIDE (9) +#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) + +static inline int agaw_to_level(int agaw) +{ + return agaw + 2; +} + +static inline int agaw_to_width(int agaw) +{ + return 30 + agaw * LEVEL_STRIDE; +} + +static inline int width_to_agaw(int width) +{ + return (width - 30) / LEVEL_STRIDE; +} + +static inline unsigned int level_to_offset_bits(int level) +{ + return (level - 1) * LEVEL_STRIDE; +} + +static inline int pfn_level_offset(unsigned long pfn, int level) +{ + return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; +} + +static inline unsigned long level_mask(int level) +{ + return -1UL << level_to_offset_bits(level); +} + +static inline unsigned long level_size(int level) +{ + return 1UL << level_to_offset_bits(level); +} + +static inline unsigned long align_to_level(unsigned long pfn, int level) +{ + return (pfn + level_size(level) - 1) & level_mask(level); +} /* VT-d pages must always be _smaller_ than MM pages. Otherwise things are never going to work. 
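/*
 * Illustrative note (not part of the patch): the page-table helpers moved
 * above encode a 9-bit stride per level.  Working the arithmetic for a
 * typical 48-bit address width: width_to_agaw(48) = (48 - 30) / 9 = 2,
 * agaw_to_level(2) = 4 levels, and level_to_offset_bits() for levels 1..4
 * gives shifts of 0, 9, 18 and 27 bits into the page-frame number -- so
 * each level indexes one 512-entry table, as LEVEL_MASK (0x1ff) implies.
 */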
*/ @@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova) } -static inline int width_to_agaw(int width); - static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) { unsigned long sagaw; @@ -646,51 +687,6 @@ out: spin_unlock_irqrestore(&iommu->lock, flags); } -/* page table handling */ -#define LEVEL_STRIDE (9) -#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) - -static inline int agaw_to_level(int agaw) -{ - return agaw + 2; -} - -static inline int agaw_to_width(int agaw) -{ - return 30 + agaw * LEVEL_STRIDE; - -} - -static inline int width_to_agaw(int width) -{ - return (width - 30) / LEVEL_STRIDE; -} - -static inline unsigned int level_to_offset_bits(int level) -{ - return (level - 1) * LEVEL_STRIDE; -} - -static inline int pfn_level_offset(unsigned long pfn, int level) -{ - return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; -} - -static inline unsigned long level_mask(int level) -{ - return -1UL << level_to_offset_bits(level); -} - -static inline unsigned long level_size(int level) -{ - return 1UL << level_to_offset_bits(level); -} - -static inline unsigned long align_to_level(unsigned long pfn, int level) -{ - return (pfn + level_size(level) - 1) & level_mask(level); -} - static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, unsigned long pfn) { @@ -1839,7 +1835,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) ret = iommu_attach_domain(domain, iommu); if (ret) { - domain_exit(domain); + free_domain_mem(domain); goto error; } @@ -3030,6 +3026,34 @@ static void __init iommu_exit_mempool(void) } +static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) +{ + struct dmar_drhd_unit *drhd; + u32 vtbar; + int rc; + + /* We know that this device on this chipset has its own IOMMU. + * If we find it under a different IOMMU, then the BIOS is lying + * to us. Hope that the IOMMU for this device is actually + * disabled, and it needs no translation... 
+ */ + rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); + if (rc) { + /* "can't" happen */ + dev_info(&pdev->dev, "failed to run vt-d quirk\n"); + return; + } + vtbar &= 0xffff0000; + + /* we know that the this iommu should be at offset 0xa000 from vtbar */ + drhd = dmar_find_matched_drhd_unit(pdev); + if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, + TAINT_FIRMWARE_WORKAROUND, + "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) + pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); + static void __init init_no_remapping_devices(void) { struct dmar_drhd_unit *drhd; @@ -3236,9 +3260,15 @@ static int device_notifier(struct notifier_block *nb, if (!domain) return 0; - if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) + if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { domain_remove_one_dev_info(domain, pdev); + if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && + list_empty(&domain->devices)) + domain_exit(domain); + } + return 0; } @@ -3387,6 +3417,11 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, domain->iommu_count--; domain_update_iommu_cap(domain); spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); + + spin_lock_irqsave(&iommu->lock, tmp_flags); + clear_bit(domain->id, iommu->domain_ids); + iommu->domains[domain->id] = NULL; + spin_unlock_irqrestore(&iommu->lock, tmp_flags); } spin_unlock_irqrestore(&device_domain_lock, flags); @@ -3603,9 +3638,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, pte = dmar_domain->pgd; if (dma_pte_present(pte)) { - free_pgtable_page(dmar_domain->pgd); dmar_domain->pgd = (struct dma_pte *) phys_to_virt(dma_pte_addr(pte)); + free_pgtable_page(pte); } dmar_domain->agaw--; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 77b68ea..cd22d69 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -196,6 +196,9 @@ void unmask_msi_irq(unsigned int irq) void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) { struct msi_desc *entry = get_irq_desc_msi(desc); + + BUG_ON(entry->dev->current_state != PCI_D0); + if (entry->msi_attrib.is_msix) { void __iomem *base = entry->mask_base + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; @@ -229,10 +232,32 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg) read_msi_msg_desc(desc, msg); } +void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) +{ + struct msi_desc *entry = get_irq_desc_msi(desc); + + /* Assert that the cache is valid, assuming that + * valid messages are not all-zeroes. 
*/ + BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | + entry->msg.data)); + + *msg = entry->msg; +} + +void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) +{ + struct irq_desc *desc = irq_to_desc(irq); + + get_cached_msi_msg_desc(desc, msg); +} + void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) { struct msi_desc *entry = get_irq_desc_msi(desc); - if (entry->msi_attrib.is_msix) { + + if (entry->dev->current_state != PCI_D0) { + /* Don't touch the hardware now */ + } else if (entry->msi_attrib.is_msix) { void __iomem *base; base = entry->mask_base + entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index f7b68ca..4ae494b 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c @@ -54,6 +54,9 @@ static int __init pci_stub_init(void) subdevice = PCI_ANY_ID, class=0, class_mask=0; int fields; + if (!strlen(id)) + continue; + fields = sscanf(id, "%x:%x:%x:%x:%x:%x", &vendor, &device, &subvendor, &subdevice, &class, &class_mask); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index c9957f6..d4ad1bd 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b) #ifdef HAVE_PCI_MMAP -int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) +int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, + enum pci_mmap_api mmap_api) { - unsigned long nr, start, size; + unsigned long nr, start, size, pci_start; + if (pci_resource_len(pdev, resno) == 0) + return 0; nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; start = vma->vm_pgoff; size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; - if (start < size && size - start >= nr) + pci_start = (mmap_api == PCI_MMAP_PROCFS) ? + pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; + if (start >= pci_start && start < pci_start + size && + start + nr <= pci_start + size) return 1; - WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", - current->comm, start, start+nr, pci_name(pdev), resno, size); return 0; } @@ -745,8 +749,14 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, if (i >= PCI_ROM_RESOURCE) return -ENODEV; - if (!pci_mmap_fits(pdev, i, vma)) + if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { + WARN(1, "process \"%s\" tried to map 0x%08lx bytes " + "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", + current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, + pci_name(pdev), i, + pci_resource_start(pdev, i), pci_resource_len(pdev, i)); return -EINVAL; + } /* pci_mmap_page_range() expects the same kind of entry as coming * from /proc/bus/pci/ which is a "user visible" value. 
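/*
 * Illustrative sketch (not part of the patch): the pci_mmap_fits() change
 * above boils down to a containment check in units of pages -- the
 * requested window [start, start + nr) must lie inside
 * [pci_start, pci_start + size), where pci_start is 0 for sysfs mappings
 * and the BAR's starting pfn for procfs mappings.  As plain arithmetic
 * (hypothetical helper name):
 */
static int my_range_fits(unsigned long start, unsigned long nr,
                         unsigned long pci_start, unsigned long size)
{
        /* Empty resources can never be mapped. */
        if (size == 0)
                return 0;

        return start >= pci_start &&
               start < pci_start + size &&
               start + nr <= pci_start + size;
}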
If this is @@ -1008,7 +1018,7 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev) attr->write = write_vpd_attr; retval = sysfs_create_bin_file(&dev->dev.kobj, attr); if (retval) { - kfree(dev->vpd->attr); + kfree(attr); return retval; } dev->vpd->attr = attr; diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f8077b3..7c5c239 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -13,8 +13,13 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev); extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); extern void pci_cleanup_rom(struct pci_dev *dev); #ifdef HAVE_PCI_MMAP +enum pci_mmap_api { + PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices//resource */ + PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/ */ +}; extern int pci_mmap_fits(struct pci_dev *pdev, int resno, - struct vm_area_struct *vma); + struct vm_area_struct *vmai, + enum pci_mmap_api mmap_api); #endif int pci_probe_reset_function(struct pci_dev *dev); diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 449e890..64ac30b 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -260,7 +260,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) /* Make sure the caller is mapping a real resource for this device */ for (i = 0; i < PCI_ROM_RESOURCE; i++) { - if (pci_mmap_fits(dev, i, vma)) + if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) break; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 477345d..06b234f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -499,6 +499,17 @@ static void __devinit quirk_piix4_acpi(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); +#define ICH_PMBASE 0x40 +#define ICH_ACPI_CNTL 0x44 +#define ICH4_ACPI_EN 0x10 +#define ICH6_ACPI_EN 0x80 +#define ICH4_GPIOBASE 0x58 +#define ICH4_GPIO_CNTL 0x5c +#define ICH4_GPIO_EN 0x10 +#define ICH6_GPIOBASE 0x48 +#define ICH6_GPIO_CNTL 0x4c +#define ICH6_GPIO_EN 0x10 + /* * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at * 0x40 (128 bytes of ACPI, GPIO & TCO registers) @@ -507,12 +518,33 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, qui static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) { u32 region; + u8 enable; - pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); + /* + * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict + * with low legacy (and fixed) ports. We don't know the decoding + * priority and can't tell whether the legacy device or the one created + * here is really at that address. This happens on boards with broken + * BIOSes. 
+ */ + + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); + if (enable & ICH4_ACPI_EN) { + pci_read_config_dword(dev, ICH_PMBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, + "ICH4 ACPI/GPIO/TCO"); + } - pci_read_config_dword(dev, 0x58, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); + pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); + if (enable & ICH4_GPIO_EN) { + pci_read_config_dword(dev, ICH4_GPIOBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 64, + PCI_BRIDGE_RESOURCES + 1, "ICH4 GPIO"); + } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); @@ -528,12 +560,25 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, qui static void __devinit ich6_lpc_acpi_gpio(struct pci_dev *dev) { u32 region; + u8 enable; - pci_read_config_dword(dev, 0x40, ®ion); - quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); + pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); + if (enable & ICH6_ACPI_EN) { + pci_read_config_dword(dev, ICH_PMBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, + "ICH6 ACPI/GPIO/TCO"); + } - pci_read_config_dword(dev, 0x48, ®ion); - quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); + pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); + if (enable & ICH4_GPIO_EN) { + pci_read_config_dword(dev, ICH6_GPIOBASE, ®ion); + region &= PCI_BASE_ADDRESS_IO_MASK; + if (region >= PCIBIOS_MIN_IO) + quirk_io_region(dev, region, 64, + PCI_BRIDGE_RESOURCES + 1, "ICH6 GPIO"); + } } static void __devinit ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize) @@ -2115,6 +2160,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disabl DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi); /* Disable MSI on chipsets that are known to not support it */ static void __devinit quirk_disable_msi(struct pci_dev *dev) @@ -2390,6 +2436,9 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all) int pos; int found; + if (!pci_msi_enabled()) + return; + /* check if there is HT MSI cap or enabled on this device */ found = ht_check_msi_mapping(dev); @@ -2654,6 +2703,29 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_m DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ +#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) +#define VTUNCERRMSK_REG 0x1ac +#define VTD_MSK_SPEC_ERRORS (1 << 31) +/* + * This is a quirk for masking vt-d spec defined errors to platform error + * handling logic. With out this, platforms using Intel 7500, 5500 chipsets + * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based + * on the RAS config settings of the platform) when a vt-d fault happens. 
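/*
 * Illustrative sketch (not part of the patch): both ICH LPC quirk rewrites
 * above follow the same recipe -- read the enable byte, bail out unless the
 * decode bit is set, mask the base with PCI_BASE_ADDRESS_IO_MASK, and only
 * reserve the region when it sits above PCIBIOS_MIN_IO so it cannot collide
 * with fixed legacy ports.  In outline (register offsets and names below
 * are hypothetical stand-ins):
 */
#include <linux/pci.h>

#define MY_CNTL_REG     0x44    /* hypothetical enable register   */
#define MY_BASE_REG     0x40    /* hypothetical base register     */
#define MY_EN_BIT       0x10    /* hypothetical decode-enable bit */

static void my_reserve_acpi_io(struct pci_dev *dev)
{
        u32 region;
        u8 enable;

        pci_read_config_byte(dev, MY_CNTL_REG, &enable);
        if (!(enable & MY_EN_BIT))
                return;         /* decode disabled: nothing to claim */

        pci_read_config_dword(dev, MY_BASE_REG, &region);
        region &= PCI_BASE_ADDRESS_IO_MASK;
        if (region < PCIBIOS_MIN_IO)
                return;         /* would shadow fixed legacy ports */

        /* At this point the patch calls quirk_io_region() to reserve it. */
}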
+ * The resulting SMI caused the system to hang. + * + * VT-d spec related errors are already handled by the VT-d OS code, so no + * need to report the same error through other channels. + */ +static void vtd_mask_spec_errors(struct pci_dev *dev) +{ + u32 word; + + pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); + pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); +#endif static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index a4cd9ad..015e274 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -651,7 +651,7 @@ EXPORT_SYMBOL(__pcmcia_request_exclusive_irq); #ifdef CONFIG_PCMCIA_PROBE /* mask of IRQs already reserved by other cards, we should avoid using them */ -static u8 pcmcia_used_irq[NR_IRQS]; +static u8 pcmcia_used_irq[32]; static irqreturn_t test_action(int cpl, void *dev_id) { @@ -674,6 +674,9 @@ static int pcmcia_setup_isa_irq(struct pcmcia_device *p_dev, int type) for (try = 0; try < 64; try++) { irq = try % 32; + if (irq > NR_IRQS) + continue; + /* marked as available by driver, not blocked by userspace? */ if (!((mask >> irq) & 1)) continue; diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 6f1a86b..fd4c25a 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -65,6 +65,7 @@ void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func, va_end(args); } } +EXPORT_SYMBOL(soc_pcmcia_debug); #endif diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 3e1b8a2..fe7d670 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -520,6 +520,7 @@ config TOSHIBA_BT_RFKILL config ACPI_CMPC tristate "CMPC Laptop Extras" depends on X86 && ACPI + depends on RFKILL || RFKILL=n select INPUT select BACKLIGHT_CLASS_DEVICE default n diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 1ea6c43..7b32e9c 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -91,7 +91,7 @@ struct acer_quirks { */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" -#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" +#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); @@ -1066,7 +1066,7 @@ static ssize_t set_bool_threeg(struct device *dev, return -EINVAL; return count; } -static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, +static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, set_bool_threeg); static ssize_t show_interface(struct device *dev, struct device_attribute *attr, diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index efe8f63..1fccc85 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -1064,9 +1064,9 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr, */ static int asus_gps_rfkill_set(void *data, bool blocked) { - acpi_handle handle = data; + struct asus_laptop *asus = data; - return asus_gps_switch(handle, !blocked); + return asus_gps_switch(asus, !blocked); } static const struct 
rfkill_ops asus_gps_rfkill_ops = { @@ -1093,7 +1093,7 @@ static int asus_rfkill_init(struct asus_laptop *asus) asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev, RFKILL_TYPE_GPS, - &asus_gps_rfkill_ops, NULL); + &asus_gps_rfkill_ops, asus); if (!asus->gps_rfkill) return -EINVAL; diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 92fd30c..9727b8e 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1079,14 +1079,8 @@ static int asus_hotk_add_fs(struct acpi_device *device) struct proc_dir_entry *proc; mode_t mode; - /* - * If parameter uid or gid is not changed, keep the default setting for - * our proc entries (-rw-rw-rw-) else, it means we care about security, - * and then set to -rw-rw---- - */ - if ((asus_uid == 0) && (asus_gid == 0)) { - mode = S_IFREG | S_IRUGO | S_IWUGO; + mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP; } else { mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; printk(KERN_WARNING " asus_uid and asus_gid parameters are " diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 71ff154..90111d7 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -259,6 +259,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = { .callback = dmi_check_cb }, { + .ident = "Dell Mini 1012", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + }, + .callback = dmi_check_cb + }, + { .ident = "Dell Inspiron 11z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), @@ -375,5 +383,6 @@ MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*"); +MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*"); MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*"); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 661e3ac..6110601 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -116,6 +116,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = { }, }, { + .ident = "Dell Mini 1012", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), + }, + }, + { .ident = "Dell Inspiron 11z", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 66f53c3..12a8e6f 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -221,7 +221,7 @@ static void dell_wmi_notify(u32 value, void *context) return; } - if (dell_new_hk_type) + if (dell_new_hk_type || buffer_entry[1] == 0x0) reported_key = (int)buffer_entry[2]; else reported_key = (int)buffer_entry[1] & 0xffff; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 51c07a0..f1c1862 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -58,6 +58,12 @@ enum hp_wmi_radio { HPWMI_WWAN = 2, }; +enum hp_wmi_event_ids { + HPWMI_DOCK_EVENT = 1, + HPWMI_BEZEL_BUTTON = 4, + HPWMI_WIRELESS = 5, +}; + static int __devinit hp_wmi_bios_setup(struct platform_device *device); static int __exit hp_wmi_bios_remove(struct platform_device *device); static int hp_wmi_resume_handler(struct device *device); @@ -338,7 +344,7 @@ static void hp_wmi_notify(u32 value, void 
*context) struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; static struct key_entry *key; union acpi_object *obj; - int eventcode; + int eventcode, key_code; acpi_status status; status = wmi_get_event_data(value, &response); @@ -357,28 +363,32 @@ static void hp_wmi_notify(u32 value, void *context) eventcode = *((u8 *) obj->buffer.pointer); kfree(obj); - if (eventcode == 0x4) - eventcode = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, - 0); - key = hp_wmi_get_entry_by_scancode(eventcode); - if (key) { - switch (key->type) { - case KE_KEY: - input_report_key(hp_wmi_input_dev, - key->keycode, 1); - input_sync(hp_wmi_input_dev); - input_report_key(hp_wmi_input_dev, - key->keycode, 0); - input_sync(hp_wmi_input_dev); - break; - } - } else if (eventcode == 0x1) { + switch (eventcode) { + case HPWMI_DOCK_EVENT: input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state()); input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, hp_wmi_tablet_state()); input_sync(hp_wmi_input_dev); - } else if (eventcode == 0x5) { + break; + case HPWMI_BEZEL_BUTTON: + key_code = hp_wmi_perform_query(HPWMI_HOTKEY_QUERY, 0, + 0); + key = hp_wmi_get_entry_by_scancode(key_code); + if (key) { + switch (key->type) { + case KE_KEY: + input_report_key(hp_wmi_input_dev, + key->keycode, 1); + input_sync(hp_wmi_input_dev); + input_report_key(hp_wmi_input_dev, + key->keycode, 0); + input_sync(hp_wmi_input_dev); + break; + } + } + break; + case HPWMI_WIRELESS: if (wifi_rfkill) rfkill_set_states(wifi_rfkill, hp_wmi_get_sw_state(HPWMI_WIFI), @@ -391,9 +401,12 @@ static void hp_wmi_notify(u32 value, void *context) rfkill_set_states(wwan_rfkill, hp_wmi_get_sw_state(HPWMI_WWAN), hp_wmi_get_hw_state(HPWMI_WWAN)); - } else + break; + default: printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n", eventcode); + break; + } } static int __init hp_wmi_input_setup(void) diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c index 1fe0f1f..865ef78 100644 --- a/drivers/platform/x86/tc1100-wmi.c +++ b/drivers/platform/x86/tc1100-wmi.c @@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \ return -EINVAL; \ return count; \ } \ -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ +static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \ show_bool_##value, set_bool_##value); show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 4bdb137..babc4d4 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -1911,6 +1911,17 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */ TP_ACPI_HOTKEYSCAN_VOLUMEDOWN, TP_ACPI_HOTKEYSCAN_MUTE, TP_ACPI_HOTKEYSCAN_THINKPAD, + TP_ACPI_HOTKEYSCAN_UNK1, + TP_ACPI_HOTKEYSCAN_UNK2, + TP_ACPI_HOTKEYSCAN_UNK3, + TP_ACPI_HOTKEYSCAN_UNK4, + TP_ACPI_HOTKEYSCAN_UNK5, + TP_ACPI_HOTKEYSCAN_UNK6, + TP_ACPI_HOTKEYSCAN_UNK7, + TP_ACPI_HOTKEYSCAN_UNK8, + + /* Hotkey keymap size */ + TPACPI_HOTKEY_MAP_LEN }; enum { /* Keys/events available through NVRAM polling */ @@ -3082,6 +3093,9 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = { TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ }; +typedef u16 tpacpi_keymap_entry_t; +typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; + static int __init hotkey_init(struct ibm_init_struct *iibm) { /* Requirements for changing the default keymaps: @@ -3113,9 +3127,17 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) * If 
the above is too much to ask, don't change the keymap. * Ask the thinkpad-acpi maintainer to do it, instead. */ - static u16 ibm_keycode_map[] __initdata = { + + enum keymap_index { + TPACPI_KEYMAP_IBM_GENERIC = 0, + TPACPI_KEYMAP_LENOVO_GENERIC, + }; + + static const tpacpi_keymap_t tpacpi_keymaps[] __initconst = { + /* Generic keymap for IBM ThinkPads */ + [TPACPI_KEYMAP_IBM_GENERIC] = { /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ - KEY_FN_F1, KEY_FN_F2, KEY_COFFEE, KEY_SLEEP, + KEY_FN_F1, KEY_BATTERY, KEY_COFFEE, KEY_SLEEP, KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, @@ -3146,11 +3168,13 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* (assignments unknown, please report if found) */ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, - }; - static u16 lenovo_keycode_map[] __initdata = { + }, + + /* Generic keymap for Lenovo ThinkPads */ + [TPACPI_KEYMAP_LENOVO_GENERIC] = { /* Scan Codes 0x00 to 0x0B: ACPI HKEY FN+F1..F12 */ KEY_FN_F1, KEY_COFFEE, KEY_BATTERY, KEY_SLEEP, - KEY_WLAN, KEY_FN_F6, KEY_SWITCHVIDEOMODE, KEY_FN_F8, + KEY_WLAN, KEY_CAMERA, KEY_SWITCHVIDEOMODE, KEY_FN_F8, KEY_FN_F9, KEY_FN_F10, KEY_FN_F11, KEY_SUSPEND, /* Scan codes 0x0C to 0x1F: Other ACPI HKEY hot keys */ @@ -3189,11 +3213,25 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) /* (assignments unknown, please report if found) */ KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, + }, }; -#define TPACPI_HOTKEY_MAP_LEN ARRAY_SIZE(ibm_keycode_map) -#define TPACPI_HOTKEY_MAP_SIZE sizeof(ibm_keycode_map) -#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(ibm_keycode_map[0]) + static const struct tpacpi_quirk tpacpi_keymap_qtable[] __initconst = { + /* Generic maps (fallback) */ + { + .vendor = PCI_VENDOR_ID_IBM, + .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, + .quirks = TPACPI_KEYMAP_IBM_GENERIC, + }, + { + .vendor = PCI_VENDOR_ID_LENOVO, + .bios = TPACPI_MATCH_ANY, .ec = TPACPI_MATCH_ANY, + .quirks = TPACPI_KEYMAP_LENOVO_GENERIC, + }, + }; + +#define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) +#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t) int res, i; int status; @@ -3202,6 +3240,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) bool tabletsw_state = false; unsigned long quirks; + unsigned long keymap_id; vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, "initializing hotkey subdriver\n"); @@ -3342,7 +3381,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) goto err_exit; /* Set up key map */ - hotkey_keycode_map = kmalloc(TPACPI_HOTKEY_MAP_SIZE, GFP_KERNEL); if (!hotkey_keycode_map) { @@ -3352,17 +3390,14 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) goto err_exit; } - if (tpacpi_is_lenovo()) { - dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, - "using Lenovo default hot key map\n"); - memcpy(hotkey_keycode_map, &lenovo_keycode_map, - TPACPI_HOTKEY_MAP_SIZE); - } else { - dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, - "using IBM default hot key map\n"); - memcpy(hotkey_keycode_map, &ibm_keycode_map, - TPACPI_HOTKEY_MAP_SIZE); - } + keymap_id = tpacpi_check_quirks(tpacpi_keymap_qtable, + ARRAY_SIZE(tpacpi_keymap_qtable)); + BUG_ON(keymap_id >= ARRAY_SIZE(tpacpi_keymaps)); + dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, + "using keymap number %lu\n", keymap_id); + + memcpy(hotkey_keycode_map, &tpacpi_keymaps[keymap_id], + TPACPI_HOTKEY_MAP_SIZE); 
input_set_capability(tpacpi_inputdev, EV_MSC, MSC_SCAN); tpacpi_inputdev->keycodesize = TPACPI_HOTKEY_MAP_TYPESIZE; @@ -3469,7 +3504,8 @@ static bool hotkey_notify_hotkey(const u32 hkey, *send_acpi_ev = true; *ignore_acpi_ev = false; - if (scancode > 0 && scancode < 0x21) { + /* HKEY event 0x1001 is scancode 0x00 */ + if (scancode > 0 && scancode <= TPACPI_HOTKEY_MAP_LEN) { scancode--; if (!(hotkey_source_mask & (1 << scancode))) { tpacpi_input_send_key_masked(scancode); diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index e4eaa14..0ffeb8f 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -795,7 +795,7 @@ static bool guid_already_parsed(const char *guid_string) wblock = list_entry(p, struct wmi_block, list); gblock = &wblock->gblock; - if (strncmp(gblock->guid, guid_string, 16) == 0) + if (memcmp(gblock->guid, guid_string, 16) == 0) return true; } return false; diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index f7ff628..4a4041c 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -28,7 +28,7 @@ #include "../base.h" #include "pnpacpi.h" -static int num = 0; +static int num; /* We need only to blacklist devices that have already an acpi driver that * can't use pnp layer. We don't need to blacklist device that are directly @@ -157,11 +157,24 @@ struct pnp_protocol pnpacpi_protocol = { }; EXPORT_SYMBOL(pnpacpi_protocol); +static char *pnpacpi_get_id(struct acpi_device *device) +{ + struct acpi_hardware_id *id; + + list_for_each_entry(id, &device->pnp.ids, list) { + if (ispnpidacpi(id->id)) + return id->id; + } + + return NULL; +} + static int __init pnpacpi_add_device(struct acpi_device *device) { acpi_handle temp = NULL; acpi_status status; struct pnp_dev *dev; + char *pnpid; struct acpi_hardware_id *id; /* @@ -169,11 +182,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device) * driver should not be loaded. 
*/ status = acpi_get_handle(device->handle, "_CRS", &temp); - if (ACPI_FAILURE(status) || !ispnpidacpi(acpi_device_hid(device)) || - is_exclusive_device(device) || (!device->status.present)) + if (ACPI_FAILURE(status)) + return 0; + + pnpid = pnpacpi_get_id(device); + if (!pnpid) + return 0; + + if (is_exclusive_device(device) || !device->status.present) return 0; - dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device)); + dev = pnp_alloc_dev(&pnpacpi_protocol, num, pnpid); if (!dev) return -ENOMEM; @@ -204,7 +223,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device) pnpacpi_parse_resource_option_data(dev); list_for_each_entry(id, &device->pnp.ids, list) { - if (!strcmp(id->id, acpi_device_hid(device))) + if (!strcmp(id->id, pnpid)) continue; if (!ispnpidacpi(id->id)) continue; diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c index 936bae5..dc628cb 100644 --- a/drivers/power/apm_power.c +++ b/drivers/power/apm_power.c @@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source) empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN; now_prop = POWER_SUPPLY_PROP_ENERGY_NOW; avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG; + break; case SOURCE_VOLTAGE: full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX; empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN; diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index 4d3b272..c8d26df 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c @@ -212,7 +212,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di) if (di->rem_capacity > 100) di->rem_capacity = 100; - if (di->current_uA >= 100L) + if (di->current_uA < -100L) di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L) / (di->current_uA / 100L); else diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index baefcf1..efa71cd 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -271,14 +271,14 @@ static int olpc_bat_get_property(struct power_supply *psy, if (ret) return ret; - val->intval = (int)be16_to_cpu(ec_word) * 9760L / 32; + val->intval = (s16)be16_to_cpu(ec_word) * 9760L / 32; break; case POWER_SUPPLY_PROP_CURRENT_AVG: ret = olpc_ec_cmd(EC_BAT_CURRENT, NULL, 0, (void *)&ec_word, 2); if (ret) return ret; - val->intval = (int)be16_to_cpu(ec_word) * 15625L / 120; + val->intval = (s16)be16_to_cpu(ec_word) * 15625L / 120; break; case POWER_SUPPLY_PROP_CAPACITY: ret = olpc_ec_cmd(EC_BAT_SOC, NULL, 0, &ec_byte, 1); @@ -299,7 +299,7 @@ static int olpc_bat_get_property(struct power_supply *psy, if (ret) return ret; - val->intval = (int)be16_to_cpu(ec_word) * 100 / 256; + val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256; break; case POWER_SUPPLY_PROP_TEMP_AMBIENT: ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2); @@ -313,7 +313,7 @@ static int olpc_bat_get_property(struct power_supply *psy, if (ret) return ret; - val->intval = (int)be16_to_cpu(ec_word) * 6250 / 15; + val->intval = (s16)be16_to_cpu(ec_word) * 6250 / 15; break; case POWER_SUPPLY_PROP_SERIAL_NUMBER: ret = olpc_ec_cmd(EC_BAT_SERIAL, NULL, 0, (void *)&ser_buf, 8); diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c index 5a1dc8a..03713bc 100644 --- a/drivers/regulator/wm8994-regulator.c +++ b/drivers/regulator/wm8994-regulator.c @@ -219,8 +219,6 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev) ldo->wm8994 = wm8994; - ldo->is_enabled = true; - if (pdata->ldo[id].enable && 
gpio_is_valid(pdata->ldo[id].enable)) { ldo->enable = pdata->ldo[id].enable; @@ -237,7 +235,8 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev) ret); goto err_gpio; } - } + } else + ldo->is_enabled = true; ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev, pdata->ldo[id].init_data, ldo); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 11b8ea2..bf588d9 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -36,6 +36,7 @@ #include #include #include +#include /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include @@ -850,7 +851,7 @@ static void __exit cmos_do_remove(struct device *dev) #ifdef CONFIG_PM -static int cmos_suspend(struct device *dev, pm_message_t mesg) +static int cmos_suspend(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char tmp; @@ -898,7 +899,7 @@ static int cmos_suspend(struct device *dev, pm_message_t mesg) */ static inline int cmos_poweroff(struct device *dev) { - return cmos_suspend(dev, PMSG_HIBERNATE); + return cmos_suspend(dev); } static int cmos_resume(struct device *dev) @@ -945,9 +946,9 @@ static int cmos_resume(struct device *dev) return 0; } +static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume); + #else -#define cmos_suspend NULL -#define cmos_resume NULL static inline int cmos_poweroff(struct device *dev) { @@ -1083,7 +1084,7 @@ static void __exit cmos_pnp_remove(struct pnp_dev *pnp) static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg) { - return cmos_suspend(&pnp->dev, mesg); + return cmos_suspend(&pnp->dev); } static int cmos_pnp_resume(struct pnp_dev *pnp) @@ -1163,8 +1164,9 @@ static struct platform_driver cmos_platform_driver = { .shutdown = cmos_platform_shutdown, .driver = { .name = (char *) driver_name, - .suspend = cmos_suspend, - .resume = cmos_resume, +#ifdef CONFIG_PM + .pm = &cmos_pm_ops, +#endif } }; diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c index 37268e9..afeb546 100644 --- a/drivers/rtc/rtc-ds1511.c +++ b/drivers/rtc/rtc-ds1511.c @@ -485,7 +485,7 @@ ds1511_nvram_write(struct file *filp, struct kobject *kobj, static struct bin_attribute ds1511_nvram_attr = { .attr = { .name = "nvram", - .mode = S_IRUGO | S_IWUGO, + .mode = S_IRUGO | S_IWUSR, }, .size = DS1511_RAM_MAX, .read = ds1511_nvram_read, diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c index 90cf0a6..dd14e20 100644 --- a/drivers/rtc/rtc-rs5c372.c +++ b/drivers/rtc/rtc-rs5c372.c @@ -207,7 +207,7 @@ static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm) static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm) { struct rs5c372 *rs5c = i2c_get_clientdata(client); - unsigned char buf[8]; + unsigned char buf[7]; int addr; dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d " diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 70b68d3..2a305ef 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -298,11 +298,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) s3c_rtc_setaie(alrm->enabled); - if (alrm->enabled) - enable_irq_wake(s3c_rtc_alarmno); - else - disable_irq_wake(s3c_rtc_alarmno); - return 0; } @@ -547,6 +542,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) ticnt_en_save &= S3C64XX_RTCCON_TICEN; } s3c_rtc_enable(pdev, 0); + + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(s3c_rtc_alarmno); + return 0; } @@ -560,6 +559,10 @@ static int 
s3c_rtc_resume(struct platform_device *pdev) tmp = readb(s3c_rtc_base + S3C2410_RTCCON); writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); } + + if (device_may_wakeup(&pdev->dev)) + disable_irq_wake(s3c_rtc_alarmno); + return 0; } #else diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index ab84da5..06f249b 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -63,7 +63,7 @@ static struct dasd_discipline dasd_eckd_discipline; static struct ccw_device_id dasd_eckd_ids[] = { { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, - { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, + { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 18d9a49..9b93e4a 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c @@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp; - int ct, perm; + unsigned int ct; + int perm; argp = (void __user *)arg; diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 1a970a7..3d9f55f 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -720,6 +720,8 @@ static const struct scsi_dh_devlist alua_dev_list[] = { {"Intel", "Multi-Flex"}, {"NETAPP", "LUN"}, {"AIX", "NVDISK"}, + {"NETAPP", "LUN"}, + {"AIX", "NVDISK"}, {NULL, NULL} }; diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index f672d62..3ccbf76 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -4175,6 +4175,14 @@ static int ioc_general(void __user *arg, char *cmnd) ha = gdth_find_ha(gen.ionode); if (!ha) return -EFAULT; + + if (gen.data_len > INT_MAX) + return -EINVAL; + if (gen.sense_len > INT_MAX) + return -EINVAL; + if (gen.data_len + gen.sense_len > INT_MAX) + return -EINVAL; + if (gen.data_len + gen.sense_len != 0) { if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len, FALSE, &paddr))) diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 8c496b5..fb78856 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -347,6 +347,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in, static struct ata_port_operations sas_sata_ops = { .phy_reset = sas_ata_phy_reset, .post_internal_cmd = sas_ata_post_internal, + .qc_defer = ata_std_qc_defer, .qc_prep = ata_noop_qc_prep, .qc_issue = sas_ata_qc_issue, .qc_fill_rtf = sas_ata_qc_fill_rtf, diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index a7890c6..87f58f6 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -649,6 +649,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost) spin_lock_irqsave(shost->host_lock, flags); list_splice_init(&shost->eh_cmd_q, &eh_work_q); + shost->host_eh_scheduled = 0; spin_unlock_irqrestore(shost->host_lock, flags); SAS_DPRINTK("Enter %s\n", __func__); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 0ec1ed3..09433df 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ 
b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -2050,9 +2050,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* adjust hba_queue_depth, reply_free_queue_depth, * and queue_size */ - ioc->hba_queue_depth -= queue_diff; - ioc->reply_free_queue_depth -= queue_diff; - queue_size -= queue_diff; + ioc->hba_queue_depth -= (queue_diff / 2); + ioc->reply_free_queue_depth -= (queue_diff / 2); + queue_size = facts->MaxReplyDescriptorPostQueueDepth; } ioc->reply_post_queue_depth = queue_size; @@ -3736,6 +3736,8 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) static void _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) { + mpt2sas_scsih_reset_handler(ioc, reset_phase); + mpt2sas_ctl_reset_handler(ioc, reset_phase); switch (reset_phase) { case MPT2_IOC_PRE_RESET: dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " @@ -3766,8 +3768,6 @@ _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) "MPT2_IOC_DONE_RESET\n", ioc->name, __func__)); break; } - mpt2sas_scsih_reset_handler(ioc, reset_phase); - mpt2sas_ctl_reset_handler(ioc, reset_phase); } /** @@ -3821,6 +3821,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, { int r; unsigned long flags; + u8 pe_complete = ioc->wait_for_port_enable_to_complete; dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name, __func__)); @@ -3845,6 +3846,14 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag, if (r) goto out; _base_reset_handler(ioc, MPT2_IOC_AFTER_RESET); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (pe_complete) { + r = -EFAULT; + goto out; + } r = _base_make_ioc_operational(ioc, sleep_flag); if (!r) _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index c5ff26a..04baaf4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2759,9 +2759,6 @@ _scsih_check_topo_delete_events(struct MPT2SAS_ADAPTER *ioc, u16 handle; for (i = 0 ; i < event_data->NumEntries; i++) { - if (event_data->PHY[i].PhyStatus & - MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) - continue; handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); if (!handle) continue; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 8ef9453..1fcb89e 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1122,6 +1122,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, fcp_cmnd->additional_cdb_len |= 2; int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); + host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index be3d8be..f347ea4 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1119,9 +1119,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, fcport->port_type = FCT_TARGET; if (iop[0] & BIT_8) fcport->flags |= FCF_FCP2_DEVICE; - } - if (iop[0] & BIT_5) + } else if (iop[0] & BIT_5) fcport->port_type = FCT_INITIATOR; + if (logio->io_parameter[7] || logio->io_parameter[8]) fcport->supported_classes |= FC_COS_CLASS2; if (logio->io_parameter[9] || logio->io_parameter[10]) diff --git 
a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index ff562de..e9d9333 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2798,6 +2798,7 @@ sufficient_dsds: goto queuing_error_fcp_cmnd; int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* build FCP_CMND IU */ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index be1a8fc..8dc5fb1 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2102,6 +2102,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_82XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla82xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; ha->flash_data_off = FARX_ACCESS_FLASH_DATA; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index ad0ed21..348fba0 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1046,13 +1046,13 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, /* If the user actually wanted this page, we can skip the rest */ if (page == 0) - return -EINVAL; + return 0; for (i = 0; i < min((int)buf[3], buf_len - 4); i++) if (buf[i + 4] == page) goto found; - if (i < buf[3] && i > buf_len) + if (i < buf[3] && i >= buf_len - 4) /* ran off the end of the buffer, give us benefit of doubt */ goto found; /* The device claims it doesn't support the requested page */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1646fe7..c2a9e12 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1640,9 +1640,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); - /* New queue, no concurrency on queue_flags */ if (!shost->use_clustering) - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); + q->limits.cluster = 0; /* * set a reasonable default alignment on word boundaries: the @@ -2436,7 +2435,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev) sdev->sdev_state = SDEV_RUNNING; else if (sdev->sdev_state == SDEV_CREATED_BLOCK) sdev->sdev_state = SDEV_CREATED; - else + else if (sdev->sdev_state != SDEV_CANCEL && + sdev->sdev_state != SDEV_OFFLINE) return -EINVAL; spin_lock_irqsave(q->queue_lock, flags); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index c23ab97..86ea5f3 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -990,10 +990,11 @@ static void __scsi_remove_target(struct scsi_target *starget) list_for_each_entry(sdev, &shost->__devices, siblings) { if (sdev->channel != starget->channel || sdev->id != starget->id || - sdev->sdev_state == SDEV_DEL) + scsi_device_get(sdev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); scsi_remove_device(sdev); + scsi_device_put(sdev); spin_lock_irqsave(shost->host_lock, flags); goto restart; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 8802e48..2452686 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1111,6 +1111,12 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); u64 bad_lba; int info_valid; + /* + * resid is optional but mostly filled in. 
When it's unused, + * its value is zero, so we assume the whole buffer transferred + */ + unsigned int transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd); + unsigned int good_bytes; if (!blk_fs_request(scmd->request)) return 0; @@ -1144,7 +1150,8 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) /* This computation should always be done in terms of * the resolution of the device's medium. */ - return (bad_lba - start_lba) * scmd->device->sector_size; + good_bytes = (bad_lba - start_lba) * scmd->device->sector_size; + return min(good_bytes, transferred); } /** @@ -2204,11 +2211,10 @@ static void sd_probe_async(void *data, async_cookie_t cookie) index = sdkp->index; dev = &sdp->sdev_gendev; - if (index < SD_MAX_DISKS) { - gd->major = sd_major((index & 0xf0) >> 4); - gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); - gd->minors = SD_MINORS; - } + gd->major = sd_major((index & 0xf0) >> 4); + gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00); + gd->minors = SD_MINORS; + gd->fops = &sd_fops; gd->private_data = &sdkp->driver; gd->queue = sdkp->device->request_queue; @@ -2297,6 +2303,12 @@ static int sd_probe(struct device *dev) if (error) goto out_put; + if (index >= SD_MAX_DISKS) { + error = -ENODEV; + sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name space exhausted.\n"); + goto out_free_index; + } + error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN); if (error) goto out_free_index; diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 7f5a6a8..3b00e90 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -390,9 +390,9 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, len = (desc_ptr[2] << 8) + desc_ptr[3]; /* skip past overall descriptor */ desc_ptr += len + 4; - if (ses_dev->page10) - addl_desc_ptr = ses_dev->page10 + 8; } + if (ses_dev->page10) + addl_desc_ptr = ses_dev->page10 + 8; type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11]; components = 0; for (i = 0; i < types; i++, type_ptr += 4) { diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 891e1dd..5bab62d 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -256,7 +256,8 @@ static const struct serial8250_config uart_config[] = { .fifo_size = 128, .tx_loadsz = 128, .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, - .flags = UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP, + /* UART_CAP_EFR breaks billionon CF bluetooth card. 
*/ + .flags = UART_CAP_FIFO | UART_CAP_SLEEP, }, [PORT_RSA] = { .name = "RSA", diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 746a446..53be4d3 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -994,6 +994,7 @@ static int skip_tx_en_setup(struct serial_private *priv, #define PCI_DEVICE_ID_TITAN_800E 0xA014 #define PCI_DEVICE_ID_TITAN_200EI 0xA016 #define PCI_DEVICE_ID_TITAN_200EISI 0xA017 +#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 @@ -1542,6 +1543,8 @@ enum pci_board_num_t { pbn_b2_4_921600, pbn_b2_8_921600, + pbn_b2_8_1152000, + pbn_b2_bt_1_115200, pbn_b2_bt_2_115200, pbn_b2_bt_4_115200, @@ -1960,6 +1963,13 @@ static struct pciserial_board pci_boards[] __devinitdata = { .uart_offset = 8, }, + [pbn_b2_8_1152000] = { + .flags = FL_BASE2, + .num_ports = 8, + .base_baud = 1152000, + .uart_offset = 8, + }, + [pbn_b2_bt_1_115200] = { .flags = FL_BASE2|FL_BASE_BARS, .num_ports = 1, @@ -2875,6 +2885,9 @@ static struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_921600 }, + { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958, + PCI_ANY_ID , PCI_ANY_ID, 0, 0, + pbn_b2_8_1152000 }, /* * Oxford Semiconductor Inc. Tornado PCI express device range. diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index e57fb3d..5318dd3 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c @@ -121,7 +121,7 @@ static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate) unsigned int sclk = get_sclk(); /* Set TCR1 and TCR2, TFSR is not enabled for uart */ - SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); + SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK)); SPORT_PUT_TCR2(up, size + 1); pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index eacb588..6d2028e 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -383,12 +383,13 @@ static void imx_start_tx(struct uart_port *port) static irqreturn_t imx_rtsint(int irq, void *dev_id) { struct imx_port *sport = dev_id; - unsigned int val = readl(sport->port.membase + USR1) & USR1_RTSS; + unsigned int val; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); writel(USR1_RTSD, sport->port.membase + USR1); + val = readl(sport->port.membase + USR1) & USR1_RTSS; uart_handle_cts_change(&sport->port, !!val); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c index 544f2e2..6381a02 100644 --- a/drivers/serial/suncore.c +++ b/drivers/serial/suncore.c @@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors); int sunserial_console_match(struct console *con, struct device_node *dp, struct uart_driver *drv, int line, bool ignore_line) { - if (!con || of_console_device != dp) + if (!con) + return 0; + + drv->cons = con; + + if (of_console_device != dp) return 0; if (!ignore_line) { @@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp, return 0; } - con->index = line; - drv->cons = con; - - if (!console_set_on_cmdline) + if (!console_set_on_cmdline) { + con->index = line; add_preferred_console(con->name, line, NULL); - + } return 1; } EXPORT_SYMBOL(sunserial_console_match); diff --git a/drivers/ssb/b43_pci_bridge.c 
b/drivers/ssb/b43_pci_bridge.c index ef9c6a0..744d3f6 100644 --- a/drivers/ssb/b43_pci_bridge.c +++ b/drivers/ssb/b43_pci_bridge.c @@ -24,6 +24,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) }, + { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC, 0x4318) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) }, diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 59ae76b..d9c7e54 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -235,6 +235,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc) return; /* We don't have a ChipCommon */ if (cc->dev->id.revision >= 11) cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT); + ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status); ssb_pmu_init(cc); chipco_powercontrol_init(cc); ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST); diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 6dcda86..6e88d2b 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -626,11 +626,22 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, return -ENODEV; } if (bus->chipco.dev) { /* can be unavailible! */ - bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ? - SSB_SPROM_BASE1 : SSB_SPROM_BASE31; + /* + * get SPROM offset: SSB_SPROM_BASE1 except for + * chipcommon rev >= 31 or chip ID is 0x4312 and + * chipcommon status & 3 == 2 + */ + if (bus->chipco.dev->id.revision >= 31) + bus->sprom_offset = SSB_SPROM_BASE31; + else if (bus->chip_id == 0x4312 && + (bus->chipco.status & 0x03) == 2) + bus->sprom_offset = SSB_SPROM_BASE31; + else + bus->sprom_offset = SSB_SPROM_BASE1; } else { bus->sprom_offset = SSB_SPROM_BASE1; } + ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset); buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); if (!buf) diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c index 5b279fb..6244555 100644 --- a/drivers/staging/asus_oled/asus_oled.c +++ b/drivers/staging/asus_oled/asus_oled.c @@ -620,13 +620,13 @@ static ssize_t class_set_picture(struct device *device, #define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file -static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO, get_enabled, set_enabled); -static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture); +static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture); -static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, class_get_enabled, class_set_enabled); -static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture); +static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture); static int asus_oled_probe(struct usb_interface *interface, const struct usb_device_id *id) diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index 7a582e8..ce1d251 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@ -128,6 +128,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if) static void update_mac_addresses(struct batman_if *batman_if) { + if (!batman_if || !batman_if->packet_buff) + return; + addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr); memcpy(((struct 
batman_packet *)(batman_if->packet_buff))->orig, @@ -194,8 +197,6 @@ static void hardif_activate_interface(struct bat_priv *bat_priv, if (batman_if->if_status != IF_INACTIVE) return; - dev_hold(batman_if->net_dev); - update_mac_addresses(batman_if); batman_if->if_status = IF_TO_BE_ACTIVATED; @@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct batman_if *batman_if) (batman_if->if_status != IF_TO_BE_ACTIVATED)) return; - dev_put(batman_if->net_dev); - batman_if->if_status = IF_INACTIVE; printk(KERN_INFO "batman-adv:Interface deactivated: %s\n", @@ -321,12 +320,14 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) if (ret != 1) goto out; + dev_hold(net_dev); + batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); if (!batman_if) { printk(KERN_ERR "batman-adv:" "Can't add interface (%s): out of memory\n", net_dev->name); - goto out; + goto release_dev; } batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC); @@ -340,6 +341,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) batman_if->if_num = -1; batman_if->net_dev = net_dev; batman_if->if_status = IF_NOT_IN_USE; + batman_if->packet_buff = NULL; INIT_LIST_HEAD(&batman_if->list); check_known_mac_addr(batman_if->net_dev->dev_addr); @@ -350,6 +352,8 @@ free_dev: kfree(batman_if->dev); free_if: kfree(batman_if); +release_dev: + dev_put(net_dev); out: return NULL; } @@ -378,6 +382,7 @@ static void hardif_remove_interface(struct batman_if *batman_if) batman_if->if_status = IF_TO_BE_REMOVED; list_del_rcu(&batman_if->list); sysfs_del_hardif(&batman_if->hardif_obj); + dev_put(batman_if->net_dev); call_rcu(&batman_if->rcu, hardif_free_interface); } @@ -397,15 +402,13 @@ static int hard_if_event(struct notifier_block *this, /* FIXME: each batman_if will be attached to a softif */ struct bat_priv *bat_priv = netdev_priv(soft_device); - if (!batman_if) - batman_if = hardif_add_interface(net_dev); + if (!batman_if && event == NETDEV_REGISTER) + batman_if = hardif_add_interface(net_dev); if (!batman_if) goto out; switch (event) { - case NETDEV_REGISTER: - break; case NETDEV_UP: hardif_activate_interface(bat_priv, batman_if); break; diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c index 568aef8..2177c50 100644 --- a/drivers/staging/batman-adv/originator.c +++ b/drivers/staging/batman-adv/originator.c @@ -401,11 +401,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) { struct orig_node *orig_node; + unsigned long flags; HASHIT(hashit); /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); while (hash_iterate(orig_hash, &hashit)) { orig_node = hashit.bucket->data; @@ -414,11 +415,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) goto err; } - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return 0; err: - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return -ENOMEM; } @@ -480,12 +481,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) { struct batman_if *batman_if_tmp; struct orig_node *orig_node; + unsigned long flags; HASHIT(hashit); int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ - spin_lock(&orig_hash_lock); + spin_lock_irqsave(&orig_hash_lock, flags); while (hash_iterate(orig_hash, &hashit)) { 
orig_node = hashit.bucket->data; @@ -512,10 +514,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) rcu_read_unlock(); batman_if->if_num = -1; - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return 0; err: - spin_unlock(&orig_hash_lock); + spin_unlock_irqrestore(&orig_hash_lock, flags); return -ENOMEM; } diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c index 51c40b7..0ae6c03 100644 --- a/drivers/staging/batman-adv/soft-interface.c +++ b/drivers/staging/batman-adv/soft-interface.c @@ -295,6 +295,10 @@ void interface_rx(struct sk_buff *skb, int hdr_size) skb_pull_rcsum(skb, hdr_size); /* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/ + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) { + kfree_skb(skb); + return; + } skb->dev = dev; skb->protocol = eth_type_trans(skb, dev); diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c index d330b18..db31dfb 100644 --- a/drivers/staging/comedi/drivers/jr3_pci.c +++ b/drivers/staging/comedi/drivers/jr3_pci.c @@ -53,6 +53,7 @@ Devices: [JR3] PCI force sensor board (jr3_pci) #define PCI_VENDOR_ID_JR3 0x1762 #define PCI_DEVICE_ID_JR3_1_CHANNEL 0x3111 +#define PCI_DEVICE_ID_JR3_1_CHANNEL_NEW 0x1111 #define PCI_DEVICE_ID_JR3_2_CHANNEL 0x3112 #define PCI_DEVICE_ID_JR3_3_CHANNEL 0x3113 #define PCI_DEVICE_ID_JR3_4_CHANNEL 0x3114 @@ -72,6 +73,8 @@ static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = { { PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { + PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL, @@ -808,6 +811,10 @@ static int jr3_pci_attach(struct comedi_device *dev, devpriv->n_channels = 1; } break; + case PCI_DEVICE_ID_JR3_1_CHANNEL_NEW:{ + devpriv->n_channels = 1; + } + break; case PCI_DEVICE_ID_JR3_2_CHANNEL:{ devpriv->n_channels = 2; } diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c index 67c8a53..95bdc0d 100644 --- a/drivers/staging/comedi/drivers/ni_labpc.c +++ b/drivers/staging/comedi/drivers/ni_labpc.c @@ -571,7 +571,8 @@ int labpc_common_attach(struct comedi_device *dev, unsigned long iobase, /* grab our IRQ */ if (irq) { isr_flags = 0; - if (thisboard->bustype == pci_bustype) + if (thisboard->bustype == pci_bustype + || thisboard->bustype == pcmcia_bustype) isr_flags |= IRQF_SHARED; if (request_irq(irq, labpc_interrupt, isr_flags, driver_labpc.driver_name, dev)) { diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c index eed74f0..f21a0e8 100644 --- a/drivers/staging/frontier/tranzport.c +++ b/drivers/staging/frontier/tranzport.c @@ -204,7 +204,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev) t->value = temp; \ return count; \ } \ - static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); + static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value); show_int(enable); show_int(offline); diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig index 97480f5..7455c80 100644 --- a/drivers/staging/hv/Kconfig +++ b/drivers/staging/hv/Kconfig @@ -17,7 +17,7 @@ config HYPERV_STORAGE config HYPERV_BLOCK tristate "Microsoft Hyper-V virtual block driver" - depends on BLOCK && SCSI && LBDAF + depends on BLOCK && SCSI && (LBDAF || 64BIT) default HYPERV 
help Select this option to enable the Hyper-V virtual block driver. diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index 61bd0be..1886bc0 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c @@ -378,6 +378,7 @@ static int blkvsc_probe(struct device *device) blkdev->gd->first_minor = 0; blkdev->gd->fops = &block_ops; blkdev->gd->private_data = blkdev; + blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device); sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum); blkvsc_do_inquiry(blkdev); diff --git a/drivers/staging/hv/channel.c b/drivers/staging/hv/channel.c index f047c5a..8955d6a 100644 --- a/drivers/staging/hv/channel.c +++ b/drivers/staging/hv/channel.c @@ -78,14 +78,14 @@ static void VmbusChannelSetEvent(struct vmbus_channel *Channel) if (Channel->OfferMsg.MonitorAllocated) { /* Each u32 represents 32 channels */ - set_bit(Channel->OfferMsg.ChildRelId & 31, + sync_set_bit(Channel->OfferMsg.ChildRelId & 31, (unsigned long *) gVmbusConnection.SendInterruptPage + (Channel->OfferMsg.ChildRelId >> 5)); monitorPage = gVmbusConnection.MonitorPages; monitorPage++; /* Get the child to parent monitor page */ - set_bit(Channel->MonitorBit, + sync_set_bit(Channel->MonitorBit, (unsigned long *)&monitorPage->TriggerGroup [Channel->MonitorGroup].Pending); @@ -105,7 +105,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) if (Channel->OfferMsg.MonitorAllocated) { /* Each u32 represents 32 channels */ - clear_bit(Channel->OfferMsg.ChildRelId & 31, + sync_clear_bit(Channel->OfferMsg.ChildRelId & 31, (unsigned long *)gVmbusConnection.SendInterruptPage + (Channel->OfferMsg.ChildRelId >> 5)); @@ -113,7 +113,7 @@ static void VmbusChannelClearEvent(struct vmbus_channel *channel) (struct hv_monitor_page *)gVmbusConnection.MonitorPages; monitorPage++; /* Get the child to parent monitor page */ - clear_bit(Channel->MonitorBit, + sync_clear_bit(Channel->MonitorBit, (unsigned long *)&monitorPage->TriggerGroup [Channel->MonitorGroup].Pending); } diff --git a/drivers/staging/hv/connection.c b/drivers/staging/hv/connection.c index e8824da..d80a9f3 100644 --- a/drivers/staging/hv/connection.c +++ b/drivers/staging/hv/connection.c @@ -292,7 +292,9 @@ void VmbusOnEvents(void) for (dword = 0; dword < maxdword; dword++) { if (recvInterruptPage[dword]) { for (bit = 0; bit < 32; bit++) { - if (test_and_clear_bit(bit, (unsigned long *)&recvInterruptPage[dword])) { + if (sync_test_and_clear_bit(bit, + (unsigned long *) + &recvInterruptPage[dword])) { relid = (dword << 5) + bit; DPRINT_DBG(VMBUS, "event detected for relid - %d", relid); @@ -337,7 +339,7 @@ int VmbusSetEvent(u32 childRelId) DPRINT_ENTER(VMBUS); /* Each u32 represents 32 channels */ - set_bit(childRelId & 31, + sync_set_bit(childRelId & 31, (unsigned long *)gVmbusConnection.SendInterruptPage + (childRelId >> 5)); diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c index 55b9932..f1a7402 100644 --- a/drivers/staging/hv/netvsc_drv.c +++ b/drivers/staging/hv/netvsc_drv.c @@ -46,6 +46,7 @@ struct net_device_context { /* point back to our device context */ struct vm_device *device_ctx; unsigned long avail; + struct work_struct work; }; struct netvsc_driver_context { @@ -237,6 +238,7 @@ static void netvsc_linkstatus_callback(struct hv_device *device_obj, { struct vm_device *device_ctx = to_vm_device(device_obj); struct net_device *net = dev_get_drvdata(&device_ctx->device); + struct net_device_context *ndev_ctx; DPRINT_ENTER(NETVSC_DRV); @@ -249,6 +251,9 @@ 
static void netvsc_linkstatus_callback(struct hv_device *device_obj, if (status == 1) { netif_carrier_on(net); netif_wake_queue(net); + netif_notify_peers(net); + ndev_ctx = netdev_priv(net); + schedule_work(&ndev_ctx->work); } else { netif_carrier_off(net); netif_stop_queue(net); @@ -348,8 +353,30 @@ static const struct net_device_ops device_ops = { .ndo_stop = netvsc_close, .ndo_start_xmit = netvsc_start_xmit, .ndo_set_multicast_list = netvsc_set_multicast_list, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, }; +/* + * Send GARP packet to network peers after migrations. + * After Quick Migration, the network is not immediately operational in the + * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add + * another netif_notify_peers() into a scheduled work, otherwise GARP packet + * will not be sent after quick migration, and cause network disconnection. + */ +static void netvsc_send_garp(struct work_struct *w) +{ + struct net_device_context *ndev_ctx; + struct net_device *net; + + msleep(20); + ndev_ctx = container_of(w, struct net_device_context, work); + net = dev_get_drvdata(&ndev_ctx->device_ctx->device); + netif_notify_peers(net); +} + + static int netvsc_probe(struct device *device) { struct driver_context *driver_ctx = @@ -381,6 +408,7 @@ static int netvsc_probe(struct device *device) net_device_ctx->device_ctx = device_ctx; net_device_ctx->avail = ring_size; dev_set_drvdata(device, net); + INIT_WORK(&net_device_ctx->work, netvsc_send_garp); /* Notify the netvsc driver of the new device */ ret = net_drv_obj->Base.OnDeviceAdd(device_obj, &device_info); diff --git a/drivers/staging/hv/ring_buffer.c b/drivers/staging/hv/ring_buffer.c index ae2a10e..c5a3870 100644 --- a/drivers/staging/hv/ring_buffer.c +++ b/drivers/staging/hv/ring_buffer.c @@ -192,8 +192,7 @@ Description: static inline u64 GetRingBufferIndices(RING_BUFFER_INFO *RingInfo) { - return ((u64)RingInfo->RingBuffer->WriteIndex << 32) - || RingInfo->RingBuffer->ReadIndex; + return (u64)RingInfo->RingBuffer->WriteIndex << 32; } diff --git a/drivers/staging/hv/storvsc_api.h b/drivers/staging/hv/storvsc_api.h index 0063bde..8505a1c 100644 --- a/drivers/staging/hv/storvsc_api.h +++ b/drivers/staging/hv/storvsc_api.h @@ -28,10 +28,10 @@ #include "vmbus_api.h" /* Defines */ -#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE) +#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) #define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE) -#define STORVSC_MAX_IO_REQUESTS 64 +#define STORVSC_MAX_IO_REQUESTS 128 /* * In Hyper-V, each port/path/target maps to 1 scsi host adapter. 
In diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c index d22e35f..c1ba458 100644 --- a/drivers/staging/hv/storvsc_drv.c +++ b/drivers/staging/hv/storvsc_drv.c @@ -526,7 +526,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl, /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ - if (j == 0) + if (bounce_addr == 0) bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); while (srclen) { @@ -587,7 +587,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, destlen = orig_sgl[i].length; /* ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE); */ - if (j == 0) + if (bounce_addr == 0) bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0); while (destlen) { @@ -646,6 +646,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, unsigned int request_size = 0; int i; struct scatterlist *sgl; + unsigned int sg_count = 0; DPRINT_ENTER(STORVSC_DRV); @@ -730,6 +731,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, request->DataBuffer.Length = scsi_bufflen(scmnd); if (scsi_sg_count(scmnd)) { sgl = (struct scatterlist *)scsi_sglist(scmnd); + sg_count = scsi_sg_count(scmnd); /* check if we need to bounce the sgl */ if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) { @@ -764,15 +766,16 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd, scsi_sg_count(scmnd)); sgl = cmd_request->bounce_sgl; + sg_count = cmd_request->bounce_sgl_count; } request->DataBuffer.Offset = sgl[0].offset; - for (i = 0; i < scsi_sg_count(scmnd); i++) { + for (i = 0; i < sg_count; i++) { DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d\n", i, sgl[i].length, sgl[i].offset); request->DataBuffer.PfnArray[i] = - page_to_pfn(sg_page((&sgl[i]))); + page_to_pfn(sg_page((&sgl[i]))); } } else if (scsi_sglist(scmnd)) { /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */ diff --git a/drivers/staging/hv/vmbus.c b/drivers/staging/hv/vmbus.c index 007543b..08a0c59 100644 --- a/drivers/staging/hv/vmbus.c +++ b/drivers/staging/hv/vmbus.c @@ -254,7 +254,7 @@ static int VmbusOnISR(struct hv_driver *drv) event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; /* Since we are a child, we only need to check bit 0 */ - if (test_and_clear_bit(0, (unsigned long *) &event->Flags32[0])) { + if (sync_test_and_clear_bit(0, (unsigned long *) &event->Flags32[0])) { DPRINT_DBG(VMBUS, "received event %d", event->Flags32[0]); ret |= 0x2; } diff --git a/drivers/staging/hv/vmbus_private.h b/drivers/staging/hv/vmbus_private.h index 588c667..90d58dd 100644 --- a/drivers/staging/hv/vmbus_private.h +++ b/drivers/staging/hv/vmbus_private.h @@ -32,6 +32,7 @@ #include "channel_interface.h" #include "ring_buffer.h" #include +#include /* diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c index 6de439f..92bdb82 100644 --- a/drivers/staging/iio/accel/adis16220_core.c +++ b/drivers/staging/iio/accel/adis16220_core.c @@ -506,7 +506,7 @@ static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16220_write_reset, 0); #define IIO_DEV_ATTR_CAPTURE(_store) \ - IIO_DEVICE_ATTR(capture, S_IWUGO, NULL, _store, 0) + IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0) static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture); diff --git a/drivers/staging/line6/Kconfig b/drivers/staging/line6/Kconfig index 7852d4a..bc1ffbe 100644 --- a/drivers/staging/line6/Kconfig +++ b/drivers/staging/line6/Kconfig @@ -2,6 +2,7 @@ config LINE6_USB tristate "Line6 USB support" depends 
on USB && SND select SND_RAWMIDI + select SND_PCM help This is a driver for the guitar amp, cab, and effects modeller PODxt Pro by Line6 (and similar devices), supporting the diff --git a/drivers/staging/line6/control.c b/drivers/staging/line6/control.c index 0b59852..e414571 100644 --- a/drivers/staging/line6/control.c +++ b/drivers/staging/line6/control.c @@ -268,210 +268,210 @@ VARIAX_PARAM_R(float, mix2); VARIAX_PARAM_R(float, mix1); VARIAX_PARAM_R(int, pickup_wiring); -static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak); -static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position, +static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak); +static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position, pod_set_wah_position); -static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain); -static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position); -static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold); -static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan); -static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup, +static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan); +static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup); -static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model, +static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model, pod_set_amp_model); -static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive); -static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass); -static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid); -static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid); -static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble); -static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid, +static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive); +static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass); +static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid); +static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid); +static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble); +static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid, pod_set_highmid); -static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol, +static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol); -static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix, +static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix); -static DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup, +static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup); -static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency); -static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence, +static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence, pod_set_presence); -static 
DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass); -static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable); -static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold, +static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold); -static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time, +static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time); -static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable, +static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable); -static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable, +static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable); -static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time, +static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time); -static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable, +static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable); -static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1, +static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1); -static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1, +static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1); -static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value); -static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass); -static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2, +static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2); -static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix); -static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3, +static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3); -static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable, +static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable, pod_set_reverb_enable); -static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type, +static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type); -static DEVICE_ATTR(reverb_decay, S_IWUGO | S_IRUGO, pod_get_reverb_decay, +static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay); -static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone, +static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone); -static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay); -static 
DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post, +static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post); -static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency); -static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass); -static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable, +static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable); -static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut); -static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut); -static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum); -static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post, +static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post); -static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post, +static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post); -static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model, +static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model, pod_set_di_model); -static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay, +static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay, pod_set_di_delay); -static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable, +static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable); -static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value); -static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2, +static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2); -static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3, +static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3); -static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4, +static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4); -static DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5, +static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5); -static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix, +static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix); -static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post, +static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post); -static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model); -static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO, +static 
DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency); -static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass); -static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision); -static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision); -static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable, +static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable); -static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap); -static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap); +static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign); -static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency); -static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner); -static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection, +static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner); +static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection); -static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model, +static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model); -static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model, +static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model); -static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel, +static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel); -static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency); -static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency); -static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_stomp_param_1_note_value, pod_set_stomp_param_1_note_value); -static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2, +static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2); -static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, pod_get_stomp_param_3, +static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3); -static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4, +static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4); -static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5, +static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5); -static DEVICE_ATTR(stomp_param_6, 
S_IWUGO | S_IRUGO, pod_get_stomp_param_6, +static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6); -static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select); -static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4, +static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4); -static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5, +static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5); -static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post, +static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post); -static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model, +static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model, pod_set_delay_model); -static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model); -static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb, +static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb); -static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb, +static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb); -static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model, +static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model, pod_set_wah_model); -static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume, +static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume, pod_set_bypass_volume); -static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off, +static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off); -static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select); -static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage, +static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage); -static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain, +static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain); -static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass); -static DEVICE_ATTR(band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain, +static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain); -static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass); -static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain, +static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain); -static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass); -static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO, +static 
DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass); -static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain, +static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain); -static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass); static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write); static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable, diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c index 32b6ca7..9b42e34 100644 --- a/drivers/staging/line6/midi.c +++ b/drivers/staging/line6/midi.c @@ -362,8 +362,8 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev, return count; } -static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit); -static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive); +static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit); +static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive); /* MIDI device destructor */ static int snd_line6_midi_free(struct snd_device *device) diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c index 28f5146..63318d7 100644 --- a/drivers/staging/line6/pod.c +++ b/drivers/staging/line6/pod.c @@ -952,33 +952,33 @@ POD_GET_SYSTEM_PARAM(tuner_pitch, 1, 1); #undef GET_SYSTEM_PARAM /* POD special files: */ -static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel, pod_set_channel); +static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel, pod_set_channel); static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write); static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write); static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write); -static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump); -static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf); -static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish); +static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump); +static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf); +static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish); static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version, line6_nop_write); -static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess); -static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level); +static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess); +static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level); static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write); static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write); -static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_amp_setup); -static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read, pod_set_retrieve_channel); -static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read, pod_set_retrieve_effects_setup); -static DEVICE_ATTR(routing, S_IWUGO | 
S_IRUGO, pod_get_routing, pod_set_routing); +static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_amp_setup); +static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read, pod_set_retrieve_channel); +static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_effects_setup); +static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing, pod_set_routing); static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number, line6_nop_write); -static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read, pod_set_store_amp_setup); -static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read, pod_set_store_channel); -static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read, pod_set_store_effects_setup); -static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq); -static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute); +static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read, pod_set_store_amp_setup); +static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read, pod_set_store_channel); +static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read, pod_set_store_effects_setup); +static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq); +static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute); static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write); static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write); #if CREATE_RAW_FILE -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw); +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw); #endif /* diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c index e6770ea..db42178 100644 --- a/drivers/staging/line6/toneport.c +++ b/drivers/staging/line6/toneport.c @@ -124,9 +124,9 @@ static ssize_t toneport_set_led_green(struct device *dev, return count; } -static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read, +static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_red); -static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read, +static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_green); static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2) diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c index 58ddbe6..b2fc09b 100644 --- a/drivers/staging/line6/variax.c +++ b/drivers/staging/line6/variax.c @@ -389,17 +389,17 @@ static ssize_t variax_set_raw2(struct device *dev, #endif /* Variax workbench special files: */ -static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model, variax_set_model); -static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume, variax_set_volume); -static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone); +static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model, variax_set_model); +static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume, variax_set_volume); +static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone); static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write); static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write); static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write); -static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active, variax_set_active); +static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active, 
variax_set_active); #if CREATE_RAW_FILE -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw); -static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2); +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw); +static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2); #endif diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index 9ca0e9e..6474c3a 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -2179,6 +2179,7 @@ int panel_init(void) if (pprt) { parport_release(pprt); parport_unregister_device(pprt); + pprt = NULL; } parport_unregister_driver(&panel_driver); printk(KERN_ERR "Panel driver version " PANEL_VERSION @@ -2228,6 +2229,7 @@ static void __exit panel_cleanup_module(void) /* TODO: free all input signals */ parport_release(pprt); parport_unregister_device(pprt); + pprt = NULL; } parport_unregister_driver(&panel_driver); } diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c index 42783d7..6771520 100644 --- a/drivers/staging/phison/phison.c +++ b/drivers/staging/phison/phison.c @@ -62,7 +62,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id) }; const struct ata_port_info *ppi[] = { &info, NULL }; - ret = ata_pci_sff_init_one(pdev, ppi, &phison_sht, NULL, 0); + ret = ata_pci_bmdma_init_one(pdev, ppi, &phison_sht, NULL, 0); dev_dbg(&pdev->dev, "phison_init_one(), ret = %x\n", ret); diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index 674769d..5f15647 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c @@ -44,6 +44,7 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x07B8, 0x2870)}, /* AboCom */ {USB_DEVICE(0x07B8, 0x2770)}, /* AboCom */ {USB_DEVICE(0x0DF6, 0x0039)}, /* Sitecom 2770 */ + {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom 2770 */ {USB_DEVICE(0x083A, 0x7512)}, /* Arcadyan 2770 */ {USB_DEVICE(0x0789, 0x0162)}, /* Logitec 2870 */ {USB_DEVICE(0x0789, 0x0163)}, /* Logitec 2870 */ @@ -64,6 +65,8 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */ + {USB_DEVICE(0x050D, 0x935A)}, /* Belkin F6D4050 v1 */ + {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */ {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ {USB_DEVICE(0x07AA, 0x002F)}, /* Corega */ @@ -94,7 +97,8 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x050d, 0x815c)}, {USB_DEVICE(0x1482, 0x3C09)}, /* Abocom */ {USB_DEVICE(0x14B2, 0x3C09)}, /* Alpha */ - {USB_DEVICE(0x04E8, 0x2018)}, /* samsung */ + {USB_DEVICE(0x04E8, 0x2018)}, /* samsung linkstick2 */ + {USB_DEVICE(0x1690, 0x0740)}, /* Askey */ {USB_DEVICE(0x5A57, 0x0280)}, /* Zinwell */ {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ {USB_DEVICE(0x7392, 0x7718)}, @@ -104,21 +108,34 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ {USB_DEVICE(0x050d, 0x815c)}, /* Belkin F5D8053 */ + {USB_DEVICE(0x100D, 0x9031)}, /* Motorola 2770 */ #endif /* RT2870 // */ #ifdef RT3070 {USB_DEVICE(0x148F, 0x3070)}, /* Ralink 3070 */ {USB_DEVICE(0x148F, 0x3071)}, /* Ralink 3071 */ {USB_DEVICE(0x148F, 0x3072)}, /* Ralink 3072 */ {USB_DEVICE(0x0DB0, 0x3820)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x871C)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x822C)}, /* Ralink 3070 */ + 
{USB_DEVICE(0x0DB0, 0x871B)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x822B)}, /* Ralink 3070 */ {USB_DEVICE(0x0DF6, 0x003E)}, /* Sitecom 3070 */ {USB_DEVICE(0x0DF6, 0x0042)}, /* Sitecom 3072 */ + {USB_DEVICE(0x0DF6, 0x0048)}, /* Sitecom 3070 */ + {USB_DEVICE(0x0DF6, 0x0047)}, /* Sitecom 3071 */ {USB_DEVICE(0x14B2, 0x3C12)}, /* AL 3070 */ {USB_DEVICE(0x18C5, 0x0012)}, /* Corega 3070 */ {USB_DEVICE(0x083A, 0x7511)}, /* Arcadyan 3070 */ + {USB_DEVICE(0x083A, 0xA701)}, /* SMC 3070 */ + {USB_DEVICE(0x083A, 0xA702)}, /* SMC 3072 */ {USB_DEVICE(0x1740, 0x9703)}, /* EnGenius 3070 */ {USB_DEVICE(0x1740, 0x9705)}, /* EnGenius 3071 */ {USB_DEVICE(0x1740, 0x9706)}, /* EnGenius 3072 */ + {USB_DEVICE(0x1740, 0x9707)}, /* EnGenius 3070 */ + {USB_DEVICE(0x1740, 0x9708)}, /* EnGenius 3071 */ + {USB_DEVICE(0x1740, 0x9709)}, /* EnGenius 3072 */ {USB_DEVICE(0x13D3, 0x3273)}, /* AzureWave 3070 */ + {USB_DEVICE(0x13D3, 0x3305)}, /* AzureWave 3070*/ {USB_DEVICE(0x1044, 0x800D)}, /* Gigabyte GN-WB32L 3070 */ {USB_DEVICE(0x2019, 0xAB25)}, /* Planex Communications, Inc. RT3070 */ {USB_DEVICE(0x07B8, 0x3070)}, /* AboCom 3070 */ @@ -131,18 +148,41 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x07D1, 0x3C0D)}, /* D-Link 3070 */ {USB_DEVICE(0x07D1, 0x3C0E)}, /* D-Link 3070 */ {USB_DEVICE(0x07D1, 0x3C0F)}, /* D-Link 3070 */ + {USB_DEVICE(0x07D1, 0x3C16)}, /* D-Link 3070 */ + {USB_DEVICE(0x07D1, 0x3C17)}, /* D-Link 8070 */ {USB_DEVICE(0x1D4D, 0x000C)}, /* Pegatron Corporation 3070 */ {USB_DEVICE(0x1D4D, 0x000E)}, /* Pegatron Corporation 3070 */ {USB_DEVICE(0x5A57, 0x5257)}, /* Zinwell 3070 */ {USB_DEVICE(0x5A57, 0x0283)}, /* Zinwell 3072 */ {USB_DEVICE(0x04BB, 0x0945)}, /* I-O DATA 3072 */ + {USB_DEVICE(0x04BB, 0x0947)}, /* I-O DATA 3070 */ + {USB_DEVICE(0x04BB, 0x0948)}, /* I-O DATA 3072 */ {USB_DEVICE(0x203D, 0x1480)}, /* Encore 3070 */ + {USB_DEVICE(0x20B8, 0x8888)}, /* PARA INDUSTRIAL 3070 */ + {USB_DEVICE(0x0B05, 0x1784)}, /* Asus 3072 */ + {USB_DEVICE(0x203D, 0x14A9)}, /* Encore 3070*/ + {USB_DEVICE(0x0DB0, 0x899A)}, /* MSI 3070*/ + {USB_DEVICE(0x0DB0, 0x3870)}, /* MSI 3070*/ + {USB_DEVICE(0x0DB0, 0x870A)}, /* MSI 3070*/ + {USB_DEVICE(0x0DB0, 0x6899)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x3822)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x3871)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x871A)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x822A)}, /* MSI 3070 */ + {USB_DEVICE(0x0DB0, 0x3821)}, /* Ralink 3070 */ + {USB_DEVICE(0x0DB0, 0x821A)}, /* Ralink 3070 */ + {USB_DEVICE(0x083A, 0xA703)}, /* IO-MAGIC */ + {USB_DEVICE(0x13D3, 0x3307)}, /* Azurewave */ + {USB_DEVICE(0x13D3, 0x3321)}, /* Azurewave */ + {USB_DEVICE(0x07FA, 0x7712)}, /* Edimax */ + {USB_DEVICE(0x0789, 0x0166)}, /* Edimax */ + {USB_DEVICE(0x148F, 0x2070)}, /* Edimax */ #endif /* RT3070 // */ - {USB_DEVICE(0x0DF6, 0x003F)}, /* Sitecom WL-608 */ {USB_DEVICE(0x1737, 0x0077)}, /* Linksys WUSB54GC-EU v3 */ {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ {USB_DEVICE(0x2019, 0xED14)}, /* Planex Communications, Inc. 
*/ + {USB_DEVICE(0x0411, 0x015D)}, /* Buffalo Airstation WLI-UC-GN */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c index a0ece1f..e7e8745 100644 --- a/drivers/staging/rtl8187se/r8185b_init.c +++ b/drivers/staging/rtl8187se/r8185b_init.c @@ -268,8 +268,12 @@ HwHSSIThreeWire( } udelay(10); } - if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) - panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp); + if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) { + printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:" + " %#X RE|WE bits are not clear!!\n", u1bTmp); + dump_stack(); + return 0; + } // RTL8187S HSSI Read/Write Function u1bTmp = read_nic_byte(dev, RF_SW_CONFIG); @@ -309,13 +313,23 @@ HwHSSIThreeWire( int idx; int ByteCnt = nDataBufBitCnt / 8; //printk("%d\n",nDataBufBitCnt); - if ((nDataBufBitCnt % 8) != 0) - panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n", - nDataBufBitCnt); + if ((nDataBufBitCnt % 8) != 0) { + printk(KERN_ERR "rtl8187se: " + "HwThreeWire(): nDataBufBitCnt(%d)" + " should be multiple of 8!!!\n", + nDataBufBitCnt); + dump_stack(); + nDataBufBitCnt += 8; + nDataBufBitCnt &= ~7; + } - if (nDataBufBitCnt > 64) - panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n", - nDataBufBitCnt); + if (nDataBufBitCnt > 64) { + printk(KERN_ERR "rtl8187se: HwThreeWire():" + " nDataBufBitCnt(%d) should <= 64!!!\n", + nDataBufBitCnt); + dump_stack(); + nDataBufBitCnt = 64; + } for(idx = 0; idx < ByteCnt; idx++) { diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c index eb44b60..ac2bf11 100644 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ b/drivers/staging/samsung-laptop/samsung-laptop.c @@ -356,7 +356,7 @@ static ssize_t set_silent_state(struct device *dev, } return count; } -static DEVICE_ATTR(silent, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO, get_silent_state, set_silent_state); diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c index c7e061e..456cd5c 100644 --- a/drivers/staging/udlfb/udlfb.c +++ b/drivers/staging/udlfb/udlfb.c @@ -1143,7 +1143,7 @@ static struct device_attribute fb_device_attrs[] = { __ATTR_RO(metrics_bytes_sent), __ATTR_RO(metrics_cpu_kcycles_used), __ATTR_RO(metrics_misc), - __ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store), + __ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store), __ATTR_RW(use_defio), }; diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c index 5972ae7..d497653 100644 --- a/drivers/staging/usbip/stub_rx.c +++ b/drivers/staging/usbip/stub_rx.c @@ -170,33 +170,23 @@ static int tweak_set_configuration_cmd(struct urb *urb) static int tweak_reset_device_cmd(struct urb *urb) { - struct usb_ctrlrequest *req; - __u16 value; - __u16 index; - int ret; - - req = (struct usb_ctrlrequest *) urb->setup_packet; - value = le16_to_cpu(req->wValue); - index = le16_to_cpu(req->wIndex); - - usbip_uinfo("reset_device (port %d) to %s\n", index, - dev_name(&urb->dev->dev)); + struct stub_priv *priv = (struct stub_priv *) urb->context; + struct stub_device *sdev = priv->sdev; - /* all interfaces should be owned by usbip driver, so just reset it. 
*/ - ret = usb_lock_device_for_reset(urb->dev, NULL); - if (ret < 0) { - dev_err(&urb->dev->dev, "lock for reset\n"); - return ret; - } - - /* try to reset the device */ - ret = usb_reset_device(urb->dev); - if (ret < 0) - dev_err(&urb->dev->dev, "device reset\n"); + usbip_uinfo("reset_device %s\n", dev_name(&urb->dev->dev)); - usb_unlock_device(urb->dev); - - return ret; + /* + * usb_lock_device_for_reset caused a deadlock: it causes the driver + * to unbind. In the shutdown the rx thread is signalled to shut down + * but this thread is pending in the usb_lock_device_for_reset. + * + * Instead queue the reset. + * + * Unfortunatly an existing usbip connection will be dropped due to + * driver unbinding. + */ + usb_queue_reset_device(sdev->interface); + return 0; } /* diff --git a/drivers/staging/usbip/stub_tx.c b/drivers/staging/usbip/stub_tx.c index d7136e2..b7a493c 100644 --- a/drivers/staging/usbip/stub_tx.c +++ b/drivers/staging/usbip/stub_tx.c @@ -169,7 +169,6 @@ static int stub_send_ret_submit(struct stub_device *sdev) struct stub_priv *priv, *tmp; struct msghdr msg; - struct kvec iov[3]; size_t txsize; size_t total_size = 0; @@ -179,28 +178,73 @@ static int stub_send_ret_submit(struct stub_device *sdev) struct urb *urb = priv->urb; struct usbip_header pdu_header; void *iso_buffer = NULL; + struct kvec *iov = NULL; + int iovnum = 0; txsize = 0; memset(&pdu_header, 0, sizeof(pdu_header)); memset(&msg, 0, sizeof(msg)); - memset(&iov, 0, sizeof(iov)); - usbip_dbg_stub_tx("setup txdata urb %p\n", urb); + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) + iovnum = 2 + urb->number_of_packets; + else + iovnum = 2; + + iov = kzalloc(iovnum * sizeof(struct kvec), GFP_KERNEL); + if (!iov) { + usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); + return -1; + } + + iovnum = 0; /* 1. setup usbip_header */ setup_ret_submit_pdu(&pdu_header, urb); + usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", + pdu_header.base.seqnum, urb); + /*usbip_dump_header(pdu_header);*/ usbip_header_correct_endian(&pdu_header, 1); - iov[0].iov_base = &pdu_header; - iov[0].iov_len = sizeof(pdu_header); + iov[iovnum].iov_base = &pdu_header; + iov[iovnum].iov_len = sizeof(pdu_header); + iovnum++; txsize += sizeof(pdu_header); /* 2. setup transfer buffer */ - if (usb_pipein(urb->pipe) && urb->actual_length > 0) { - iov[1].iov_base = urb->transfer_buffer; - iov[1].iov_len = urb->actual_length; + if (usb_pipein(urb->pipe) && + usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS && + urb->actual_length > 0) { + iov[iovnum].iov_base = urb->transfer_buffer; + iov[iovnum].iov_len = urb->actual_length; + iovnum++; txsize += urb->actual_length; + } else if (usb_pipein(urb->pipe) && + usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + /* + * For isochronous packets: actual length is the sum of + * the actual length of the individual, packets, but as + * the packet offsets are not changed there will be + * padding between the packets. To optimally use the + * bandwidth the padding is not transmitted. 
+ */ + + int i; + for (i = 0; i < urb->number_of_packets; i++) { + iov[iovnum].iov_base = urb->transfer_buffer + urb->iso_frame_desc[i].offset; + iov[iovnum].iov_len = urb->iso_frame_desc[i].actual_length; + iovnum++; + txsize += urb->iso_frame_desc[i].actual_length; + } + + if (txsize != sizeof(pdu_header) + urb->actual_length) { + dev_err(&sdev->interface->dev, + "actual length of urb (%d) does not match iso packet sizes (%d)\n", + urb->actual_length, txsize-sizeof(pdu_header)); + kfree(iov); + usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); + return -1; + } } /* 3. setup iso_packet_descriptor */ @@ -211,32 +255,34 @@ static int stub_send_ret_submit(struct stub_device *sdev) if (!iso_buffer) { usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC); + kfree(iov); return -1; } - iov[2].iov_base = iso_buffer; - iov[2].iov_len = len; + iov[iovnum].iov_base = iso_buffer; + iov[iovnum].iov_len = len; txsize += len; + iovnum++; } - ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov, - 3, txsize); + ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, + iov, iovnum, txsize); if (ret != txsize) { dev_err(&sdev->interface->dev, "sendmsg failed!, retval %d for %zd\n", ret, txsize); + kfree(iov); kfree(iso_buffer); usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP); return -1; } + kfree(iov); kfree(iso_buffer); - usbip_dbg_stub_tx("send txdata\n"); total_size += txsize; } - spin_lock_irqsave(&sdev->priv_lock, flags); list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) { diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c index 6a499f0..c141eef 100644 --- a/drivers/staging/usbip/usbip_common.c +++ b/drivers/staging/usbip/usbip_common.c @@ -334,10 +334,11 @@ void usbip_dump_header(struct usbip_header *pdu) usbip_udbg("CMD_UNLINK: seq %u\n", pdu->u.cmd_unlink.seqnum); break; case USBIP_RET_SUBMIT: - usbip_udbg("RET_SUBMIT: st %d al %u sf %d ec %d\n", + usbip_udbg("RET_SUBMIT: st %d al %u sf %d #p %d ec %d\n", pdu->u.ret_submit.status, pdu->u.ret_submit.actual_length, pdu->u.ret_submit.start_frame, + pdu->u.ret_submit.number_of_packets, pdu->u.ret_submit.error_count); case USBIP_RET_UNLINK: usbip_udbg("RET_UNLINK: status %d\n", pdu->u.ret_unlink.status); @@ -625,6 +626,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, rpdu->status = urb->status; rpdu->actual_length = urb->actual_length; rpdu->start_frame = urb->start_frame; + rpdu->number_of_packets = urb->number_of_packets; rpdu->error_count = urb->error_count; } else { /* vhci_rx.c */ @@ -632,6 +634,7 @@ static void usbip_pack_ret_submit(struct usbip_header *pdu, struct urb *urb, urb->status = rpdu->status; urb->actual_length = rpdu->actual_length; urb->start_frame = rpdu->start_frame; + urb->number_of_packets = rpdu->number_of_packets; urb->error_count = rpdu->error_count; } } @@ -700,11 +703,13 @@ static void correct_endian_ret_submit(struct usbip_header_ret_submit *pdu, cpu_to_be32s(&pdu->status); cpu_to_be32s(&pdu->actual_length); cpu_to_be32s(&pdu->start_frame); + cpu_to_be32s(&pdu->number_of_packets); cpu_to_be32s(&pdu->error_count); } else { be32_to_cpus(&pdu->status); be32_to_cpus(&pdu->actual_length); be32_to_cpus(&pdu->start_frame); + cpu_to_be32s(&pdu->number_of_packets); be32_to_cpus(&pdu->error_count); } } @@ -830,6 +835,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb) int size = np * sizeof(*iso); int i; int ret; + int total_length = 0; if (!usb_pipeisoc(urb->pipe)) return 0; @@ -859,19 +865,75 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb 
*urb) return -EPIPE; } + for (i = 0; i < np; i++) { iso = buff + (i * sizeof(*iso)); usbip_iso_pakcet_correct_endian(iso, 0); usbip_pack_iso(iso, &urb->iso_frame_desc[i], 0); + total_length += urb->iso_frame_desc[i].actual_length; } kfree(buff); + if (total_length != urb->actual_length) { + dev_err(&urb->dev->dev, + "total length of iso packets (%d) not equal to actual length of buffer (%d)\n", + total_length, urb->actual_length); + + if (ud->side == USBIP_STUB) + usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); + else + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); + + return -EPIPE; + } + return ret; } EXPORT_SYMBOL_GPL(usbip_recv_iso); +/* + * This functions restores the padding which was removed for optimizing + * the bandwidth during transfer over tcp/ip + * + * buffer and iso packets need to be stored and be in propeper endian in urb + * before calling this function + */ +int usbip_pad_iso(struct usbip_device *ud, struct urb *urb) +{ + int np = urb->number_of_packets; + int i; + int ret; + int actualoffset = urb->actual_length; + + if (!usb_pipeisoc(urb->pipe)) + return 0; + + /* if no packets or length of data is 0, then nothing to unpack */ + if (np == 0 || urb->actual_length == 0) + return 0; + + /* + * if actual_length is transfer_buffer_length then no padding is + * present. + */ + if (urb->actual_length == urb->transfer_buffer_length) + return 0; + + /* + * loop over all packets from last to first (to prevent overwritting + * memory when padding) and move them into the proper place + */ + for (i = np-1; i > 0; i--) { + actualoffset -= urb->iso_frame_desc[i].actual_length; + memmove(urb->transfer_buffer + urb->iso_frame_desc[i].offset, + urb->transfer_buffer + actualoffset, + urb->iso_frame_desc[i].actual_length); + } + return ret; +} +EXPORT_SYMBOL_GPL(usbip_pad_iso); /* some members of urb must be substituted before. */ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h index e1bbd12..af6903c 100644 --- a/drivers/staging/usbip/usbip_common.h +++ b/drivers/staging/usbip/usbip_common.h @@ -393,6 +393,8 @@ void usbip_header_correct_endian(struct usbip_header *pdu, int send); int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb); /* some members of urb must be substituted before. */ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb); +/* some members of urb must be substituted before. */ +int usbip_pad_iso(struct usbip_device *ud, struct urb *urb); void *usbip_alloc_iso_desc_pdu(struct urb *urb, ssize_t *bufflen); diff --git a/drivers/staging/usbip/usbip_event.c b/drivers/staging/usbip/usbip_event.c index a2566f1..af3832b 100644 --- a/drivers/staging/usbip/usbip_event.c +++ b/drivers/staging/usbip/usbip_event.c @@ -38,21 +38,13 @@ static int event_handler(struct usbip_device *ud) ud->eh_ops.shutdown(ud); ud->event &= ~USBIP_EH_SHUTDOWN; - - break; } - /* Stop the error handler. */ - if (ud->event & USBIP_EH_BYE) - return -1; - /* Reset the device. */ if (ud->event & USBIP_EH_RESET) { ud->eh_ops.reset(ud); ud->event &= ~USBIP_EH_RESET; - - break; } /* Mark the device as unusable. */ @@ -60,13 +52,11 @@ static int event_handler(struct usbip_device *ud) ud->eh_ops.unusable(ud); ud->event &= ~USBIP_EH_UNUSABLE; - - break; } - /* NOTREACHED */ - printk(KERN_ERR "%s: unknown event\n", __func__); - return -1; + /* Stop the error handler. 
*/ + if (ud->event & USBIP_EH_BYE) + return -1; } return 0; diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h index 41a1fe5..afc3b1a 100644 --- a/drivers/staging/usbip/vhci.h +++ b/drivers/staging/usbip/vhci.h @@ -100,9 +100,6 @@ struct vhci_hcd { * But, the index of this array begins from 0. */ struct vhci_device vdev[VHCI_NPORTS]; - - /* vhci_device which has not been assiged its address yet */ - int pending_port; }; @@ -119,6 +116,9 @@ void rh_port_disconnect(int rhport); void vhci_rx_loop(struct usbip_task *ut); void vhci_tx_loop(struct usbip_task *ut); +struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, + __u32 seqnum); + #define hardware (&the_controller->pdev.dev) static inline struct vhci_device *port_to_vdev(__u32 port) diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c index be5d8db..e003beb 100644 --- a/drivers/staging/usbip/vhci_hcd.c +++ b/drivers/staging/usbip/vhci_hcd.c @@ -138,8 +138,6 @@ void rh_port_connect(int rhport, enum usb_device_speed speed) * the_controller->vdev[rhport].ud.status = VDEV_CONNECT; * spin_unlock(&the_controller->vdev[rhport].ud.lock); */ - the_controller->pending_port = rhport; - spin_unlock_irqrestore(&the_controller->lock, flags); usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); @@ -164,6 +162,8 @@ void rh_port_disconnect(int rhport) * spin_unlock(&vdev->ud.lock); */ spin_unlock_irqrestore(&the_controller->lock, flags); + + usb_hcd_poll_rh_status(vhci_to_hcd(the_controller)); } @@ -557,6 +557,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, struct device *dev = &urb->dev->dev; int ret = 0; unsigned long flags; + struct vhci_device *vdev; usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n", hcd, urb, mem_flags); @@ -572,6 +573,18 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return urb->status; } + vdev = port_to_vdev(urb->dev->portnum-1); + + /* refuse enqueue for dead connection */ + spin_lock(&vdev->ud.lock); + if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) { + usbip_uerr("enqueue for inactive port %d\n", vdev->rhport); + spin_unlock(&vdev->ud.lock); + spin_unlock_irqrestore(&the_controller->lock, flags); + return -ENODEV; + } + spin_unlock(&vdev->ud.lock); + ret = usb_hcd_link_urb_to_ep(hcd, urb); if (ret) goto no_need_unlink; @@ -590,8 +603,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, __u8 type = usb_pipetype(urb->pipe); struct usb_ctrlrequest *ctrlreq = (struct usb_ctrlrequest *) urb->setup_packet; - struct vhci_device *vdev = - port_to_vdev(the_controller->pending_port); if (type != PIPE_CONTROL || !ctrlreq) { dev_err(dev, "invalid request to devnum 0\n"); @@ -605,7 +616,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, dev_info(dev, "SetAddress Request (%d) to port %d\n", ctrlreq->wValue, vdev->rhport); - vdev->udev = urb->dev; + if (vdev->udev) + usb_put_dev(vdev->udev); + vdev->udev = usb_get_dev(urb->dev); spin_lock(&vdev->ud.lock); vdev->ud.status = VDEV_ST_USED; @@ -625,8 +638,9 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, "Get_Descriptor to device 0 " "(get max pipe size)\n"); - /* FIXME: reference count? 
(usb_get_dev()) */ - vdev->udev = urb->dev; + if (vdev->udev) + usb_put_dev(vdev->udev); + vdev->udev = usb_get_dev(urb->dev); goto out; default: @@ -797,27 +811,12 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) spin_unlock_irqrestore(&vdev->priv_lock, flags2); } - - if (!vdev->ud.tcp_socket) { - /* tcp connection is closed */ - usbip_uinfo("vhci_hcd: vhci_urb_dequeue() gives back urb %p\n", - urb); - - usb_hcd_unlink_urb_from_ep(hcd, urb); - - spin_unlock_irqrestore(&the_controller->lock, flags); - usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, - urb->status); - spin_lock_irqsave(&the_controller->lock, flags); - } - spin_unlock_irqrestore(&the_controller->lock, flags); usbip_dbg_vhci_hc("leave\n"); return 0; } - static void vhci_device_unlink_cleanup(struct vhci_device *vdev) { struct vhci_unlink *unlink, *tmp; @@ -825,11 +824,34 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev) spin_lock(&vdev->priv_lock); list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) { + usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum); list_del(&unlink->list); kfree(unlink); } list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) { + struct urb *urb; + + /* give back URB of unanswered unlink request */ + usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum); + + urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); + if (!urb) { + usbip_uinfo("the urb (seqnum %lu) was already given back\n", + unlink->unlink_seqnum); + list_del(&unlink->list); + kfree(unlink); + continue; + } + + urb->status = -ENODEV; + + spin_lock(&the_controller->lock); + usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb); + spin_unlock(&the_controller->lock); + + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status); + list_del(&unlink->list); kfree(unlink); } @@ -899,6 +921,10 @@ static void vhci_device_reset(struct usbip_device *ud) vdev->speed = 0; vdev->devid = 0; + if (vdev->udev) + usb_put_dev(vdev->udev); + vdev->udev = NULL; + ud->tcp_socket = NULL; ud->status = VDEV_ST_NULL; diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c index 8147d72..a1ac1b8 100644 --- a/drivers/staging/usbip/vhci_rx.c +++ b/drivers/staging/usbip/vhci_rx.c @@ -23,16 +23,14 @@ #include "vhci.h" -/* get URB from transmitted urb queue */ -static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, +/* get URB from transmitted urb queue. 
caller must hold vdev->priv_lock */ +struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum) { struct vhci_priv *priv, *tmp; struct urb *urb = NULL; int status; - spin_lock(&vdev->priv_lock); - list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) { if (priv->seqnum == seqnum) { urb = priv->urb; @@ -63,8 +61,6 @@ static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, } } - spin_unlock(&vdev->priv_lock); - return urb; } @@ -74,9 +70,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, struct usbip_device *ud = &vdev->ud; struct urb *urb; + spin_lock(&vdev->priv_lock); urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum); + spin_unlock(&vdev->priv_lock); if (!urb) { usbip_uerr("cannot find a urb of seqnum %u\n", @@ -101,6 +99,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev, if (usbip_recv_iso(ud, urb) < 0) return; + /* restore the padding in iso packets */ + if (usbip_pad_iso(ud, urb) < 0) + return; if (usbip_dbg_flag_vhci_rx) usbip_dump_urb(urb); @@ -161,7 +162,12 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev, return; } + spin_lock(&vdev->priv_lock); + urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum); + + spin_unlock(&vdev->priv_lock); + if (!urb) { /* * I get the result of a unlink request. But, it seems that I diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c index 22c2fab..f79808e 100644 --- a/drivers/staging/vt6655/wpactl.c +++ b/drivers/staging/vt6655/wpactl.c @@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice, DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); - if (param->u.wpa_associate.wpa_ie && - copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) - return -EINVAL; + if (param->u.wpa_associate.wpa_ie_len) { + if (!param->u.wpa_associate.wpa_ie) + return -EINVAL; + if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE)) + return -EINVAL; + if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) + return -EFAULT; + } if (param->u.wpa_associate.mode == 1) pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index c89990f..b24025d 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -1128,6 +1128,7 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, { struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); + struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; int ret; /* instance init */ @@ -1172,15 +1173,34 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, goto fail; } - usb_fill_int_urb(instance->rcv_urb, + if (!cmd_ep) { + dbg("cxacru_bind: no command endpoint"); + ret = -ENODEV; + goto fail; + } + + if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) + == USB_ENDPOINT_XFER_INT) { + usb_fill_int_urb(instance->rcv_urb, usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), instance->rcv_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->rcv_done, 1); - usb_fill_int_urb(instance->snd_urb, + usb_fill_int_urb(instance->snd_urb, usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), instance->snd_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->snd_done, 4); + } else { + usb_fill_bulk_urb(instance->rcv_urb, + usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD), + instance->rcv_buf, PAGE_SIZE, + cxacru_blocking_completion, &instance->rcv_done); + + 
usb_fill_bulk_urb(instance->snd_urb, + usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), + instance->snd_buf, PAGE_SIZE, + cxacru_blocking_completion, &instance->snd_done); + } mutex_init(&instance->cm_serialize); diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index e213d3f..51dc44d 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -2291,7 +2291,7 @@ out: return ret; } -static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot); +static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot); static ssize_t read_human_status(struct device *dev, struct device_attribute *attr, char *buf) @@ -2354,8 +2354,7 @@ out: return ret; } -static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, - read_human_status, NULL); +static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL); static ssize_t read_delin(struct device *dev, struct device_attribute *attr, char *buf) @@ -2387,7 +2386,7 @@ out: return ret; } -static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL); +static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL); #define UEA_ATTR(name, reset) \ \ diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 162c95a..2c70e1e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -297,6 +297,8 @@ static void acm_ctrl_irq(struct urb *urb) if (!ACM_READY(acm)) goto exit; + usb_mark_last_busy(acm->dev); + data = (unsigned char *)(dr + 1); switch (dr->bNotificationType) { case USB_CDC_NOTIFY_NETWORK_CONNECTION: @@ -336,7 +338,6 @@ static void acm_ctrl_irq(struct urb *urb) break; } exit: - usb_mark_last_busy(acm->dev); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&urb->dev->dev, "%s - usb_submit_urb failed with " @@ -533,6 +534,8 @@ static void acm_softint(struct work_struct *work) if (!ACM_READY(acm)) return; tty = tty_port_tty_get(&acm->port); + if (!tty) + return; tty_wakeup(tty); tty_kref_put(tty); } @@ -652,8 +655,10 @@ static void acm_port_down(struct acm *acm, int drain) usb_kill_urb(acm->ctrlurb); for (i = 0; i < ACM_NW; i++) usb_kill_urb(acm->wb[i].urb); + tasklet_disable(&acm->urb_task); for (i = 0; i < nr; i++) usb_kill_urb(acm->ru[i].urb); + tasklet_enable(&acm->urb_task); acm->control->needs_remote_wakeup = 0; usb_autopm_put_interface(acm->control); } @@ -971,7 +976,8 @@ static int acm_probe(struct usb_interface *intf, } if (!buflen) { - if (intf->cur_altsetting->endpoint->extralen && + if (intf->cur_altsetting->endpoint && + intf->cur_altsetting->endpoint->extralen && intf->cur_altsetting->endpoint->extra) { dev_dbg(&intf->dev, "Seeking extra descriptors on endpoint\n"); @@ -1487,6 +1493,11 @@ static int acm_reset_resume(struct usb_interface *intf) USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ USB_CDC_ACM_PROTO_VENDOR) +#define SAMSUNG_PCSUITE_ACM_INFO(x) \ + USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \ + USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \ + USB_CDC_ACM_PROTO_VENDOR) + /* * USB driver structure. 
*/ @@ -1597,6 +1608,18 @@ static const struct usb_device_id acm_ids[] = { { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */ { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */ { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */ + { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */ + { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */ + { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */ + { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */ + { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */ + { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */ + { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */ + { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */ + { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ + { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ + { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */ + { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ @@ -1605,6 +1628,10 @@ static const struct usb_device_id acm_ids[] = { .driver_info = NOT_A_MODEM, }, + /* control interfaces without any protocol set */ + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, + USB_CDC_PROTO_NONE) }, + /* control interfaces with various AT-command sets */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER) }, diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 3449742..dcb4e5e 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -222,7 +222,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, break; case USB_ENDPOINT_XFER_INT: type = "Int."; - if (speed == USB_SPEED_HIGH) + if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER) interval = 1 << (desc->bInterval - 1); else interval = desc->bInterval; @@ -230,7 +230,8 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end, default: /* "can't happen" */ return start; } - interval *= (speed == USB_SPEED_HIGH) ? 125 : 1000; + interval *= (speed == USB_SPEED_HIGH || + speed == USB_SPEED_SUPER) ? 
125 : 1000; if (interval % 1000) unit = 'u'; else { @@ -540,8 +541,9 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes, if (level == 0) { int max; - /* high speed reserves 80%, full/low reserves 90% */ - if (usbdev->speed == USB_SPEED_HIGH) + /* super/high speed reserves 80%, full/low reserves 90% */ + if (usbdev->speed == USB_SPEED_HIGH || + usbdev->speed == USB_SPEED_SUPER) max = 800; else max = FRAME_TIME_MAX_USECS_ALLOC; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index c2f62a3..da05024 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -965,10 +965,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg) static int proc_connectinfo(struct dev_state *ps, void __user *arg) { - struct usbdevfs_connectinfo ci; + struct usbdevfs_connectinfo ci = { + .devnum = ps->dev->devnum, + .slow = ps->dev->speed == USB_SPEED_LOW + }; - ci.devnum = ps->dev->devnum; - ci.slow = ps->dev->speed == USB_SPEED_LOW; if (copy_to_user(arg, &ci, sizeof(ci))) return -EFAULT; return 0; diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index f06f5db..1e6ccef 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -159,9 +159,9 @@ void usb_major_cleanup(void) int usb_register_dev(struct usb_interface *intf, struct usb_class_driver *class_driver) { - int retval = -EINVAL; + int retval; int minor_base = class_driver->minor_base; - int minor = 0; + int minor; char name[20]; char *temp; @@ -173,12 +173,17 @@ int usb_register_dev(struct usb_interface *intf, */ minor_base = 0; #endif - intf->minor = -1; - - dbg ("looking for a minor, starting at %d", minor_base); if (class_driver->fops == NULL) - goto exit; + return -EINVAL; + if (intf->minor >= 0) + return -EADDRINUSE; + + retval = init_usb_class(); + if (retval) + return retval; + + dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base); down_write(&minor_rwsem); for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { @@ -186,20 +191,12 @@ int usb_register_dev(struct usb_interface *intf, continue; usb_minors[minor] = class_driver->fops; - - retval = 0; + intf->minor = minor; break; } up_write(&minor_rwsem); - - if (retval) - goto exit; - - retval = init_usb_class(); - if (retval) - goto exit; - - intf->minor = minor; + if (intf->minor < 0) + return -EXFULL; /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); @@ -213,11 +210,11 @@ int usb_register_dev(struct usb_interface *intf, "%s", temp); if (IS_ERR(intf->usb_dev)) { down_write(&minor_rwsem); - usb_minors[intf->minor] = NULL; + usb_minors[minor] = NULL; + intf->minor = -1; up_write(&minor_rwsem); retval = PTR_ERR(intf->usb_dev); } -exit: return retval; } EXPORT_SYMBOL_GPL(usb_register_dev); diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 12742f1..7d0c7a6 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1874,7 +1874,7 @@ void usb_free_streams(struct usb_interface *interface, /* Streams only apply to bulk endpoints. 
*/ for (i = 0; i < num_eps; i++) - if (!usb_endpoint_xfer_bulk(&eps[i]->desc)) + if (!eps[i] || !usb_endpoint_xfer_bulk(&eps[i]->desc)) return; hcd->driver->free_streams(hcd, dev, eps, num_eps, mem_flags); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 70cccc7..d4041c6 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -676,6 +677,8 @@ static void hub_init_func3(struct work_struct *ws); static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) { struct usb_device *hdev = hub->hdev; + struct usb_hcd *hcd; + int ret; int port1; int status; bool need_debounce_delay = false; @@ -714,6 +717,25 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) usb_autopm_get_interface_no_resume( to_usb_interface(hub->intfdev)); return; /* Continues at init2: below */ + } else if (type == HUB_RESET_RESUME) { + /* The internal host controller state for the hub device + * may be gone after a host power loss on system resume. + * Update the device's info so the HW knows it's a hub. + */ + hcd = bus_to_hcd(hdev->bus); + if (hcd->driver->update_hub_device) { + ret = hcd->driver->update_hub_device(hcd, hdev, + &hub->tt, GFP_NOIO); + if (ret < 0) { + dev_err(hub->intfdev, "Host not " + "accepting hub info " + "update.\n"); + dev_err(hub->intfdev, "LS/FS devices " + "and hubs may not work " + "under this hub.\n"); + } + } + hub_power_on(hub, true); } else { hub_power_on(hub, true); } @@ -1801,7 +1823,6 @@ int usb_new_device(struct usb_device *udev) pm_runtime_set_active(&udev->dev); pm_runtime_enable(&udev->dev); - usb_detect_quirks(udev); err = usb_enumerate_device(udev); /* Read descriptors */ if (err < 0) goto fail; @@ -2721,6 +2742,11 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, udev->ttport = hdev->ttport; } else if (udev->speed != USB_SPEED_HIGH && hdev->speed == USB_SPEED_HIGH) { + if (!hub->tt.hub) { + dev_err(&udev->dev, "parent hub has no TT\n"); + retval = -EINVAL; + goto fail; + } udev->tt = &hub->tt; udev->ttport = port1; } @@ -2859,13 +2885,16 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, else i = udev->descriptor.bMaxPacketSize0; if (le16_to_cpu(udev->ep0.desc.wMaxPacketSize) != i) { - if (udev->speed != USB_SPEED_FULL || + if (udev->speed == USB_SPEED_LOW || !(i == 8 || i == 16 || i == 32 || i == 64)) { - dev_err(&udev->dev, "ep0 maxpacket = %d\n", i); + dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i); retval = -EMSGSIZE; goto fail; } - dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); + if (udev->speed == USB_SPEED_FULL) + dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); + else + dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i); udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i); usb_ep0_reinit(udev); } @@ -3111,6 +3140,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, if (status < 0) goto loop; + usb_detect_quirks(udev); + if (udev->quirks & USB_QUIRK_DELAY_INIT) + msleep(1000); + /* consecutive bus-powered hubs aren't reliable; they can * violate the voltage drop budget. 
if the new child has * a "powered" LED, users should notice we didn't enable it diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index fd4c36e..d6e3e41 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1140,13 +1140,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) { int i; - dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, - skip_ep0 ? "non-ep0" : "all"); - for (i = skip_ep0; i < 16; ++i) { - usb_disable_endpoint(dev, i, true); - usb_disable_endpoint(dev, i + USB_DIR_IN, true); - } - /* getting rid of interfaces will disconnect * any drivers bound to them (a key side effect) */ @@ -1176,6 +1169,13 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) if (dev->state == USB_STATE_CONFIGURED) usb_set_device_state(dev, USB_STATE_ADDRESS); } + + dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__, + skip_ep0 ? "non-ep0" : "all"); + for (i = skip_ep0; i < 16; ++i) { + usb_disable_endpoint(dev, i, true); + usb_disable_endpoint(dev, i + USB_DIR_IN, true); + } } /** @@ -1724,6 +1724,15 @@ free_interfaces: if (ret) goto free_interfaces; + /* if it's already configured, clear out old state first. + * getting rid of old interfaces means unbinding their drivers. + */ + if (dev->state != USB_STATE_ADDRESS) + usb_disable_device(dev, 1); /* Skip ep0 */ + + /* Get rid of pending async Set-Config requests for this device */ + cancel_async_set_config(dev); + /* Make sure we have bandwidth (and available HCD resources) for this * configuration. Remove endpoints from the schedule if we're dropping * this configuration to set configuration 0. After this point, the @@ -1733,20 +1742,11 @@ free_interfaces: mutex_lock(&hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { - usb_autosuspend_device(dev); mutex_unlock(&hcd->bandwidth_mutex); + usb_autosuspend_device(dev); goto free_interfaces; } - /* if it's already configured, clear out old state first. - * getting rid of old interfaces means unbinding their drivers. 
- */ - if (dev->state != USB_STATE_ADDRESS) - usb_disable_device(dev, 1); /* Skip ep0 */ - - /* Get rid of pending async Set-Config requests for this device */ - cancel_async_set_config(dev); - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), USB_REQ_SET_CONFIGURATION, 0, configuration, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); @@ -1761,8 +1761,8 @@ free_interfaces: if (!cp) { usb_set_device_state(dev, USB_STATE_ADDRESS); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); - usb_autosuspend_device(dev); mutex_unlock(&hcd->bandwidth_mutex); + usb_autosuspend_device(dev); goto free_interfaces; } mutex_unlock(&hcd->bandwidth_mutex); @@ -1802,6 +1802,7 @@ free_interfaces: intf->dev.groups = usb_interface_groups; intf->dev.dma_mask = dev->dev.dma_mask; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); + intf->minor = -1; device_initialize(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index db99c08..1848420 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -38,6 +38,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Creative SB Audigy 2 NX */ { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech Harmony 700-series */ + { USB_DEVICE(0x046d, 0xc122), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Philips PSC805 audio device */ { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -45,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ + { USB_DEVICE(0x04e8, 0x6601), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Roland SC-8820 */ { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, @@ -65,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Keytouch QWERTY Panel keyboard */ + { USB_DEVICE(0x0926, 0x3333), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 7c05555..419e6b3 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -137,6 +137,16 @@ void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor) } EXPORT_SYMBOL_GPL(usb_anchor_urb); +/* Callers must hold anchor->lock */ +static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor) +{ + urb->anchor = NULL; + list_del(&urb->anchor_list); + usb_put_urb(urb); + if (list_empty(&anchor->urb_list)) + wake_up(&anchor->wait); +} + /** * usb_unanchor_urb - unanchors an URB * @urb: pointer to the urb to anchor @@ -156,17 +166,14 @@ void usb_unanchor_urb(struct urb *urb) return; spin_lock_irqsave(&anchor->lock, flags); - if (unlikely(anchor != urb->anchor)) { - /* we've lost the race to another thread */ - spin_unlock_irqrestore(&anchor->lock, flags); - return; - } - urb->anchor = NULL; - list_del(&urb->anchor_list); + /* + * At this point, we could be competing with another thread which + * has the same intention. To protect the urb from being unanchored + * twice, only the winner of the race gets the job. 
+ */ + if (likely(anchor == urb->anchor)) + __usb_unanchor_urb(urb, anchor); spin_unlock_irqrestore(&anchor->lock, flags); - usb_put_urb(urb); - if (list_empty(&anchor->urb_list)) - wake_up(&anchor->wait); } EXPORT_SYMBOL_GPL(usb_unanchor_urb); @@ -749,20 +756,11 @@ EXPORT_SYMBOL_GPL(usb_unpoison_anchored_urbs); void usb_unlink_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; - unsigned long flags; - spin_lock_irqsave(&anchor->lock, flags); - while (!list_empty(&anchor->urb_list)) { - victim = list_entry(anchor->urb_list.prev, struct urb, - anchor_list); - usb_get_urb(victim); - spin_unlock_irqrestore(&anchor->lock, flags); - /* this will unanchor the URB */ + while ((victim = usb_get_from_anchor(anchor)) != NULL) { usb_unlink_urb(victim); usb_put_urb(victim); - spin_lock_irqsave(&anchor->lock, flags); } - spin_unlock_irqrestore(&anchor->lock, flags); } EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs); @@ -799,12 +797,11 @@ struct urb *usb_get_from_anchor(struct usb_anchor *anchor) victim = list_entry(anchor->urb_list.next, struct urb, anchor_list); usb_get_urb(victim); - spin_unlock_irqrestore(&anchor->lock, flags); - usb_unanchor_urb(victim); + __usb_unanchor_urb(victim, anchor); } else { - spin_unlock_irqrestore(&anchor->lock, flags); victim = NULL; } + spin_unlock_irqrestore(&anchor->lock, flags); return victim; } @@ -826,12 +823,7 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor) while (!list_empty(&anchor->urb_list)) { victim = list_entry(anchor->urb_list.prev, struct urb, anchor_list); - usb_get_urb(victim); - spin_unlock_irqrestore(&anchor->lock, flags); - /* this may free the URB */ - usb_unanchor_urb(victim); - usb_put_urb(victim); - spin_lock_irqsave(&anchor->lock, flags); + __usb_unanchor_urb(victim, anchor); } spin_unlock_irqrestore(&anchor->lock, flags); } diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index d623c7b..2d19d88 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c @@ -2014,6 +2014,9 @@ static int __init usba_udc_probe(struct platform_device *pdev) } else { disable_irq(gpio_to_irq(udc->vbus_pin)); } + } else { + /* gpio_request fail so use -EINVAL for gpio_is_valid */ + udc->vbus_pin = -EINVAL; } } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 391d169..b17e5c1 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -838,7 +838,8 @@ unknown: */ switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: - f = cdev->config->interface[intf]; + if (cdev->config) + f = cdev->config->interface[intf]; break; case USB_RECIP_ENDPOINT: @@ -1017,14 +1018,6 @@ static int composite_bind(struct usb_gadget *gadget) */ usb_ep_autoconfig_reset(cdev->gadget); - /* standardized runtime overrides for device ID data */ - if (idVendor) - cdev->desc.idVendor = cpu_to_le16(idVendor); - if (idProduct) - cdev->desc.idProduct = cpu_to_le16(idProduct); - if (bcdDevice) - cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); - /* composite gadget needs to assign strings for whole device (like * serial number), register function drivers, potentially update * power state and consumption, etc @@ -1036,6 +1029,14 @@ static int composite_bind(struct usb_gadget *gadget) cdev->desc = *composite->dev; cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket; + /* standardized runtime overrides for device ID data */ + if (idVendor) + cdev->desc.idVendor = cpu_to_le16(idVendor); + if (idProduct) + cdev->desc.idProduct = 
cpu_to_le16(idProduct); + if (bcdDevice) + cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); + /* strings can't be assigned before bind() allocates the * releavnt identifiers */ diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index d47a123..bd6226c 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -111,7 +111,7 @@ acm_iad_descriptor = { .bInterfaceCount = 2, // control + data .bFunctionClass = USB_CLASS_COMM, .bFunctionSubClass = USB_CDC_SUBCLASS_ACM, - .bFunctionProtocol = USB_CDC_PROTO_NONE, + .bFunctionProtocol = USB_CDC_ACM_PROTO_AT_V25TER, /* .iFunction = DYNAMIC */ }; diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c index d1af253..bc43322 100644 --- a/drivers/usb/gadget/g_ffs.c +++ b/drivers/usb/gadget/g_ffs.c @@ -52,8 +52,8 @@ MODULE_AUTHOR("Michal Nazarewicz"); MODULE_LICENSE("GPL"); -static unsigned short gfs_vendor_id = 0x0525; /* XXX NetChip */ -static unsigned short gfs_product_id = 0xa4ac; /* XXX */ +static unsigned short gfs_vendor_id = 0x1d6b; /* Linux Foundation */ +static unsigned short gfs_product_id = 0x0105; /* FunctionFS Gadget */ static struct usb_device_descriptor gfs_dev_desc = { .bLength = sizeof gfs_dev_desc, diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c index a930d7f..0ac051e 100644 --- a/drivers/usb/gadget/multi.c +++ b/drivers/usb/gadget/multi.c @@ -39,8 +39,8 @@ /*-------------------------------------------------------------------------*/ -#define MULTI_VENDOR_NUM 0x0525 /* XXX NetChip */ -#define MULTI_PRODUCT_NUM 0xa4ab /* XXX */ +#define MULTI_VENDOR_NUM 0x1d6b /* Linux Foundation */ +#define MULTI_PRODUCT_NUM 0x0104 /* Multifunction Composite Gadget */ /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 4c3ac5c..d615eee 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -130,31 +130,31 @@ static struct printer_dev usb_printer_gadget; * parameters are in UTF-8 (superset of ASCII's 7 bit characters). 
*/ -static ushort __initdata idVendor; +static ushort idVendor; module_param(idVendor, ushort, S_IRUGO); MODULE_PARM_DESC(idVendor, "USB Vendor ID"); -static ushort __initdata idProduct; +static ushort idProduct; module_param(idProduct, ushort, S_IRUGO); MODULE_PARM_DESC(idProduct, "USB Product ID"); -static ushort __initdata bcdDevice; +static ushort bcdDevice; module_param(bcdDevice, ushort, S_IRUGO); MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); -static char *__initdata iManufacturer; +static char *iManufacturer; module_param(iManufacturer, charp, S_IRUGO); MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); -static char *__initdata iProduct; +static char *iProduct; module_param(iProduct, charp, S_IRUGO); MODULE_PARM_DESC(iProduct, "USB Product string"); -static char *__initdata iSerialNum; +static char *iSerialNum; module_param(iSerialNum, charp, S_IRUGO); MODULE_PARM_DESC(iSerialNum, "1"); -static char *__initdata iPNPstring; +static char *iPNPstring; module_param(iPNPstring, charp, S_IRUGO); MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;"); diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index 5c0d06c..1043da1 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c @@ -292,9 +292,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, /* mandatory */ case OID_GEN_VENDOR_DESCRIPTION: pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__); - length = strlen (rndis_per_dev_params [configNr].vendorDescr); - memcpy (outbuf, - rndis_per_dev_params [configNr].vendorDescr, length); + if ( rndis_per_dev_params [configNr].vendorDescr ) { + length = strlen (rndis_per_dev_params [configNr].vendorDescr); + memcpy (outbuf, + rndis_per_dev_params [configNr].vendorDescr, length); + } else { + outbuf[0] = 0; + } retval = 0; break; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a3ef2a9..761bbb3 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -102,6 +102,9 @@ MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications"); #define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT) +/* for ASPM quirk of ISOC on AMD SB800 */ +static struct pci_dev *amd_nb_dev; + /*-------------------------------------------------------------------------*/ #include "ehci.h" @@ -501,6 +504,11 @@ static void ehci_stop (struct usb_hcd *hcd) spin_unlock_irq (&ehci->lock); ehci_mem_cleanup (ehci); + if (amd_nb_dev) { + pci_dev_put(amd_nb_dev); + amd_nb_dev = NULL; + } + #ifdef EHCI_STATS ehci_dbg (ehci, "irq normal %ld err %ld reclaim %ld (lost %ld)\n", ehci->stats.normal, ehci->stats.error, ehci->stats.reclaim, @@ -536,6 +544,8 @@ static int ehci_init(struct usb_hcd *hcd) ehci->iaa_watchdog.function = ehci_iaa_watchdog; ehci->iaa_watchdog.data = (unsigned long) ehci; + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); + /* * hw default: 1K periodic list heads, one per frame. * periodic_size can shrink by USBCMD update if hcc_params allows. 
@@ -543,11 +553,20 @@ static int ehci_init(struct usb_hcd *hcd) ehci->periodic_size = DEFAULT_I_TDPS; INIT_LIST_HEAD(&ehci->cached_itd_list); INIT_LIST_HEAD(&ehci->cached_sitd_list); + + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + switch (EHCI_TUNE_FLS) { + case 0: ehci->periodic_size = 1024; break; + case 1: ehci->periodic_size = 512; break; + case 2: ehci->periodic_size = 256; break; + default: BUG(); + } + } if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) return retval; /* controllers may cache some of the periodic schedule ... */ - hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); if (HCC_ISOC_CACHE(hcc_params)) // full frame cache ehci->i_thresh = 2 + 8; else // N microframes cached @@ -596,12 +615,6 @@ static int ehci_init(struct usb_hcd *hcd) /* periodic schedule size can be smaller than default */ temp &= ~(3 << 2); temp |= (EHCI_TUNE_FLS << 2); - switch (EHCI_TUNE_FLS) { - case 0: ehci->periodic_size = 1024; break; - case 1: ehci->periodic_size = 512; break; - case 2: ehci->periodic_size = 256; break; - default: BUG(); - } } ehci->command = temp; @@ -1009,10 +1022,11 @@ rescan: tmp && tmp != qh; tmp = tmp->qh_next.qh) continue; - /* periodic qh self-unlinks on empty */ - if (!tmp) - goto nogood; - unlink_async (ehci, qh); + /* periodic qh self-unlinks on empty, and a COMPLETING qh + * may already be unlinked. + */ + if (tmp) + unlink_async(ehci, qh); /* FALL THROUGH */ case QH_STATE_UNLINK: /* wait for hw to finish? */ case QH_STATE_UNLINK_WAIT: @@ -1029,7 +1043,6 @@ idle_timeout: } /* else FALL THROUGH */ default: -nogood: /* caller was supposed to have unlinked any requests; * that's not our job. just leak this memory. */ diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index d43d176..e9dc136 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -41,6 +41,42 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) return 0; } +static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci) +{ + struct pci_dev *amd_smbus_dev; + u8 rev = 0; + + amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); + if (amd_smbus_dev) { + pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); + if (rev < 0x40) { + pci_dev_put(amd_smbus_dev); + amd_smbus_dev = NULL; + return 0; + } + } else { + amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL); + if (!amd_smbus_dev) + return 0; + pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev); + if (rev < 0x11 || rev > 0x18) { + pci_dev_put(amd_smbus_dev); + amd_smbus_dev = NULL; + return 0; + } + } + + if (!amd_nb_dev) + amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); + + ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n"); + + pci_dev_put(amd_smbus_dev); + amd_smbus_dev = NULL; + + return 1; +} + /* called during probe() after chip reset completes */ static int ehci_pci_setup(struct usb_hcd *hcd) { @@ -99,6 +135,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd) /* cache this readonly data; minimize chip reads */ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + if (ehci_quirk_amd_hudson(ehci)) + ehci->amd_l1_fix = 1; + retval = ehci_halt(ehci); if (retval) return retval; @@ -114,6 +153,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd) break; case PCI_VENDOR_ID_INTEL: ehci->need_io_watchdog = 0; + ehci->fs_i_thresh = 1; if (pdev->device == 0x27cc) { ehci->broken_periodic = 1; ehci_info(ehci, "using broken periodic workaround\n"); diff --git 
a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 5aec928..a07cfd6 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) } rv = usb_add_hcd(hcd, irq, 0); - if (rv == 0) - return 0; + if (rv) + goto err_ehci; + + return 0; +err_ehci: + if (ehci->has_amcc_usb23) + iounmap(ehci->ohci_hcctrl_reg); iounmap(hcd->regs); err_ioremap: irq_dispose_mapping(irq); err_irq: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); - - if (ehci->has_amcc_usb23) - iounmap(ehci->ohci_hcctrl_reg); err_rmr: usb_put_hcd(hcd); diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 11a79c4..ed8db6a 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -315,7 +315,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) int stopped; unsigned count = 0; u8 state; - const __le32 halt = HALT_BIT(ehci); struct ehci_qh_hw *hw = qh->hw; if (unlikely (list_empty (&qh->qtd_list))) @@ -422,7 +421,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) && !(qtd->hw_alt_next & EHCI_LIST_END(ehci))) { stopped = 1; - goto halt; } /* stop scanning when we reach qtds the hc is using */ @@ -456,16 +454,6 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) */ ehci_clear_tt_buffer(ehci, qh, urb, token); } - - /* force halt for unlinked or blocked qh, so we'll - * patch the qh later and so that completions can't - * activate it while we "know" it's stopped. - */ - if ((halt & hw->hw_token) == 0) { -halt: - hw->hw_token |= halt; - wmb (); - } } /* unless we already know the urb's status, collect qtd status @@ -1258,24 +1246,27 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) static void scan_async (struct ehci_hcd *ehci) { + bool stopped; struct ehci_qh *qh; enum ehci_timer_action action = TIMER_IO_WATCHDOG; ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); timer_action_done (ehci, TIMER_ASYNC_SHRINK); rescan: + stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state); qh = ehci->async->qh_next.qh; if (likely (qh != NULL)) { do { /* clean any finished work for this qh */ - if (!list_empty (&qh->qtd_list) - && qh->stamp != ehci->stamp) { + if (!list_empty(&qh->qtd_list) && (stopped || + qh->stamp != ehci->stamp)) { int temp; /* unlinks could happen here; completion * reporting drops the lock. rescan using * the latest schedule, but don't rescan - * qhs we already finished (no looping). + * qhs we already finished (no looping) + * unless the controller is stopped. */ qh = qh_get (qh); qh->stamp = ehci->stamp; @@ -1296,9 +1287,9 @@ rescan: */ if (list_empty(&qh->qtd_list) && qh->qh_state == QH_STATE_LINKED) { - if (!ehci->reclaim - && ((ehci->stamp - qh->stamp) & 0x1fff) - >= (EHCI_SHRINK_FRAMES * 8)) + if (!ehci->reclaim && (stopped || + ((ehci->stamp - qh->stamp) & 0x1fff) + >= EHCI_SHRINK_FRAMES * 8)) start_unlink_async(ehci, qh); else action = TIMER_ASYNC_SHRINK; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 805ec63..a530856 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1400,7 +1400,6 @@ iso_stream_schedule ( int status; unsigned mod = ehci->periodic_size << 3; struct ehci_iso_sched *sched = urb->hcpriv; - struct pci_dev *pdev; if (sched->span > (mod - SCHEDULE_SLOP)) { ehci_dbg (ehci, "iso request %p too long\n", urb); @@ -1427,15 +1426,14 @@ iso_stream_schedule ( * slot in the schedule, implicitly assuming URB_ISO_ASAP. 
*/ if (likely (!list_empty (&stream->td_list))) { - pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller); start = stream->next_uframe; /* For high speed devices, allow scheduling within the - * isochronous scheduling threshold. For full speed devices, - * don't. (Work around for Intel ICH9 bug.) + * isochronous scheduling threshold. For full speed devices + * and Intel PCI-based controllers, don't (work around for + * Intel ICH9 bug). */ - if (!stream->highspeed && - pdev->vendor == PCI_VENDOR_ID_INTEL) + if (!stream->highspeed && ehci->fs_i_thresh) next = now + ehci->i_thresh; else next = now; @@ -1588,6 +1586,63 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); } +#define AB_REG_BAR_LOW 0xe0 +#define AB_REG_BAR_HIGH 0xe1 +#define AB_INDX(addr) ((addr) + 0x00) +#define AB_DATA(addr) ((addr) + 0x04) +#define NB_PCIE_INDX_ADDR 0xe0 +#define NB_PCIE_INDX_DATA 0xe4 +#define NB_PIF0_PWRDOWN_0 0x01100012 +#define NB_PIF0_PWRDOWN_1 0x01100013 + +static void ehci_quirk_amd_L1(struct ehci_hcd *ehci, int disable) +{ + u32 addr, addr_low, addr_high, val; + + outb_p(AB_REG_BAR_LOW, 0xcd6); + addr_low = inb_p(0xcd7); + outb_p(AB_REG_BAR_HIGH, 0xcd6); + addr_high = inb_p(0xcd7); + addr = addr_high << 8 | addr_low; + outl_p(0x30, AB_INDX(addr)); + outl_p(0x40, AB_DATA(addr)); + outl_p(0x34, AB_INDX(addr)); + val = inl_p(AB_DATA(addr)); + + if (disable) { + val &= ~0x8; + val |= (1 << 4) | (1 << 9); + } else { + val |= 0x8; + val &= ~((1 << 4) | (1 << 9)); + } + outl_p(val, AB_DATA(addr)); + + if (amd_nb_dev) { + addr = NB_PIF0_PWRDOWN_0; + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val); + + addr = NB_PIF0_PWRDOWN_1; + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_nb_dev, NB_PCIE_INDX_DATA, val); + } + + return; +} + /* fit urb's itds into the selected schedule slot; activate as needed */ static int itd_link_urb ( @@ -1615,6 +1670,12 @@ itd_link_urb ( next_uframe >> 3, next_uframe & 0x7); stream->start = jiffies; } + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 1); + } + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; /* fill iTDs uframe by uframe */ @@ -1741,6 +1802,11 @@ itd_complete ( (void) disable_periodic(ehci); ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 0); + } + if (unlikely(list_is_singular(&stream->td_list))) { ehci_to_hcd(ehci)->self.bandwidth_allocated -= stream->bandwidth; @@ -2028,6 +2094,12 @@ sitd_link_urb ( stream->interval, hc32_to_cpu(ehci, stream->splits)); stream->start = jiffies; } + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 1); + } + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; /* fill sITDs frame by frame */ @@ -2130,6 +2202,11 @@ sitd_complete ( (void) disable_periodic(ehci); ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_l1_fix == 1) + ehci_quirk_amd_L1(ehci, 0); + } + if 
(list_is_singular(&stream->td_list)) { ehci_to_hcd(ehci)->self.bandwidth_allocated -= stream->bandwidth; diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index 650a687..c702e4b 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -130,6 +130,8 @@ struct ehci_hcd { /* one per controller */ unsigned has_amcc_usb23:1; unsigned need_io_watchdog:1; unsigned broken_periodic:1; + unsigned fs_i_thresh:1; /* Intel iso scheduling */ + unsigned amd_l1_fix:1; /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index dbcafa2..e0eed11 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -33,6 +33,7 @@ struct isp1760_hcd { struct inter_packet_info atl_ints[32]; struct inter_packet_info int_ints[32]; struct memory_chunk memory_pool[BLOCKS]; + u32 atl_queued; /* periodic schedule support */ #define DEFAULT_I_TDPS 1024 @@ -850,6 +851,11 @@ static void enqueue_an_ATL_packet(struct usb_hcd *hcd, struct isp1760_qh *qh, skip_map &= ~queue_entry; isp1760_writel(skip_map, hcd->regs + HC_ATL_PTD_SKIPMAP_REG); + priv->atl_queued++; + if (priv->atl_queued == 2) + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, + hcd->regs + HC_INTERRUPT_ENABLE); + buffstatus = isp1760_readl(hcd->regs + HC_BUFFER_STATUS_REG); buffstatus |= ATL_BUFFER; isp1760_writel(buffstatus, hcd->regs + HC_BUFFER_STATUS_REG); @@ -991,6 +997,7 @@ static void do_atl_int(struct usb_hcd *usb_hcd) u32 dw3; status = 0; + priv->atl_queued--; queue_entry = __ffs(done_map); done_map &= ~(1 << queue_entry); @@ -1053,11 +1060,6 @@ static void do_atl_int(struct usb_hcd *usb_hcd) * device is not able to send data fast enough. * This happens mostly on slower hardware. */ - printk(KERN_NOTICE "Reloading ptd %p/%p... 
qh %p read: " - "%d of %zu done: %08x cur: %08x\n", qtd, - urb, qh, PTD_XFERRED_LENGTH(dw3), - qtd->length, done_map, - (1 << queue_entry)); /* RL counter = ERR counter */ dw3 &= ~(0xf << 19); @@ -1085,6 +1087,11 @@ static void do_atl_int(struct usb_hcd *usb_hcd) priv_write_copy(priv, (u32 *)&ptd, usb_hcd->regs + atl_regs, sizeof(ptd)); + priv->atl_queued++; + if (priv->atl_queued == 2) + isp1760_writel(INTERRUPT_ENABLE_SOT_MASK, + usb_hcd->regs + HC_INTERRUPT_ENABLE); + buffstatus = isp1760_readl(usb_hcd->regs + HC_BUFFER_STATUS_REG); buffstatus |= ATL_BUFFER; @@ -1190,6 +1197,9 @@ static void do_atl_int(struct usb_hcd *usb_hcd) skip_map = isp1760_readl(usb_hcd->regs + HC_ATL_PTD_SKIPMAP_REG); } + if (priv->atl_queued <= 1) + isp1760_writel(INTERRUPT_ENABLE_MASK, + usb_hcd->regs + HC_INTERRUPT_ENABLE); } static void do_intl_int(struct usb_hcd *usb_hcd) @@ -1769,7 +1779,7 @@ static irqreturn_t isp1760_irq(struct usb_hcd *usb_hcd) goto leave; isp1760_writel(imask, usb_hcd->regs + HC_INTERRUPT_REG); - if (imask & HC_ATL_INT) + if (imask & (HC_ATL_INT | HC_SOT_INT)) do_atl_int(usb_hcd); if (imask & HC_INTL_INT) diff --git a/drivers/usb/host/isp1760-hcd.h b/drivers/usb/host/isp1760-hcd.h index 6931ef5..612bce5 100644 --- a/drivers/usb/host/isp1760-hcd.h +++ b/drivers/usb/host/isp1760-hcd.h @@ -69,6 +69,7 @@ void deinit_kmem_cache(void); #define HC_INTERRUPT_ENABLE 0x314 #define INTERRUPT_ENABLE_MASK (HC_INTL_INT | HC_ATL_INT | HC_EOT_INT) +#define INTERRUPT_ENABLE_SOT_MASK (HC_INTL_INT | HC_SOT_INT | HC_EOT_INT) #define HC_ISO_INT (1 << 9) #define HC_ATL_INT (1 << 8) diff --git a/drivers/usb/host/r8a66597.h b/drivers/usb/host/r8a66597.h index 95d0f5a..25563e9 100644 --- a/drivers/usb/host/r8a66597.h +++ b/drivers/usb/host/r8a66597.h @@ -227,7 +227,7 @@ static inline void r8a66597_write_fifo(struct r8a66597 *r8a66597, int odd = len & 0x0001; len = len / 2; - ioread16_rep(fifoaddr, buf, len); + iowrite16_rep(fifoaddr, buf, len); if (unlikely(odd)) { buf = &buf[len]; iowrite8((unsigned char)*buf, fifoaddr); diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index a1a7a97..480936a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -132,6 +132,13 @@ static u32 xhci_port_state_to_neutral(u32 state) static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex, u32 __iomem *addr, u32 port_status) { + /* Don't allow the USB core to disable SuperSpeed ports. */ + if (xhci->port_array[wIndex] == 0x03) { + xhci_dbg(xhci, "Ignoring request to disable " + "SuperSpeed port.\n"); + return; + } + /* Write 1 to disable the port */ xhci_writel(xhci, port_status | PORT_PE, addr); port_status = xhci_readl(xhci, addr); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 2eb658d..7ddb69f 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -961,6 +961,47 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud return 0; } +/* + * Convert interval expressed as 2^(bInterval - 1) == interval into + * straight exponent value 2^n == interval. 
+ * + */ +static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; + if (interval != ep->desc.bInterval - 1) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval); + + return interval; +} + +/* + * Convert bInterval expressed in frames (in 1-255 range) to exponent of + * microframes, rounded down to nearest power of 2. + */ +static unsigned int xhci_parse_frame_interval(struct usb_device *udev, + struct usb_host_endpoint *ep) +{ + unsigned int interval; + + interval = fls(8 * ep->desc.bInterval) - 1; + interval = clamp_val(interval, 3, 10); + if ((1 << interval) != 8 * ep->desc.bInterval) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, + 8 * ep->desc.bInterval); + + return interval; +} + /* Return the polling or NAK interval. * * The polling interval is expressed in "microframes". If xHCI's Interval field @@ -978,43 +1019,35 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, case USB_SPEED_HIGH: /* Max NAK rate */ if (usb_endpoint_xfer_control(&ep->desc) || - usb_endpoint_xfer_bulk(&ep->desc)) + usb_endpoint_xfer_bulk(&ep->desc)) { interval = ep->desc.bInterval; + break; + } /* Fall through - SS and HS isoc/int have same decoding */ case USB_SPEED_SUPER: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - if (ep->desc.bInterval == 0) - interval = 0; - else - interval = ep->desc.bInterval - 1; - if (interval > 15) - interval = 15; - if (interval != ep->desc.bInterval + 1) - dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", - ep->desc.bEndpointAddress, 1 << interval); + usb_endpoint_xfer_isoc(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); } break; /* Convert bInterval (in 1-255 frames) to microframes and round down to * nearest power of 2. */ case USB_SPEED_FULL: + if (usb_endpoint_xfer_int(&ep->desc)) { + interval = xhci_parse_exponent_interval(udev, ep); + break; + } + /* + * Fall through for isochronous endpoint interval decoding + * since it uses the same rules as low speed interrupt + * endpoints. 
+ */ case USB_SPEED_LOW: if (usb_endpoint_xfer_int(&ep->desc) || - usb_endpoint_xfer_isoc(&ep->desc)) { - interval = fls(8*ep->desc.bInterval) - 1; - if (interval > 10) - interval = 10; - if (interval < 3) - interval = 3; - if ((1 << interval) != 8*ep->desc.bInterval) - dev_warn(&udev->dev, - "ep %#x - rounding interval" - " to %d microframes, " - "ep desc says %d microframes\n", - ep->desc.bEndpointAddress, - 1 << interval, - 8*ep->desc.bInterval); + usb_endpoint_xfer_isoc(&ep->desc)) { + + interval = xhci_parse_frame_interval(udev, ep); } break; default: @@ -1086,7 +1119,7 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, if (udev->speed == USB_SPEED_SUPER) return ep->ss_ep_comp.wBytesPerInterval; - max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; /* A 0 in max burst means 1 transfer per ESIT */ return max_packet * (max_burst + 1); @@ -1165,7 +1198,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Fall through */ case USB_SPEED_FULL: case USB_SPEED_LOW: - max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); ep_ctx->ep_info2 |= MAX_PACKET(max_packet); break; default: @@ -1457,6 +1490,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->dcbaa = NULL; scratchpad_free(xhci); + + xhci->num_usb2_ports = 0; + xhci->num_usb3_ports = 0; + kfree(xhci->usb2_ports); + kfree(xhci->usb3_ports); + kfree(xhci->port_array); + xhci->page_size = 0; xhci->page_shift = 0; } @@ -1617,6 +1657,166 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) return 0; } +static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, + u32 __iomem *addr, u8 major_revision) +{ + u32 temp, port_offset, port_count; + int i; + + if (major_revision > 0x03) { + xhci_warn(xhci, "Ignoring unknown port speed, " + "Ext Cap %p, revision = 0x%x\n", + addr, major_revision); + /* Ignoring port protocol we can't understand. FIXME */ + return; + } + + /* Port offset and count in the third dword, see section 7.2 */ + temp = xhci_readl(xhci, addr + 2); + port_offset = XHCI_EXT_PORT_OFF(temp); + port_count = XHCI_EXT_PORT_COUNT(temp); + xhci_dbg(xhci, "Ext Cap %p, port offset = %u, " + "count = %u, revision = 0x%x\n", + addr, port_offset, port_count, major_revision); + /* Port count includes the current port offset */ + if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) + /* WTF? "Valid values are ‘1’ to MaxPorts" */ + return; + port_offset--; + for (i = port_offset; i < (port_offset + port_count); i++) { + /* Duplicate entry. Ignore the port if the revisions differ. */ + if (xhci->port_array[i] != 0) { + xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," + " port %u\n", addr, i); + xhci_warn(xhci, "Port was marked as USB %u, " + "duplicated as USB %u\n", + xhci->port_array[i], major_revision); + /* Only adjust the roothub port counts if we haven't + * found a similar duplicate. + */ + if (xhci->port_array[i] != major_revision && + xhci->port_array[i] != (u8) -1) { + if (xhci->port_array[i] == 0x03) + xhci->num_usb3_ports--; + else + xhci->num_usb2_ports--; + xhci->port_array[i] = (u8) -1; + } + /* FIXME: Should we disable the port? */ + continue; + } + xhci->port_array[i] = major_revision; + if (major_revision == 0x03) + xhci->num_usb3_ports++; + else + xhci->num_usb2_ports++; + } + /* FIXME: Should we disable ports not in the Extended Capabilities? 
*/ +} + +/* + * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that + * specify what speeds each port is supposed to be. We can't count on the port + * speed bits in the PORTSC register being correct until a device is connected, + * but we need to set up the two fake roothubs with the correct number of USB + * 3.0 and USB 2.0 ports at host controller initialization time. + */ +static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) +{ + u32 __iomem *addr; + u32 offset; + unsigned int num_ports; + int i, port_index; + + addr = &xhci->cap_regs->hcc_params; + offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); + if (offset == 0) { + xhci_err(xhci, "No Extended Capability registers, " + "unable to set up roothub.\n"); + return -ENODEV; + } + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); + if (!xhci->port_array) + return -ENOMEM; + + /* + * For whatever reason, the first capability offset is from the + * capability register base, not from the HCCPARAMS register. + * See section 5.3.6 for offset calculation. + */ + addr = &xhci->cap_regs->hc_capbase + offset; + while (1) { + u32 cap_id; + + cap_id = xhci_readl(xhci, addr); + if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) + xhci_add_in_port(xhci, num_ports, addr, + (u8) XHCI_EXT_PORT_MAJOR(cap_id)); + offset = XHCI_EXT_CAPS_NEXT(cap_id); + if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) + == num_ports) + break; + /* + * Once you're into the Extended Capabilities, the offset is + * always relative to the register holding the offset. + */ + addr += offset; + } + + if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { + xhci_warn(xhci, "No ports on the roothubs?\n"); + return -ENODEV; + } + xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", + xhci->num_usb2_ports, xhci->num_usb3_ports); + /* + * Note we could have all USB 3.0 ports, or all USB 2.0 ports. + * Not sure how the USB core will handle a hub with no ports... 
+ */ + if (xhci->num_usb2_ports) { + xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)* + xhci->num_usb2_ports, flags); + if (!xhci->usb2_ports) + return -ENOMEM; + + port_index = 0; + for (i = 0; i < num_ports; i++) { + if (xhci->port_array[i] == 0x03 || + xhci->port_array[i] == 0 || + xhci->port_array[i] == -1) + continue; + + xhci->usb2_ports[port_index] = + &xhci->op_regs->port_status_base + + NUM_PORT_REGS*i; + xhci_dbg(xhci, "USB 2.0 port at index %u, " + "addr = %p\n", i, + xhci->usb2_ports[port_index]); + port_index++; + } + } + if (xhci->num_usb3_ports) { + xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)* + xhci->num_usb3_ports, flags); + if (!xhci->usb3_ports) + return -ENOMEM; + + port_index = 0; + for (i = 0; i < num_ports; i++) + if (xhci->port_array[i] == 0x03) { + xhci->usb3_ports[port_index] = + &xhci->op_regs->port_status_base + + NUM_PORT_REGS*i; + xhci_dbg(xhci, "USB 3.0 port at index %u, " + "addr = %p\n", i, + xhci->usb3_ports[port_index]); + port_index++; + } + } + return 0; +} int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { @@ -1797,6 +1997,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (scratchpad_alloc(xhci, flags)) goto fail; + if (xhci_setup_port_arrays(xhci, flags)) + goto fail; return 0; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index bfc99a9..b262e91 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci, *seg = (*seg)->next; *trb = ((*seg)->trbs); } else { - *trb = (*trb)++; + (*trb)++; } } @@ -457,8 +457,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(cur_td->start_seg, dev->eps[ep_index].stopped_trb, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } + /* Dig out the cycle state saved by the xHC during the stop ep cmd */ xhci_dbg(xhci, "Finding endpoint context\n"); ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); @@ -469,8 +472,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_deq_seg = find_trb_seg(state->new_deq_seg, state->new_deq_ptr, &state->new_cycle_state); - if (!state->new_deq_seg) - BUG(); + if (!state->new_deq_seg) { + WARN_ON(1); + return; + } trb = &state->new_deq_ptr->generic; if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && @@ -478,15 +483,26 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, state->new_cycle_state = ~(state->new_cycle_state) & 0x1; next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); + /* + * If there is only one segment in a ring, find_trb_seg()'s while loop + * will not run, and it will return before it has a chance to see if it + * needs to toggle the cycle bit. It can't tell if the stalled transfer + * ended just before the link TRB on a one-segment ring, or if the TD + * wrapped around the top of the ring, because it doesn't have the TD in + * question. Look for the one-segment case where stalled TRB's address + * is greater than the new dequeue pointer address. + */ + if (ep_ring->first_seg == ep_ring->first_seg->next && + state->new_deq_ptr < dev->eps[ep_index].stopped_trb) + state->new_cycle_state ^= 0x1; + xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state); + /* Don't update the ring cycle state for the producer (us). 
*/ xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n", state->new_deq_seg); addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr); xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n", (unsigned long long) addr); - xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n"); - ep_ring->dequeue = state->new_deq_ptr; - ep_ring->deq_seg = state->new_deq_seg; } static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, @@ -905,9 +921,26 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci, } else { xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", ep_ctx->deq); + if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg, + dev->eps[ep_index].queued_deq_ptr) == + (ep_ctx->deq & ~(EP_CTX_CYCLE_MASK))) { + /* Update the ring's dequeue segment and dequeue pointer + * to reflect the new position. + */ + ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg; + ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr; + } else { + xhci_warn(xhci, "Mismatch between completed Set TR Deq " + "Ptr command & xHCI internal state.\n"); + xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n", + dev->eps[ep_index].queued_deq_seg, + dev->eps[ep_index].queued_deq_ptr); + } } dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; + dev->eps[ep_index].queued_deq_seg = NULL; + dev->eps[ep_index].queued_deq_ptr = NULL; /* Restart any rings with pending URBs */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } @@ -1885,12 +1918,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) /* Scatter gather list entries may cross 64KB boundaries */ running_total = TRB_MAX_BUFF_SIZE - - (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; if (running_total != 0) num_trbs++; /* How many more 64KB chunks to transfer, how many more TRBs? 
*/ - while (running_total < sg_dma_len(sg)) { + while (running_total < sg_dma_len(sg) && running_total < temp) { num_trbs++; running_total += TRB_MAX_BUFF_SIZE; } @@ -1915,11 +1949,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) static void check_trb_math(struct urb *urb, int num_trbs, int running_total) { if (num_trbs != 0) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " "TRBs, %d left\n", __func__, urb->ep->desc.bEndpointAddress, num_trbs); if (running_total != urb->transfer_buffer_length) - dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " + dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " "queued %#x (%d), asked for %#x (%d)\n", __func__, urb->ep->desc.bEndpointAddress, @@ -2046,8 +2080,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, sg = urb->sg; addr = (u64) sg_dma_address(sg); this_sg_len = sg_dma_len(sg); - trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; @@ -2083,7 +2116,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), (unsigned int) addr + trb_buff_len); if (TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { + (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), @@ -2127,7 +2160,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, } trb_buff_len = TRB_MAX_BUFF_SIZE - - (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (addr & (TRB_MAX_BUFF_SIZE - 1)); trb_buff_len = min_t(int, trb_buff_len, this_sg_len); if (running_total + trb_buff_len > urb->transfer_buffer_length) trb_buff_len = @@ -2166,7 +2199,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, num_trbs = 0; /* How much data is (potentially) left before the 64KB boundary? */ running_total = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + running_total &= TRB_MAX_BUFF_SIZE - 1; /* If there's some data on this 64KB chunk, or we have to send a * zero-length transfer, we need at least one TRB @@ -2206,8 +2240,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, /* How much data is in the first TRB? 
*/ addr = (u64) urb->transfer_dma; trb_buff_len = TRB_MAX_BUFF_SIZE - - (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); - if (urb->transfer_buffer_length < trb_buff_len) + (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); + if (trb_buff_len > urb->transfer_buffer_length) trb_buff_len = urb->transfer_buffer_length; first_trb = true; @@ -2492,6 +2526,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); u32 type = TRB_TYPE(TRB_SET_DEQ); + struct xhci_virt_ep *ep; addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); if (addr == 0) { @@ -2500,6 +2535,14 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, deq_seg, deq_ptr); return 0; } + ep = &xhci->devs[slot_id]->eps[ep_index]; + if ((ep->ep_state & SET_DEQ_PENDING)) { + xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n"); + xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n"); + return 0; + } + ep->queued_deq_seg = deq_seg; + ep->queued_deq_ptr = deq_ptr; return queue_command(xhci, lower_32_bits(addr) | cycle_state, upper_32_bits(addr), trb_stream_id, trb_slot_id | trb_ep_index | type, false); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 3998f72..807f8e2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1224,6 +1224,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, cmd_completion = command->completion; cmd_status = &command->status; command->command_trb = xhci->cmd_ring->enqueue; + + /* Enqueue pointer can be left pointing to the link TRB, + * we must handle that + */ + if ((command->command_trb->link.control & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK)) + command->command_trb = + xhci->cmd_ring->enq_seg->next->trbs; + list_add_tail(&command->cmd_list, &virt_dev->cmd_list); } else { in_ctx = virt_dev->in_ctx; @@ -1933,6 +1942,15 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Attempt to submit the Reset Device command to the command ring */ spin_lock_irqsave(&xhci->lock, flags); reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; + + /* Enqueue pointer can be left pointing to the link TRB, + * we must handle that + */ + if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK)) + reset_device_cmd->command_trb = + xhci->cmd_ring->enq_seg->next->trbs; + list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); ret = xhci_queue_reset_device(xhci, slot_id); if (ret) { @@ -1992,10 +2010,18 @@ int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Everything but endpoint 0 is disabled, so free or cache the rings. */ last_freed_endpoint = 1; for (i = 1; i < 31; ++i) { - if (!virt_dev->eps[i].ring) - continue; - xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); - last_freed_endpoint = i; + struct xhci_virt_ep *ep = &virt_dev->eps[i]; + + if (ep->ep_state & EP_HAS_STREAMS) { + xhci_free_stream_info(xhci, ep->stream_info); + ep->stream_info = NULL; + ep->ep_state &= ~EP_HAS_STREAMS; + } + + if (ep->ring) { + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + last_freed_endpoint = i; + } } xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 6c7e343..a6982c3 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -232,7 +232,7 @@ struct xhci_op_regs { * notification type that matches a bit set in this bit field. 
*/ #define DEV_NOTE_MASK (0xffff) -#define ENABLE_DEV_NOTE(x) (1 << x) +#define ENABLE_DEV_NOTE(x) (1 << (x)) /* Most of the device notification types should only be used for debug. * SW does need to pay attention to function wake notifications. */ @@ -448,6 +448,24 @@ struct xhci_doorbell_array { /** + * struct xhci_protocol_caps + * @revision: major revision, minor revision, capability ID, + * and next capability pointer. + * @name_string: Four ASCII characters to say which spec this xHC + * follows, typically "USB ". + * @port_info: Port offset, count, and protocol-defined information. + */ +struct xhci_protocol_caps { + u32 revision; + u32 name_string; + u32 port_info; +}; + +#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) +#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff) +#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) + +/** * struct xhci_container_ctx * @type: Type of context. Used to calculated offsets to contained contexts. * @size: Size of the context data @@ -580,11 +598,11 @@ struct xhci_ep_ctx { #define EP_STATE_STOPPED 3 #define EP_STATE_ERROR 4 /* Mult - Max number of burtst within an interval, in EP companion desc. */ -#define EP_MULT(p) ((p & 0x3) << 8) +#define EP_MULT(p) (((p) & 0x3) << 8) /* bits 10:14 are Max Primary Streams */ /* bit 15 is Linear Stream Array */ /* Interval - period between requests to an endpoint - 125u increments. */ -#define EP_INTERVAL(p) ((p & 0xff) << 16) +#define EP_INTERVAL(p) (((p) & 0xff) << 16) #define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) #define EP_MAXPSTREAMS_MASK (0x1f << 10) #define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) @@ -614,10 +632,18 @@ struct xhci_ep_ctx { #define MAX_PACKET_MASK (0xffff << 16) #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) +/* Get max packet size from ep desc. Bit 10..0 specify the max packet size. + * USB2.0 spec 9.6.6. + */ +#define GET_MAX_PACKET(p) ((p) & 0x7ff) + /* tx_info bitmasks */ #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) +/* deq bitmasks */ +#define EP_CTX_CYCLE_MASK (1 << 0) + /** * struct xhci_input_control_context @@ -720,6 +746,12 @@ struct xhci_virt_ep { struct timer_list stop_cmd_timer; int stop_cmds_pending; struct xhci_hcd *xhci; + /* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue + * command. We'll need to update the ring's dequeue segment and dequeue + * pointer after the command completes. + */ + struct xhci_segment *queued_deq_seg; + union xhci_trb *queued_deq_ptr; }; struct xhci_virt_device { @@ -1182,6 +1214,15 @@ struct xhci_hcd { #define XHCI_LINK_TRB_QUIRK (1 << 0) #define XHCI_RESET_EP_QUIRK (1 << 1) #define XHCI_NEC_HOST (1 << 2) + + /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? 
*/ + u8 *port_array; + /* Array of pointers to USB 3.0 PORTSC registers */ + u32 __iomem **usb3_ports; + unsigned int num_usb3_ports; + /* Array of pointers to USB 2.0 PORTSC registers */ + u32 __iomem **usb2_ports; + unsigned int num_usb2_ports; }; /* For testing purposes */ diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index 2f43c57..9251773 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -196,11 +196,9 @@ static ssize_t get_port1_handler(struct device *dev, return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1); } -static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO, - get_port0_handler, set_port0_handler); +static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler); -static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO, - get_port1_handler, set_port1_handler); +static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler); static int cypress_probe(struct usb_interface *interface, diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 7dc9d3c..654243e 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -373,7 +373,7 @@ static ssize_t iowarrior_write(struct file *file, case USB_DEVICE_ID_CODEMERCS_IOWPV2: case USB_DEVICE_ID_CODEMERCS_IOW40: /* IOW24 and IOW40 use a synchronous call */ - buf = kmalloc(8, GFP_KERNEL); /* 8 bytes are enough for both products */ + buf = kmalloc(count, GFP_KERNEL); if (!buf) { retval = -ENOMEM; goto exit; @@ -552,6 +552,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, /* needed for power consumption */ struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; + memset(&info, 0, sizeof(info)); /* directly from the descriptor */ info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); info.product = dev->product_id; diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index d25814c..e33bff5 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #else x.sisusb_conactive = 0; #endif + memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved)); if (copy_to_user((void __user *)arg, &x, sizeof(x))) retval = -EFAULT; diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index d77aba4..f63776a 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c @@ -86,7 +86,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed); +static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed); static int tv_probe(struct usb_interface *interface, const struct usb_device_id *id) diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c index 63da2c3..c96f51d 100644 --- a/drivers/usb/misc/usbled.c +++ b/drivers/usb/misc/usbled.c @@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co change_color(led); \ return count; \ } \ -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); +static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value); show_set(blue); show_set(red); show_set(green); diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index de8ef94..417b8f2 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c @@ -192,7 +192,7 @@ 
static ssize_t set_attr_##name(struct device *dev, \ \ return count; \ } \ -static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name); +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name); static ssize_t show_attr_text(struct device *dev, struct device_attribute *attr, char *buf) @@ -223,7 +223,7 @@ static ssize_t set_attr_text(struct device *dev, return count; } -static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text); +static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text); static ssize_t show_attr_decimals(struct device *dev, struct device_attribute *attr, char *buf) @@ -272,8 +272,7 @@ static ssize_t set_attr_decimals(struct device *dev, return count; } -static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO, - show_attr_decimals, set_attr_decimals); +static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals); static ssize_t show_attr_textmode(struct device *dev, struct device_attribute *attr, char *buf) @@ -319,8 +318,7 @@ static ssize_t set_attr_textmode(struct device *dev, return -EINVAL; } -static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO, - show_attr_textmode, set_attr_textmode); +static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode); MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered); diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 16dffe9..c3049c7 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1378,7 +1378,6 @@ static void iso_callback (struct urb *urb) break; } } - simple_free_urb (urb); ctx->pending--; if (ctx->pending == 0) { @@ -1495,6 +1494,7 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param, } simple_free_urb (urbs [i]); + urbs[i] = NULL; context.pending--; context.submit_error = 1; break; @@ -1504,6 +1504,10 @@ test_iso_queue (struct usbtest_dev *dev, struct usbtest_param *param, wait_for_completion (&context.done); + for (i = 0; i < param->sglen; i++) { + if (urbs[i]) + simple_free_urb(urbs[i]); + } /* * Isochronous transfers are expected to fail sometimes. 
As an * arbitrary limit, we will report an error if any submissions diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 796e2f6..2380338 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -177,12 +177,11 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p spin_lock_irqsave(&priv->asynclock, flags); list_add_tail(&rq->asynclist, &priv->asynclist); spin_unlock_irqrestore(&priv->asynclock, flags); + kref_get(&rq->ref_count); ret = usb_submit_urb(rq->urb, mem_flags); - if (!ret) { - kref_get(&rq->ref_count); + if (!ret) return rq; - } - kref_put(&rq->ref_count, destroy_async); + destroy_async(&rq->ref_count); err("submit_async_request submit_urb failed with %d", ret); return NULL; } diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 61c76b1..56c93ca 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1009,7 +1009,7 @@ static int mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg) mutex_lock(&rp->fetch_lock); spin_lock_irqsave(&rp->b_lock, flags); - mon_free_buff(rp->b_vec, size/CHUNK_SIZE); + mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE); kfree(rp->b_vec); rp->b_vec = vec; rp->b_size = size; diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index b611420..611a9d2 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -342,8 +342,10 @@ int __init musb_platform_init(struct musb *musb, void *board_data) usb_nop_xceiv_register(); musb->xceiv = otg_get_transceiver(); - if (!musb->xceiv) + if (!musb->xceiv) { + gpio_free(musb->config->gpio_vrsel); return -ENODEV; + } if (ANOMALY_05000346) { bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); @@ -394,8 +396,9 @@ int __init musb_platform_init(struct musb *musb, void *board_data) int musb_platform_exit(struct musb *musb) { - gpio_free(musb->config->gpio_vrsel); + otg_put_transceiver(musb->xceiv); + usb_nop_xceiv_unregister(); return 0; } diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 5762436..6e67629 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c @@ -446,6 +446,7 @@ int __init musb_platform_init(struct musb *musb, void *board_data) fail: clk_disable(musb->clock); + otg_put_transceiver(musb->xceiv); usb_nop_xceiv_unregister(); return -ENODEV; } @@ -496,6 +497,7 @@ int musb_platform_exit(struct musb *musb) clk_disable(musb->clock); + otg_put_transceiver(musb->xceiv); usb_nop_xceiv_unregister(); return 0; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 3b795c5..6128e7c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1922,10 +1922,6 @@ static void musb_free(struct musb *musb) dma_controller_destroy(c); } -#ifdef CONFIG_USB_MUSB_OTG - put_device(musb->xceiv->dev); -#endif - #ifdef CONFIG_USB_MUSB_HDRC_HCD usb_put_hcd(musb_to_hcd(musb)); #else @@ -2248,7 +2244,6 @@ static int __exit musb_remove(struct platform_device *pdev) #endif musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_platform_exit(musb); - musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_free(musb); iounmap(ctrl_base); diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index bba76af..9e8639d 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -92,29 +92,29 @@ static const struct musb_register_map musb_regmap[] = { { "LS_EOF1", 0x7E, 8 }, { "SOFT_RST", 0x7F, 8 }, { "DMA_CNTLch0", 0x204, 16 }, - { "DMA_ADDRch0", 0x208, 16 }, - { "DMA_COUNTch0", 
0x20C, 16 }, + { "DMA_ADDRch0", 0x208, 32 }, + { "DMA_COUNTch0", 0x20C, 32 }, { "DMA_CNTLch1", 0x214, 16 }, - { "DMA_ADDRch1", 0x218, 16 }, - { "DMA_COUNTch1", 0x21C, 16 }, + { "DMA_ADDRch1", 0x218, 32 }, + { "DMA_COUNTch1", 0x21C, 32 }, { "DMA_CNTLch2", 0x224, 16 }, - { "DMA_ADDRch2", 0x228, 16 }, - { "DMA_COUNTch2", 0x22C, 16 }, + { "DMA_ADDRch2", 0x228, 32 }, + { "DMA_COUNTch2", 0x22C, 32 }, { "DMA_CNTLch3", 0x234, 16 }, - { "DMA_ADDRch3", 0x238, 16 }, - { "DMA_COUNTch3", 0x23C, 16 }, + { "DMA_ADDRch3", 0x238, 32 }, + { "DMA_COUNTch3", 0x23C, 32 }, { "DMA_CNTLch4", 0x244, 16 }, - { "DMA_ADDRch4", 0x248, 16 }, - { "DMA_COUNTch4", 0x24C, 16 }, + { "DMA_ADDRch4", 0x248, 32 }, + { "DMA_COUNTch4", 0x24C, 32 }, { "DMA_CNTLch5", 0x254, 16 }, - { "DMA_ADDRch5", 0x258, 16 }, - { "DMA_COUNTch5", 0x25C, 16 }, + { "DMA_ADDRch5", 0x258, 32 }, + { "DMA_COUNTch5", 0x25C, 32 }, { "DMA_CNTLch6", 0x264, 16 }, - { "DMA_ADDRch6", 0x268, 16 }, - { "DMA_COUNTch6", 0x26C, 16 }, + { "DMA_ADDRch6", 0x268, 32 }, + { "DMA_COUNTch6", 0x26C, 32 }, { "DMA_CNTLch7", 0x274, 16 }, - { "DMA_ADDRch7", 0x278, 16 }, - { "DMA_COUNTch7", 0x27C, 16 }, + { "DMA_ADDRch7", 0x278, 32 }, + { "DMA_COUNTch7", 0x27C, 32 }, { } /* Terminating Entry */ }; @@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = { static int musb_test_mode_open(struct inode *inode, struct file *file) { - file->private_data = inode->i_private; - return single_open(file, musb_test_mode_show, inode->i_private); } static ssize_t musb_test_mode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - struct musb *musb = file->private_data; + struct seq_file *s = file->private_data; + struct musb *musb = s->private; u8 test = 0; char buf[18]; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 6fca870..180da4e 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -568,11 +568,19 @@ static void rxstate(struct musb *musb, struct musb_request *req) { const u8 epnum = req->epnum; struct usb_request *request = &req->request; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; unsigned fifo_count = 0; - u16 len = musb_ep->packet_sz; + u16 len; u16 csr = musb_readw(epio, MUSB_RXCSR); + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; + + len = musb_ep->packet_sz; /* We shouldn't get here while DMA is active, but we do... */ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { @@ -740,9 +748,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) u16 csr; struct usb_request *request; void __iomem *mbase = musb->mregs; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; struct dma_channel *dma; + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; musb_ep_select(mbase, epnum); @@ -1081,7 +1095,7 @@ struct free_record { /* * Context: controller locked, IRQs blocked. */ -static void musb_ep_restart(struct musb *musb, struct musb_request *req) +void musb_ep_restart(struct musb *musb, struct musb_request *req) { DBG(3, "<== %s request %p len %u on hw_ep%d\n", req->tx ? 
"TX/IN" : "RX/OUT", diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index c8b1403..572b1da 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h @@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *); extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); +extern void musb_ep_restart(struct musb *, struct musb_request *); + #endif /* __MUSB_GADGET_H */ diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 21b9788..ecae714 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -261,6 +261,7 @@ __acquires(musb->lock) ctrlrequest->wIndex & 0x0f; struct musb_ep *musb_ep; struct musb_hw_ep *ep; + struct musb_request *request; void __iomem *regs; int is_in; u16 csr; @@ -302,6 +303,14 @@ __acquires(musb->lock) musb_writew(regs, MUSB_RXCSR, csr); } + /* Maybe start the first request in the queue */ + request = to_musb_request( + next_request(musb_ep)); + if (!musb_ep->busy && request) { + DBG(3, "restarting the request\n"); + musb_ep_restart(musb, request); + } + /* select ep0 again */ musb_ep_select(mbase, 0); } break; diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index e06d65e..49352b3 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -323,8 +323,10 @@ static int musb_platform_resume(struct musb *musb) int musb_platform_exit(struct musb *musb) { + del_timer_sync(&musb_idle_timer); musb_platform_suspend(musb); + otg_put_transceiver(musb->xceiv); return 0; } diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 3c48e77..bde40ef 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -1152,6 +1152,8 @@ done: if (ret < 0) { if (sync) iounmap(sync); + + otg_put_transceiver(musb->xceiv); usb_nop_xceiv_unregister(); } return ret; @@ -1166,6 +1168,8 @@ int musb_platform_exit(struct musb *musb) musb->board_set_power(0); iounmap(musb->sync_va); + + otg_put_transceiver(musb->xceiv); usb_nop_xceiv_unregister(); return 0; } diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 63f7cc4..14ac87e 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -75,6 +75,7 @@ static int debug; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x1a86, 0x5523) }, { }, }; MODULE_DEVICE_TABLE(usb, id_table); @@ -486,12 +487,22 @@ static void ch341_read_int_callback(struct urb *urb) if (actual_length >= 4) { struct ch341_private *priv = usb_get_serial_port_data(port); unsigned long flags; + u8 prev_line_status = priv->line_status; spin_lock_irqsave(&priv->lock, flags); priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT; if ((data[1] & CH341_MULT_STAT)) priv->multi_status_change = 1; spin_unlock_irqrestore(&priv->lock, flags); + + if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) { + struct tty_struct *tty = tty_port_tty_get(&port->port); + if (tty) + usb_serial_handle_dcd_change(port, tty, + priv->line_status & CH341_BIT_DCD); + tty_kref_put(tty); + } + wake_up_interruptible(&priv->delta_msr_wait); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 8b8c797..1a85938 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -49,11 +49,11 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *, static void cp210x_break_ctl(struct tty_struct *, int); static int 
cp210x_startup(struct usb_serial *); static void cp210x_dtr_rts(struct usb_serial_port *p, int on); -static int cp210x_carrier_raised(struct usb_serial_port *p); static int debug; static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ @@ -85,9 +85,9 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ - { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ @@ -107,8 +107,11 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ + { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ @@ -122,9 +125,14 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ + { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ + { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ + { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ + { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; @@ -157,7 +165,6 @@ static struct usb_serial_driver cp210x_device = { .tiocmset = cp210x_tiocmset, .attach = cp210x_startup, .dtr_rts = cp210x_dtr_rts, - .carrier_raised = cp210x_carrier_raised }; /* Config request types */ @@ -218,8 +225,8 @@ static struct usb_serial_driver cp210x_device = { #define BITS_STOP_2 0x0002 /* CP210X_SET_BREAK */ -#define BREAK_ON 0x0000 -#define BREAK_OFF 0x0001 +#define BREAK_ON 0x0001 +#define 
BREAK_OFF 0x0000 /* CP210X_(SET_MHS|GET_MDMSTS) */ #define CONTROL_DTR 0x0001 @@ -756,15 +763,6 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file) return result; } -static int cp210x_carrier_raised(struct usb_serial_port *p) -{ - unsigned int control; - cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1); - if (control & CONTROL_DCD) - return 1; - return 0; -} - static void cp210x_break_ctl (struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index fd35f73..0c36695 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -455,7 +455,6 @@ static int digi_write_room(struct tty_struct *tty); static int digi_chars_in_buffer(struct tty_struct *tty); static int digi_open(struct tty_struct *tty, struct usb_serial_port *port); static void digi_close(struct usb_serial_port *port); -static int digi_carrier_raised(struct usb_serial_port *port); static void digi_dtr_rts(struct usb_serial_port *port, int on); static int digi_startup_device(struct usb_serial *serial); static int digi_startup(struct usb_serial *serial); @@ -511,7 +510,6 @@ static struct usb_serial_driver digi_acceleport_2_device = { .open = digi_open, .close = digi_close, .dtr_rts = digi_dtr_rts, - .carrier_raised = digi_carrier_raised, .write = digi_write, .write_room = digi_write_room, .write_bulk_callback = digi_write_bulk_callback, @@ -1337,14 +1335,6 @@ static void digi_dtr_rts(struct usb_serial_port *port, int on) digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1); } -static int digi_carrier_raised(struct usb_serial_port *port) -{ - struct digi_port *priv = usb_get_serial_port_data(port); - if (priv->dp_modem_signals & TIOCM_CD) - return 1; - return 0; -} - static int digi_open(struct tty_struct *tty, struct usb_serial_port *port) { int ret; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index e298dc4..8ed7c88 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -99,6 +99,7 @@ struct ftdi_sio_quirk { static int ftdi_jtag_probe(struct usb_serial *serial); static int ftdi_mtxorb_hack_setup(struct usb_serial *serial); static int ftdi_NDI_device_setup(struct usb_serial *serial); +static int ftdi_stmclite_probe(struct usb_serial *serial); static void ftdi_USB_UIRT_setup(struct ftdi_private *priv); static void ftdi_HE_TIRA1_setup(struct ftdi_private *priv); @@ -122,6 +123,10 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { .port_probe = ftdi_HE_TIRA1_setup, }; +static struct ftdi_sio_quirk ftdi_stmclite_quirk = { + .probe = ftdi_stmclite_probe, +}; + /* * The 8U232AM has the same API as the sio except for: * - it can support MUCH higher baudrates; up to: @@ -145,6 +150,8 @@ static struct ftdi_sio_quirk ftdi_HE_TIRA1_quirk = { * /sys/bus/usb/ftdi_sio/new_id, then send patch/report! 
*/ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, @@ -157,6 +164,9 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_7_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USINT_CAT_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USINT_WKEY_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_USINT_RS232_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACTZWAVE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IRTRANS_PID) }, { USB_DEVICE(FTDI_VID, FTDI_IPLUS_PID) }, @@ -174,9 +184,11 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_SNIFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, + { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, @@ -196,6 +208,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, + { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, @@ -513,6 +526,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, { USB_DEVICE(OCT_VID, OCT_US101_PID) }, + { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, { USB_DEVICE(FTDI_VID, FTDI_HE_TIRA1_PID), .driver_info = (kernel_ulong_t)&ftdi_HE_TIRA1_quirk }, { USB_DEVICE(FTDI_VID, FTDI_USB_UIRT_PID), @@ -609,6 +623,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, + { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, @@ -669,8 +684,17 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, - { USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) }, - { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) }, + { USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, { USB_DEVICE(FTDI_VID, 
FTDI_YEI_SERVOCENTER31_PID) }, { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, @@ -692,6 +716,7 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, @@ -711,8 +736,37 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) }, { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) }, + + /* Papouch devices based on FTDI chip */ + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485_2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AP485_2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB422_2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485S_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB485C_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_LEC_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SB232_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_TMU_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_IRAMP_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK5_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO8x8_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO4x4_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x2_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO10x1_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO30x3_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO60x3_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO2x16_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_QUIDO3x32_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_DRAK6_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_UPSUSB_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_MU_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_SIMUKEY_PID) }, { USB_DEVICE(PAPOUCH_VID, PAPOUCH_AD4USB_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMUX_PID) }, + { USB_DEVICE(PAPOUCH_VID, PAPOUCH_GMSR_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DGQG_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOMINTELL_DUSB_PID) }, { USB_DEVICE(ALTI2_VID, ALTI2_N3_PID) }, @@ -733,6 +787,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) }, + { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, @@ -746,6 +802,26 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, + { USB_DEVICE(FTDI_VID, ACCESIO_COM4SM_PID) }, + { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) }, 
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, + { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), + .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -1372,7 +1448,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) } /* set max packet size based on descriptor */ - priv->max_packet_size = ep_desc->wMaxPacketSize; + priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize); dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); } @@ -1627,6 +1703,25 @@ static int ftdi_jtag_probe(struct usb_serial *serial) } /* + * First and second port on STMCLite adaptors are reserved for JTAG interface + * and the fourth port for pio + */ +static int ftdi_stmclite_probe(struct usb_serial *serial) +{ + struct usb_device *udev = serial->dev; + struct usb_interface *interface = serial->interface; + + dbg("%s", __func__); + + if (interface == udev->actconfig->interface[2]) + return 0; + + dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n"); + + return -ENODEV; +} + +/* * The Matrix Orbital VK204-25-USB has an invalid IN endpoint. * We have to correct it if we want to read from it. */ @@ -2013,8 +2108,6 @@ static void ftdi_set_termios(struct tty_struct *tty, "urb failed to set to rts/cts flow control\n"); } - /* raise DTR/RTS */ - set_mctrl(port, TIOCM_DTR | TIOCM_RTS); } else { /* * Xon/Xoff code @@ -2062,8 +2155,6 @@ static void ftdi_set_termios(struct tty_struct *tty, } } - /* lower DTR/RTS */ - clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); } return; } diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index d01946d..0060913 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -40,6 +40,11 @@ #define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */ +/* US Interface Navigator (http://www.usinterface.com/) */ +#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */ +#define FTDI_USINT_WKEY_PID 0xb811 /* Navigator WKEY and FSK lines */ +#define FTDI_USINT_RS232_PID 0xb812 /* Navigator RS232 and CONFIG lines */ + /* OOCDlink by Joern Kaipf * (http://www.joernonline.de/dw/doku.php?id=start&idx=projects:oocdlink) */ #define FTDI_OOCDLINK_PID 0xbaf8 /* Amontec JTAGkey */ @@ -56,6 +61,7 @@ #define FTDI_OPENDCC_SNIFFER_PID 0xBFD9 #define FTDI_OPENDCC_THROTTLE_PID 0xBFDA #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB +#define FTDI_OPENDCC_GBM_PID 0xBFDC /* * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com) @@ -105,6 +111,12 @@ /* Propox devices */ #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 +/* Lenz LI-USB Computer Interface. */ +#define FTDI_LENZ_LIUSB_PID 0xD780 + +/* Vardaan Enterprises Serial Interface VEUSB422R3 */ +#define FTDI_VARDAAN_PID 0xF070 + /* * Xsens Technologies BV products (http://www.xsens.com).
*/ @@ -127,6 +139,18 @@ #define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */ /* + * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs + */ +#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8 +#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9 +#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA +#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB +#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC +#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD +#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE +#define FTDI_CHAMSYS_WING_PID 0xDAFF + +/* * Westrex International devices submitted by Cory Lee */ #define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */ @@ -276,6 +300,8 @@ * Hameg HO820 and HO870 interface (using VID 0x0403) */ #define HAMEG_HO820_PID 0xed74 +#define HAMEG_HO730_PID 0xed73 +#define HAMEG_HO720_PID 0xed72 #define HAMEG_HO870_PID 0xed71 /* @@ -494,6 +520,12 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 /* + * Acton Research Corp. + */ +#define ACTON_VID 0x0647 /* Vendor ID */ +#define ACTON_SPECTRAPRO_PID 0x0100 + +/* * Contec products (http://www.contec.com) * Submitted by Daniel Sangorrin */ @@ -542,14 +574,27 @@ /* Note: OCT US101 is also rebadged as Dick Smith Electronics (NZ) XH6381 */ /* Also rebadged as Dick Smith Electronics (Aus) XH6451 */ /* Also rebadged as SIIG Inc. model US2308 hardware version 1 */ +#define OCT_DK201_PID 0x0103 /* OCT DK201 USB docking station */ #define OCT_US101_PID 0x0421 /* OCT US101 USB to RS-232 */ /* - * Icom ID-1 digital transceiver + * Definitions for Icom Inc. devices */ - -#define ICOM_ID1_VID 0x0C26 -#define ICOM_ID1_PID 0x0004 +#define ICOM_VID 0x0C26 /* Icom vendor ID */ +/* Note: ID-1 is a communications transceiver for HAM-radio operators */ +#define ICOM_ID_1_PID 0x0004 /* ID-1 USB to RS-232 */ +/* Note: OPC is an Optional cable to connect an Icom Transceiver */ +#define ICOM_OPC_U_UC_PID 0x0018 /* OPC-478UC, OPC-1122U cloning cable */ +/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */ +#define ICOM_ID_RP2C1_PID 0x0009 /* ID-RP2C Asset 1 to RS-232 */ +#define ICOM_ID_RP2C2_PID 0x000A /* ID-RP2C Asset 2 to RS-232 */ +#define ICOM_ID_RP2D_PID 0x000B /* ID-RP2D configuration port*/ +#define ICOM_ID_RP2VT_PID 0x000C /* ID-RP2V Transmit config port */ +#define ICOM_ID_RP2VR_PID 0x000D /* ID-RP2V Receive config port */ +#define ICOM_ID_RP4KVT_PID 0x0010 /* ID-RP4000V Transmit config port */ +#define ICOM_ID_RP4KVR_PID 0x0011 /* ID-RP4000V Receive config port */ +#define ICOM_ID_RP2KVT_PID 0x0012 /* ID-RP2000V Transmit config port */ +#define ICOM_ID_RP2KVR_PID 0x0013 /* ID-RP2000V Receive config port */ /* * GN Otometrics (http://www.otometrics.com) @@ -700,6 +745,7 @@ */ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ +#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ /* * Bayer Ascensia Contour blood glucose meter USB-converter cable.
@@ -984,6 +1030,12 @@ #define ALTI2_N3_PID 0x6001 /* Neptune 3 */ /* + * Ionics PlugComputer + */ +#define IONICS_VID 0x1c0c +#define IONICS_PLUGCOMPUTER_PID 0x0102 + +/* * Dresden Elektronik Sensor Terminal Board */ #define DE_VID 0x1cf1 /* Vendor ID */ @@ -991,14 +1043,45 @@ #define WHT_PID 0x0004 /* Wireless Handheld Terminal */ /* + * STMicroelectronics + */ +#define ST_VID 0x0483 +#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ + +/* * Papouch products (http://www.papouch.com/) * Submitted by Folkert van Heusden */ #define PAPOUCH_VID 0x5050 /* Vendor ID */ +#define PAPOUCH_SB485_PID 0x0100 /* Papouch SB485 USB-485/422 Converter */ +#define PAPOUCH_AP485_PID 0x0101 /* AP485 USB-RS485 Converter */ +#define PAPOUCH_SB422_PID 0x0102 /* Papouch SB422 USB-RS422 Converter */ +#define PAPOUCH_SB485_2_PID 0x0103 /* Papouch SB485 USB-485/422 Converter */ +#define PAPOUCH_AP485_2_PID 0x0104 /* AP485 USB-RS485 Converter */ +#define PAPOUCH_SB422_2_PID 0x0105 /* Papouch SB422 USB-RS422 Converter */ +#define PAPOUCH_SB485S_PID 0x0106 /* Papouch SB485S USB-485/422 Converter */ +#define PAPOUCH_SB485C_PID 0x0107 /* Papouch SB485C USB-485/422 Converter */ +#define PAPOUCH_LEC_PID 0x0300 /* LEC USB Converter */ +#define PAPOUCH_SB232_PID 0x0301 /* Papouch SB232 USB-RS232 Converter */ #define PAPOUCH_TMU_PID 0x0400 /* TMU USB Thermometer */ -#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Quido 4/4 Module */ +#define PAPOUCH_IRAMP_PID 0x0500 /* Papouch IRAmp Duplex */ +#define PAPOUCH_DRAK5_PID 0x0700 /* Papouch DRAK5 */ +#define PAPOUCH_QUIDO8x8_PID 0x0800 /* Papouch Quido 8/8 Module */ +#define PAPOUCH_QUIDO4x4_PID 0x0900 /* Papouch Quido 4/4 Module */ +#define PAPOUCH_QUIDO2x2_PID 0x0a00 /* Papouch Quido 2/2 Module */ +#define PAPOUCH_QUIDO10x1_PID 0x0b00 /* Papouch Quido 10/1 Module */ +#define PAPOUCH_QUIDO30x3_PID 0x0c00 /* Papouch Quido 30/3 Module */ +#define PAPOUCH_QUIDO60x3_PID 0x0d00 /* Papouch Quido 60(100)/3 Module */ +#define PAPOUCH_QUIDO2x16_PID 0x0e00 /* Papouch Quido 2/16 Module */ +#define PAPOUCH_QUIDO3x32_PID 0x0f00 /* Papouch Quido 3/32 Module */ +#define PAPOUCH_DRAK6_PID 0x1000 /* Papouch DRAK6 */ +#define PAPOUCH_UPSUSB_PID 0x8000 /* Papouch UPS-USB adapter */ +#define PAPOUCH_MU_PID 0x8001 /* MU controller */ +#define PAPOUCH_SIMUKEY_PID 0x8002 /* Papouch SimuKey */ #define PAPOUCH_AD4USB_PID 0x8003 /* AD4USB Measurement Module */ +#define PAPOUCH_GMUX_PID 0x8004 /* Papouch GOLIATH MUX */ +#define PAPOUCH_GMSR_PID 0x8005 /* Papouch GOLIATH MSR */ /* * Marvell SheevaPlug @@ -1025,6 +1108,11 @@ #define MJSG_HD_RADIO_PID 0x937C /* + * D.O.Tec products (http://www.directout.eu) + */ +#define FTDI_DOTEC_PID 0x9868 + +/* * Xverve Signalyzer tools (http://www.signalyzer.com/) */ #define XVERVE_SIGNALYZER_ST_PID 0xBCA0 @@ -1032,3 +1120,35 @@ #define XVERVE_SIGNALYZER_SH2_PID 0xBCA2 #define XVERVE_SIGNALYZER_SH4_PID 0xBCA4 +/* + * Segway Robotic Mobility Platform USB interface (using VID 0x0403) + * Submitted by John G.
Rogers + */ +#define SEGWAY_RMP200_PID 0xe729 + + +/* + * Accesio USB Data Acquisition products (http://www.accesio.com/) + */ +#define ACCESIO_COM4SM_PID 0xD578 + +/* www.sciencescope.co.uk educational dataloggers */ +#define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 +#define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C +#define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D + +/* + * Milkymist One JTAG/Serial + */ +#define QIHARDWARE_VID 0x20B7 +#define MILKYMISTONE_JTAGSERIAL_PID 0x0713 + +/* + * CTI GmbH RS485 Converter http://www.cti-lean.com/ + */ +/* USB-485-Mini*/ +#define FTDI_CTI_MINI_PID 0xF608 +/* USB-Nano-485*/ +#define FTDI_CTI_NANO_PID 0xF60B + + diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index a817ced..d2d5505 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -208,18 +208,23 @@ retry: urb->transfer_buffer_length = count; usb_serial_debug_data(debug, &port->dev, __func__, count, urb->transfer_buffer); + spin_lock_irqsave(&port->lock, flags); + port->tx_bytes += count; + spin_unlock_irqrestore(&port->lock, flags); + + clear_bit(i, &port->write_urbs_free); result = usb_submit_urb(urb, GFP_ATOMIC); if (result) { dev_err(&port->dev, "%s - error submitting urb: %d\n", __func__, result); + set_bit(i, &port->write_urbs_free); + spin_lock_irqsave(&port->lock, flags); + port->tx_bytes -= count; + spin_unlock_irqrestore(&port->lock, flags); + clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags); return result; } - clear_bit(i, &port->write_urbs_free); - - spin_lock_irqsave(&port->lock, flags); - port->tx_bytes += count; - spin_unlock_irqrestore(&port->lock, flags); /* Try sending off another urb, unless in irq context (in which case * there will be no free urb). */ @@ -476,6 +481,26 @@ int usb_serial_handle_break(struct usb_serial_port *port) } EXPORT_SYMBOL_GPL(usb_serial_handle_break); +/** + * usb_serial_handle_dcd_change - handle a change of carrier detect state + * @port: usb_serial_port structure for the open port + * @tty: tty_struct structure for the port + * @status: new carrier detect status, nonzero if active + */ +void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, unsigned int status) +{ + struct tty_port *port = &usb_port->port; + + dbg("%s - port %d, status %d", __func__, usb_port->number, status); + + if (status) + wake_up_interruptible(&port->open_wait); + else if (tty && !C_CLOCAL(tty)) + tty_hangup(tty); +} +EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change); + int usb_serial_generic_resume(struct usb_serial *serial) { struct usb_serial_port *port; diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index 76e6fb3..db0e3fe 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -2894,8 +2894,8 @@ static void load_application_firmware(struct edgeport_serial *edge_serial) dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build); - edge_serial->product_info.FirmwareMajorVersion = fw->data[0]; - edge_serial->product_info.FirmwareMinorVersion = fw->data[1]; + edge_serial->product_info.FirmwareMajorVersion = rec->data[0]; + edge_serial->product_info.FirmwareMinorVersion = rec->data[1]; edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build); for (rec = ihex_next_binrec(rec); rec; diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index 0fca265..9991063 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial 
*serial) /* Check if we have an old version in the I2C and update if necessary */ - if (download_cur_ver != download_new_ver) { + if (download_cur_ver < download_new_ver) { dbg("%s - Update I2C dld from %d.%d to %d.%d", __func__, firmware_version->Ver_Major, diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 185fe9a..2cbd661 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -680,22 +680,6 @@ static void keyspan_pda_dtr_rts(struct usb_serial_port *port, int on) } } -static int keyspan_pda_carrier_raised(struct usb_serial_port *port) -{ - struct usb_serial *serial = port->serial; - unsigned char modembits; - - /* If we can read the modem status and the DCD is low then - carrier is not raised yet */ - if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) { - if (!(modembits & (1>>6))) - return 0; - } - /* Carrier raised, or we failed (eg disconnected) so - progress accordingly */ - return 1; -} - static int keyspan_pda_open(struct tty_struct *tty, struct usb_serial_port *port) @@ -882,7 +866,6 @@ static struct usb_serial_driver keyspan_pda_device = { .id_table = id_table_std, .num_ports = 1, .dtr_rts = keyspan_pda_dtr_rts, - .carrier_raised = keyspan_pda_carrier_raised, .open = keyspan_pda_open, .close = keyspan_pda_close, .write = keyspan_pda_write, diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index bd5bd85..b382d9a 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -372,7 +372,7 @@ static void kobil_read_int_callback(struct urb *urb) } tty = tty_port_tty_get(&port->port); - if (urb->actual_length) { + if (tty && urb->actual_length) { /* BEGIN DEBUG */ /* diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index 7aa01b9..2849f8c 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -549,9 +549,12 @@ static void mct_u232_close(struct usb_serial_port *port) { dbg("%s port %d", __func__, port->number); - usb_serial_generic_close(port); - if (port->serial->dev) + if (port->serial->dev) { + /* shutdown our urbs */ + usb_kill_urb(port->write_urb); + usb_kill_urb(port->read_urb); usb_kill_urb(port->interrupt_in_urb); + } } /* mct_u232_close */ diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 30922a7..aa66581 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file, case TIOCGICOUNT: cnow = mos7720_port->icount; + + memset(&icount, 0, sizeof(struct serial_icounter_struct)); + icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 585b7e6..1a42bc2 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -119,16 +119,20 @@ * by making a change here, in moschip_port_id_table, and in * moschip_id_table_combined */ -#define USB_VENDOR_ID_BANDB 0x0856 -#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 -#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24 -#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 -#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 -#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 -#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 -#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 -#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 -#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 +#define USB_VENDOR_ID_BANDB 0x0856 +#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22 +#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00 +#define 
BANDB_DEVICE_ID_USO9ML2_4 0xAC24 +#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01 +#define BANDB_DEVICE_ID_US9ML2_2 0xAC29 +#define BANDB_DEVICE_ID_US9ML2_4 0xAC30 +#define BANDB_DEVICE_ID_USPTL4_2 0xAC31 +#define BANDB_DEVICE_ID_USPTL4_4 0xAC32 +#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 +#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02 +#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 +#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03 +#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24 /* This driver also supports * ATEN UC2324 device using Moschip MCS7840 @@ -184,13 +188,17 @@ static const struct usb_device_id moschip_port_id_table[] = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, @@ -201,13 +209,17 @@ static const struct usb_device_id moschip_id_table_combined[] __devinitconst = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, + {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, @@ -2273,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file, case TIOCGICOUNT: cnow = mos7840_port->icount; smp_rmb(); + + memset(&icount, 0, sizeof(struct serial_icounter_struct)); + icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c index a6b207c..1f00f24 100644 --- a/drivers/usb/serial/navman.c +++ b/drivers/usb/serial/navman.c @@ -25,6 +25,7 @@ static int debug; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ + { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */ { }, }; MODULE_DEVICE_TABLE(usb, id_table); diff 
--git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index ed01f3b..9ff19c8 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -96,8 +96,8 @@ static void opticon_bulk_callback(struct urb *urb) /* real data, send it to the tty layer */ tty = tty_port_tty_get(&port->port); if (tty) { - tty_insert_flip_string(tty, data, - data_length); + tty_insert_flip_string(tty, data + 2, + data_length); tty_flip_buffer_push(tty); tty_kref_put(tty); } @@ -130,7 +130,7 @@ exit: priv->bulk_address), priv->bulk_in_buffer, priv->buffer_size, opticon_bulk_callback, priv); - result = usb_submit_urb(port->read_urb, GFP_ATOMIC); + result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC); if (result) dev_err(&port->dev, "%s - failed resubmitting read urb, error %d\n", diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 5cd30e4..6210996 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -145,7 +145,10 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_PRODUCT_E143D 0x143D #define HUAWEI_PRODUCT_E143E 0x143E #define HUAWEI_PRODUCT_E143F 0x143F +#define HUAWEI_PRODUCT_K4505 0x1464 +#define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_E14AC 0x14AC +#define HUAWEI_PRODUCT_ETS1220 0x1803 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 @@ -359,12 +362,35 @@ static void option_instat_callback(struct urb *urb); #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 -#define CINTERION_VENDOR_ID 0x0681 +/* Cinterion (formerly Siemens) products */ +#define SIEMENS_VENDOR_ID 0x0681 +#define CINTERION_VENDOR_ID 0x1e2d +#define CINTERION_PRODUCT_HC25_MDM 0x0047 +#define CINTERION_PRODUCT_HC25_MDMNET 0x0040 +#define CINTERION_PRODUCT_HC28_MDM 0x004C +#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */ +#define CINTERION_PRODUCT_EU3_E 0x0051 +#define CINTERION_PRODUCT_EU3_P 0x0052 +#define CINTERION_PRODUCT_PH8 0x0053 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +/* Celot products */ +#define CELOT_VENDOR_ID 0x211f +#define CELOT_PRODUCT_CT680M 0x6801 + +/* ONDA Communication vendor id */ +#define ONDA_VENDOR_ID 0x1ee8 + +/* ONDA MT825UP HSDPA 14.2 modem */ +#define ONDA_MT825UP 0x000b + +/* Samsung products */ +#define SAMSUNG_VENDOR_ID 0x04e8 +#define SAMSUNG_PRODUCT_GT_B3730 0x6889 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -482,7 +508,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, - { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_9508) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, /* Novatel Merlin V640/XV620 */ { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 
NOVATELWIRELESS_PRODUCT_V620) }, /* Novatel Merlin V620/S620 */ @@ -585,6 +614,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff) }, @@ -596,38 +626,52 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, + /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff) }, + /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0079, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0082, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) }, @@ -843,6 +887,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, @@ -885,10 +931,21 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, - - { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, + /* Cinterion */ + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ + { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ + { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730/GT-B3710 LTE USB modem.*/ { } 
/* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); @@ -1017,6 +1074,13 @@ static int option_probe(struct usb_serial *serial, serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff) return -ENODEV; + /* Don't bind network interfaces on Huawei K3765 & K4505 */ + if (serial->dev->descriptor.idVendor == HUAWEI_VENDOR_ID && + (serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K3765 || + serial->dev->descriptor.idProduct == HUAWEI_PRODUCT_K4505) && + serial->interface->cur_altsetting->desc.bInterfaceNumber == 1) + return -ENODEV; + data = serial->private = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL); if (!data) diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 6b60018..2fcc54e 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -50,6 +50,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) }, @@ -86,6 +87,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, + { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, @@ -676,9 +678,11 @@ static void pl2303_update_line_status(struct usb_serial_port *port, { struct pl2303_private *priv = usb_get_serial_port_data(port); + struct tty_struct *tty; unsigned long flags; u8 status_idx = UART_STATE; u8 length = UART_STATE + 1; + u8 prev_line_status; u16 idv, idp; idv = le16_to_cpu(port->serial->dev->descriptor.idVendor); @@ -700,11 +704,20 @@ static void pl2303_update_line_status(struct usb_serial_port *port, /* Save off the uart status for others to look at */ spin_lock_irqsave(&priv->lock, flags); + prev_line_status = priv->line_status; priv->line_status = data[status_idx]; spin_unlock_irqrestore(&priv->lock, flags); if (priv->line_status & UART_BREAK_ERROR) usb_serial_handle_break(port); wake_up_interruptible(&priv->delta_msr_wait); + + tty = tty_port_tty_get(&port->port); + if (!tty) + return; + if ((priv->line_status ^ prev_line_status) & UART_DCD) + usb_serial_handle_dcd_change(port, tty, + priv->line_status & UART_DCD); + tty_kref_put(tty); } static void pl2303_read_int_callback(struct urb *urb) diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index a871645..1b025f7 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -21,6 +21,7 @@ #define PL2303_PRODUCT_ID_MMX 0x0612 #define PL2303_PRODUCT_ID_GPRS 0x0609 #define PL2303_PRODUCT_ID_HCR331 0x331a +#define PL2303_PRODUCT_ID_MOTOROLA 0x0307 #define ATEN_VENDOR_ID 0x0557 #define ATEN_VENDOR_ID2 0x0547 @@ -128,6 +129,10 @@ #define CRESSI_VENDOR_ID 0x04b8 #define CRESSI_EDY_PRODUCT_ID 0x0521 +/* Zeagle dive computer interface */ +#define ZEAGLE_VENDOR_ID 0x04b8 +#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522 + /* Sony, USB data cable for CMD-Jxx mobile phones */ #define SONY_VENDOR_ID 0x054c #define SONY_QN3USB_PRODUCT_ID 0x0437 diff --git 
a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index d47b56e..d876bba 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -302,7 +302,9 @@ static const struct usb_device_id id_table[] = { .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ - + { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, { } }; MODULE_DEVICE_TABLE(usb, id_table); diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 329d311..b9575c8 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -133,7 +133,7 @@ struct spcp8x5_usb_ctrl_arg { /* how come ??? */ #define UART_STATE 0x08 -#define UART_STATE_TRANSIENT_MASK 0x74 +#define UART_STATE_TRANSIENT_MASK 0x75 #define UART_DCD 0x01 #define UART_DSR 0x02 #define UART_BREAK_ERROR 0x04 @@ -531,6 +531,9 @@ static void spcp8x5_process_read_urb(struct urb *urb) tty_insert_flip_string_fixed_flag(tty, data, tty_flag, urb->actual_length); tty_flip_buffer_push(tty); + if (status & UART_DCD) + usb_serial_handle_dcd_change(port, tty, + priv->line_status & MSR_STATUS_LINE_DCD); tty_kref_put(tty); } diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 90979a1..c58ef54 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -365,9 +365,9 @@ failed_1port: static void __exit ti_exit(void) { + usb_deregister(&ti_usb_driver); usb_serial_deregister(&ti_1port_device); usb_serial_deregister(&ti_2port_device); - usb_deregister(&ti_usb_driver); } diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 941c2d4..8fadc13 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -52,6 +52,7 @@ static struct usb_driver usb_serial_driver = { .suspend = usb_serial_suspend, .resume = usb_serial_resume, .no_dynamic_id = 1, + .supports_autosuspend = 1, }; /* There is no MODULE_DEVICE_TABLE for usbserial.c. 
Instead @@ -1344,6 +1345,8 @@ int usb_serial_register(struct usb_serial_driver *driver) return -ENODEV; fixup_generic(driver); + if (driver->usb_driver) + driver->usb_driver->supports_autosuspend = 1; if (!driver->description) driver->description = driver->driver.name; diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index eb76aae..1c11959 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -27,6 +27,7 @@ #include #include #include +#include #include "visor.h" /* @@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial, dbg("%s", __func__); + /* + * some Samsung Android phones in modem mode have the same ID + * as SPH-I500, but they are ACM devices, so dont bind to them + */ + if (id->idVendor == SAMSUNG_VENDOR_ID && + id->idProduct == SAMSUNG_SPH_I500_ID && + serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && + serial->dev->descriptor.bDeviceSubClass == + USB_CDC_SUBCLASS_ACM) + return -ENODEV; + if (serial->dev->actconfig->desc.bConfigurationValue != 1) { dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", serial->dev->actconfig->desc.bConfigurationValue); @@ -606,6 +618,10 @@ static int treo_attach(struct usb_serial *serial) static int clie_5_attach(struct usb_serial *serial) { + struct usb_serial_port *port; + unsigned int pipe; + int j; + dbg("%s", __func__); /* TH55 registers 2 ports. @@ -621,9 +637,14 @@ static int clie_5_attach(struct usb_serial *serial) return -1; /* port 0 now uses the modified endpoint Address */ - serial->port[0]->bulk_out_endpointAddress = + port = serial->port[0]; + port->bulk_out_endpointAddress = serial->port[1]->bulk_out_endpointAddress; + pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress); + for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) + port->write_urbs[j]->pipe = pipe; + return 0; } diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c index 57fc2f5..ceba512 100644 --- a/drivers/usb/storage/sierra_ms.c +++ b/drivers/usb/storage/sierra_ms.c @@ -121,7 +121,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, } return result; } -static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); +static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL); int sierra_ms_init(struct us_data *us) { diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h index 44be6d7..fba2824 100644 --- a/drivers/usb/storage/unusual_cypress.h +++ b/drivers/usb/storage/unusual_cypress.h @@ -31,4 +31,9 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999, "Cypress ISD-300LP", US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0), +UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999, + "Super Top", + "USB 2.0 SATA BRIDGE", + US_SC_CYP_ATACB, US_PR_DEVICE, NULL, 0), + #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */ diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 2c897ee..b0c0a33 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -481,6 +481,13 @@ UNUSUAL_DEV( 0x04e8, 0x507c, 0x0220, 0x0220, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64), +/* Reported by Vitaly Kuznetsov */ +UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, + "Samsung", + "YP-CP3", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), + /* Entry and supporting patch by Theodore Kilgore . * Device uses standards-violating 32-byte Bulk Command Block Wrappers and * reports itself as "Proprietary SCSI Bulk." Cf. 
device entry 0x084d:0x0011. @@ -1036,6 +1043,15 @@ UNUSUAL_DEV( 0x084d, 0x0011, 0x0110, 0x0110, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_BULK32), +/* Reported by + * The device reports a vendor-specific device class, requiring an + * explicit vendor/product match. + */ +UNUSUAL_DEV( 0x0851, 0x1542, 0x0002, 0x0002, + "MagicPixel", + "FW_Omega2", + US_SC_DEVICE, US_PR_DEVICE, NULL, 0), + /* Andrew Lunn * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL * on LUN 4. @@ -1380,6 +1396,13 @@ UNUSUAL_DEV( 0x0f19, 0x0105, 0x0100, 0x0100, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_IGNORE_RESIDUE ), +/* Submitted by Nick Holloway */ +UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100, + "VTech", + "Kidizoom", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_FIX_CAPACITY ), + /* Reported by Michael Stattmann */ UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000, "Sony Ericsson", @@ -1859,6 +1882,22 @@ UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000, US_SC_DEVICE, US_PR_DEVICE, NULL, US_FL_BAD_SENSE ), +/* Patch by Richard Schütz + * This external hard drive enclosure uses a JMicron chip which + * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ +UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000, + "TrekStor GmbH & Co. KG", + "DataStation maxi g.u", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ), + +/* Reported by Jasper Mackenzie */ +UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000, + "Coby Electronics", + "MP3 Player", + US_SC_DEVICE, US_PR_DEVICE, NULL, + US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ), + UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, "ST", "2A", diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index e207810..0870329 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -197,12 +197,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state) { struct backlight_device *bd = to_backlight_device(dev); - if (bd->ops->options & BL_CORE_SUSPENDRESUME) { - mutex_lock(&bd->ops_lock); + mutex_lock(&bd->ops_lock); + if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { bd->props.state |= BL_CORE_SUSPENDED; backlight_update_status(bd); - mutex_unlock(&bd->ops_lock); } + mutex_unlock(&bd->ops_lock); return 0; } @@ -211,12 +211,12 @@ static int backlight_resume(struct device *dev) { struct backlight_device *bd = to_backlight_device(dev); - if (bd->ops->options & BL_CORE_SUSPENDRESUME) { - mutex_lock(&bd->ops_lock); + mutex_lock(&bd->ops_lock); + if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { bd->props.state &= ~BL_CORE_SUSPENDED; backlight_update_status(bd); - mutex_unlock(&bd->ops_lock); } + mutex_unlock(&bd->ops_lock); return 0; } diff --git a/drivers/video/console/tileblit.c b/drivers/video/console/tileblit.c index 0056a41..15e8e1a 100644 --- a/drivers/video/console/tileblit.c +++ b/drivers/video/console/tileblit.c @@ -83,7 +83,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg) { struct fb_tilecursor cursor; - int use_sw = (vc->vc_cursor_type & 0x01); + int use_sw = (vc->vc_cursor_type & 0x10); cursor.sx = vc->vc_x; cursor.sy = vc->vc_y; diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h index f3a4e15..f96a471 100644 --- a/drivers/video/matrox/matroxfb_base.h +++ b/drivers/video/matrox/matroxfb_base.h @@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) { static inline void mga_memcpy_toio(vaddr_t va, const void* 
src, int len) { #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__) /* - * memcpy_toio works for us if: + * iowrite32_rep works for us if: * (1) Copies data as 32bit quantities, not byte after byte, * (2) Performs LE ordered stores, and * (3) It copes with unaligned source (destination is guaranteed to be page * aligned and length is guaranteed to be multiple of 4). */ - memcpy_toio(va.vaddr, src, len); + iowrite32_rep(va.vaddr, src, len >> 2); #else u_int32_t __iomem* addr = va.vaddr; diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 559bf17..b52f8e4 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, break; case FBIOGET_VBLANK: + + memset(&sisvbblank, 0, sizeof(struct fb_vblank)); + sisvbblank.count = 0; sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); diff --git a/drivers/video/via/accel.c b/drivers/video/via/accel.c index e44893e..c2f4e6e 100644 --- a/drivers/video/via/accel.c +++ b/drivers/video/via/accel.c @@ -283,11 +283,12 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height, writel(tmp, engine + 0x1C); } - if (op != VIA_BITBLT_COLOR) + if (op == VIA_BITBLT_FILL) { + writel(fg_color, engine + 0x58); + } else if (op == VIA_BITBLT_MONO) { writel(fg_color, engine + 0x4C); - - if (op == VIA_BITBLT_MONO) writel(bg_color, engine + 0x50); + } if (op == VIA_BITBLT_FILL) ge_cmd |= fill_rop << 24 | 0x00002000 | 0x00000001; diff --git a/drivers/video/via/ioctl.c b/drivers/video/via/ioctl.c index da03c07..4d553d0 100644 --- a/drivers/video/via/ioctl.c +++ b/drivers/video/via/ioctl.c @@ -25,6 +25,8 @@ int viafb_ioctl_get_viafb_info(u_long arg) { struct viafb_ioctl_info viainfo; + memset(&viainfo, 0, sizeof(struct viafb_ioctl_info)); + viainfo.viafb_id = VIAID; viainfo.vendor_id = PCI_VIA_VENDOR_ID; diff --git a/drivers/video/via/via_i2c.c b/drivers/video/via/via_i2c.c index da9e4ca..021112e 100644 --- a/drivers/video/via/via_i2c.c +++ b/drivers/video/via/via_i2c.c @@ -114,6 +114,7 @@ static void via_i2c_setsda(void *data, int state) int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata) { + int ret; u8 mm1[] = {0x00}; struct i2c_msg msgs[2]; @@ -126,11 +127,18 @@ int viafb_i2c_readbyte(u8 adap, u8 slave_addr, u8 index, u8 *pdata) mm1[0] = index; msgs[0].len = 1; msgs[1].len = 1; msgs[0].buf = mm1; msgs[1].buf = pdata; - return i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2); + ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2); + if (ret == 2) + ret = 0; + else if (ret >= 0) + ret = -EIO; + + return ret; } int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data) { + int ret; u8 msg[2] = { index, data }; struct i2c_msg msgs; @@ -140,11 +148,18 @@ int viafb_i2c_writebyte(u8 adap, u8 slave_addr, u8 index, u8 data) msgs.addr = slave_addr / 2; msgs.len = 2; msgs.buf = msg; - return i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1); + ret = i2c_transfer(&via_i2c_par[adap].adapter, &msgs, 1); + if (ret == 1) + ret = 0; + else if (ret >= 0) + ret = -EIO; + + return ret; } int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len) { + int ret; u8 mm1[] = {0x00}; struct i2c_msg msgs[2]; @@ -156,7 +171,13 @@ int viafb_i2c_readbytes(u8 adap, u8 slave_addr, u8 index, u8 *buff, int buff_len mm1[0] = index; msgs[0].len = 1; msgs[1].len = buff_len; msgs[0].buf = mm1; msgs[1].buf = buff; - return i2c_transfer(&via_i2c_par[adap].adapter, 
msgs, 2); + ret = i2c_transfer(&via_i2c_par[adap].adapter, msgs, 2); + if (ret == 2) + ret = 0; + else if (ret >= 0) + ret = -EIO; + + return ret; } /* diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c index e66b8b1..d8b12c3 100644 --- a/drivers/video/w100fb.c +++ b/drivers/video/w100fb.c @@ -858,9 +858,9 @@ unsigned long w100fb_gpio_read(int port) void w100fb_gpio_write(int port, unsigned long value) { if (port==W100_GPIO_PORT_A) - value = writel(value, remapped_regs + mmGPIO_DATA); + writel(value, remapped_regs + mmGPIO_DATA); else - value = writel(value, remapped_regs + mmGPIO_DATA2); + writel(value, remapped_regs + mmGPIO_DATA2); } EXPORT_SYMBOL(w100fb_gpio_read); EXPORT_SYMBOL(w100fb_gpio_write); diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index ef8d9d5..4fb5b2b 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -96,11 +96,6 @@ static struct pci_device_id virtio_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); -/* A PCI device has it's own struct device and so does a virtio device so - * we create a place for the virtio devices to show up in sysfs. I think it - * would make more sense for virtio to not insist on having it's own device. */ -static struct device *virtio_pci_root; - /* Convert a generic virtio device to our structure */ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) { @@ -629,7 +624,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, if (vp_dev == NULL) return -ENOMEM; - vp_dev->vdev.dev.parent = virtio_pci_root; + vp_dev->vdev.dev.parent = &pci_dev->dev; vp_dev->vdev.dev.release = virtio_pci_release_dev; vp_dev->vdev.config = &virtio_pci_config_ops; vp_dev->pci_dev = pci_dev; @@ -717,17 +712,7 @@ static struct pci_driver virtio_pci_driver = { static int __init virtio_pci_init(void) { - int err; - - virtio_pci_root = root_device_register("virtio-pci"); - if (IS_ERR(virtio_pci_root)) - return PTR_ERR(virtio_pci_root); - - err = pci_register_driver(&virtio_pci_driver); - if (err) - root_device_unregister(virtio_pci_root); - - return err; + return pci_register_driver(&virtio_pci_driver); } module_init(virtio_pci_init); @@ -735,7 +720,6 @@ module_init(virtio_pci_init); static void __exit virtio_pci_exit(void) { pci_unregister_driver(&virtio_pci_driver); - root_device_unregister(virtio_pci_root); } module_exit(virtio_pci_exit); diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index 428f8a1..3939e53 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c @@ -231,7 +231,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) struct resource *r; struct rdc321x_wdt_pdata *pdata; - pdata = pdev->dev.platform_data; + pdata = platform_get_drvdata(pdev); if (!pdata) { dev_err(&pdev->dev, "no platform data supplied\n"); return -ENODEV; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index db8f506..0769108 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -107,6 +107,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu) #define VALID_EVTCHN(chn) ((chn) != 0) static struct irq_chip xen_dynamic_chip; +static struct irq_chip xen_percpu_chip; /* Constructor for packed IRQ information. 
*/ static struct irq_info mk_unbound_info(void) @@ -255,7 +256,7 @@ static void init_evtchn_cpu_bindings(void) } #endif - memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0))); + memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); } static inline void clear_evtchn(int port) @@ -363,7 +364,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) irq = find_unbound_irq(); set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_level_irq, "event"); + handle_edge_irq, "event"); evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_evtchn_info(evtchn); @@ -389,8 +390,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) if (irq < 0) goto out; - set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_level_irq, "ipi"); + set_irq_chip_and_handler_name(irq, &xen_percpu_chip, + handle_percpu_irq, "ipi"); bind_ipi.vcpu = cpu; if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, @@ -430,8 +431,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu) irq = find_unbound_irq(); - set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_level_irq, "virq"); + set_irq_chip_and_handler_name(irq, &xen_percpu_chip, + handle_percpu_irq, "virq"); evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_virq_info(evtchn, virq); @@ -536,6 +537,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, if (irq < 0) return irq; + irqflags |= IRQF_NO_SUSPEND; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); @@ -933,6 +935,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { .retrigger = retrigger_dynirq, }; +static struct irq_chip xen_percpu_chip __read_mostly = { + .name = "xen-percpu", + + .disable = disable_dynirq, + .mask = disable_dynirq, + .unmask = enable_dynirq, + + .ack = ack_dynirq, +}; + void __init xen_init_IRQ(void) { int i; diff --git a/firmware/Makefile b/firmware/Makefile index 020e629..99955ed 100644 --- a/firmware/Makefile +++ b/firmware/Makefile @@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) # Directories which we _might_ need to create, so we have a rule for them. -firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all)))) +firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all)))) quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) cmd_mkdir = mkdir -p $@ diff --git a/fs/aio.c b/fs/aio.c index 1ccf25c..a564a9d 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -512,7 +512,7 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req) ctx->reqs_active--; if (unlikely(!ctx->reqs_active && ctx->dead)) - wake_up(&ctx->wait); + wake_up_all(&ctx->wait); } static void aio_fput_routine(struct work_struct *data) @@ -712,8 +712,16 @@ static ssize_t aio_run_iocb(struct kiocb *iocb) */ ret = retry(iocb); - if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) + if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) { + /* + * There's no easy way to restart the syscall since other AIO's + * may be already running. Just fail this IO with EINTR. + */ + if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || + ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK)) + ret = -EINTR; aio_complete(iocb, ret, 0); + } out: spin_lock_irq(&ctx->ctx_lock); @@ -1225,7 +1233,7 @@ static void io_destroy(struct kioctx *ioctx) * by other CPUs at this point. Right now, we rely on the * locking done by the above calls to ensure this consistency. 
*/ - wake_up(&ioctx->wait); + wake_up_all(&ioctx->wait); put_ioctx(ioctx); /* once for the lookup */ } @@ -1659,6 +1667,9 @@ long do_io_submit(aio_context_t ctx_id, long nr, if (unlikely(nr < 0)) return -EINVAL; + if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) + nr = LONG_MAX/sizeof(*iocbpp); + if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) return -EFAULT; diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index c4e8353..42b60b0 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -723,7 +723,7 @@ static int __init init_misc_binfmt(void) { int err = register_filesystem(&bm_fs_type); if (!err) { - err = register_binfmt(&misc_format); + err = insert_binfmt(&misc_format); if (err) unregister_filesystem(&bm_fs_type); } diff --git a/fs/bio.c b/fs/bio.c index e7bf6ca..b3e0174 100644 --- a/fs/bio.c +++ b/fs/bio.c @@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs) { struct bio *bio; + if (nr_iovecs > UIO_MAXIOV) + return NULL; + bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec), gfp_mask); if (unlikely(!bio)) @@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd) static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count, gfp_t gfp_mask) { - struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask); + struct bio_map_data *bmd; + if (iov_count > UIO_MAXIOV) + return NULL; + + bmd = kmalloc(sizeof(*bmd), gfp_mask); if (!bmd) return NULL; @@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q, end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; start = uaddr >> PAGE_SHIFT; + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); + nr_pages += end - start; len += iov[i].iov_len; } @@ -954,6 +967,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; unsigned long start = uaddr >> PAGE_SHIFT; + /* + * Overflow, abort + */ + if (end < start) + return ERR_PTR(-EINVAL); + nr_pages += end - start; /* * buffer must be aligned to at least hardsector size for now @@ -981,7 +1000,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q, unsigned long start = uaddr >> PAGE_SHIFT; const int local_nr_pages = end - start; const int page_limit = cur_page + local_nr_pages; - + ret = get_user_pages_fast(uaddr, local_nr_pages, write_to_vm, &pages[cur_page]); if (ret < local_nr_pages) { diff --git a/fs/block_dev.c b/fs/block_dev.c index 99d6af8..4c54c86 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -681,8 +681,8 @@ retry: if (!bd_may_claim(bdev, whole, holder)) return -EBUSY; - /* if someone else is claiming, wait for it to finish */ - if (whole->bd_claiming && whole->bd_claiming != holder) { + /* if claiming is already in progress, wait for it to finish */ + if (whole->bd_claiming) { wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); DEFINE_WAIT(wait); @@ -1339,10 +1339,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) /* * hooks: /n/, see "layering violations". 
*/ - ret = devcgroup_inode_permission(bdev->bd_inode, perm); - if (ret != 0) { - bdput(bdev); - return ret; + if (!for_part) { + ret = devcgroup_inode_permission(bdev->bd_inode, perm); + if (ret != 0) { + bdput(bdev); + return ret; + } } lock_kernel(); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 29c2009..e40aab4 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1212,6 +1212,8 @@ struct btrfs_root { #define BTRFS_INODE_NOATIME (1 << 9) #define BTRFS_INODE_DIRSYNC (1 << 10) +#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31) + /* some macros to generate set/get funcs for the struct fields. This * assumes there is a lefoo_to_cpu for every type, so lets make a simple * one for u8: @@ -2239,6 +2241,8 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); int btrfs_find_orphan_roots(struct btrfs_root *tree_root); int btrfs_set_root_node(struct btrfs_root_item *item, struct extent_buffer *node); +void btrfs_check_and_init_root_item(struct btrfs_root_item *item); + /* dir-item.c */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 34f7c37..9cd32dc 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1127,8 +1127,10 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, root->commit_root = btrfs_root_node(root); BUG_ON(!root->node); out: - if (location->objectid != BTRFS_TREE_LOG_OBJECTID) + if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { root->ref_cows = 1; + btrfs_check_and_init_root_item(&root->root_item); + } return root; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 9254b3d..06cb014 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -282,6 +282,10 @@ static noinline int create_subvol(struct btrfs_root *root, inode_item->nbytes = cpu_to_le64(root->leafsize); inode_item->mode = cpu_to_le32(S_IFDIR | 0755); + root_item.flags = 0; + root_item.byte_limit = 0; + inode_item->flags = cpu_to_le64(BTRFS_INODE_ROOT_ITEM_INIT); + btrfs_set_root_bytenr(&root_item, leaf->start); btrfs_set_root_generation(&root_item, trans->transid); btrfs_set_root_level(&root_item, 0); diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 2d958be..6794baf 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -473,3 +473,21 @@ again: btrfs_free_path(path); return 0; } + +/* + * Old btrfs forgets to init root_item->flags and root_item->byte_limit + * for subvolumes. To work around this problem, we steal a bit from + * root_item->inode_item->flags, and use it to indicate if those fields + * have been properly initialized. 
+ */ +void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item) +{ + u64 inode_flags = le64_to_cpu(root_item->inode.flags); + + if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) { + inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT; + root_item->inode.flags = cpu_to_le64(inode_flags); + root_item->flags = 0; + root_item->byte_limit = 0; + } +} diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 66e4c66..7dc5b0d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -895,6 +895,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, record_root_in_trans(trans, root); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); + btrfs_check_and_init_root_item(new_root_item); old = btrfs_lock_root_node(root); btrfs_cow_block(trans, root, old, NULL, 0, &old); diff --git a/fs/char_dev.c b/fs/char_dev.c index d6db933..143d393 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "internal.h" @@ -39,7 +40,9 @@ struct backing_dev_info directly_mappable_cdev_bdi = { #endif /* permit direct mmap, for read, write or exec */ BDI_CAP_MAP_DIRECT | - BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP), + BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP | + /* no writeback happens */ + BDI_CAP_NO_ACCT_AND_WRITEBACK), }; static struct kobj_map *cdev_map; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index fb6318b..21088dc 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -104,7 +104,8 @@ extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, struct file *file, - struct vfsmount *mnt, unsigned int oflags); + struct vfsmount *mnt, unsigned int oflags, + __u32 oplock); extern int cifs_posix_open(char *full_path, struct inode **pinode, struct super_block *sb, int mode, int oflags, @@ -342,7 +343,7 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 netfid, const __u64 len, const __u64 offset, const __u32 numUnlock, const __u32 numLock, const __u8 lockType, - const bool waitFlag); + const bool waitFlag, const __u8 oplock_level); extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const int get_flag, const __u64 len, struct file_lock *, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index c65c341..6b764d7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1647,7 +1647,8 @@ int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const __u64 len, const __u64 offset, const __u32 numUnlock, - const __u32 numLock, const __u8 lockType, const bool waitFlag) + const __u32 numLock, const __u8 lockType, + const bool waitFlag, const __u8 oplock_level) { int rc = 0; LOCK_REQ *pSMB = NULL; @@ -1675,6 +1676,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, pSMB->NumberOfLocks = cpu_to_le16(numLock); pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock); pSMB->LockType = lockType; + pSMB->OplockLevel = oplock_level; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = smb_file_id; /* netfid stays le */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 2208f06..50d60cc 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1647,9 +1647,6 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) if (ses) { cFYI(1, "Existing smb sess found (status=%d)", ses->status); - /* 
existing SMB ses has a server reference already */ - cifs_put_tcp_session(server); - mutex_lock(&ses->session_mutex); rc = cifs_negotiate_protocol(xid, ses); if (rc) { @@ -1672,6 +1669,9 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) } } mutex_unlock(&ses->session_mutex); + + /* existing SMB ses has a server reference already */ + cifs_put_tcp_session(server); FreeXid(xid); return ses; } @@ -2606,7 +2606,7 @@ try_mount_again: remote_path_check: /* check if a whole path (including prepath) is not remote */ - if (!rc && cifs_sb->prepathlen && tcon) { + if (!rc && tcon) { /* build_path_to_root works only when we have a valid tcon */ full_path = cifs_build_path_to_root(cifs_sb); if (full_path == NULL) { diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index e7ae78b..9c33878 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -138,9 +138,9 @@ cifs_bp_rename_retry: */ struct cifsFileInfo * cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, - struct file *file, struct vfsmount *mnt, unsigned int oflags) + struct file *file, struct vfsmount *mnt, unsigned int oflags, + __u32 oplock) { - int oplock = 0; struct cifsFileInfo *pCifsFile; struct cifsInodeInfo *pCifsInode; struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb); @@ -149,9 +149,6 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, if (pCifsFile == NULL) return pCifsFile; - if (oplockEnabled) - oplock = REQ_OPLOCK; - pCifsFile->netfid = fileHandle; pCifsFile->pid = current->tgid; pCifsFile->pInode = igrab(newinode); @@ -476,7 +473,7 @@ cifs_create_set_dentry: } pfile_info = cifs_new_fileinfo(newinode, fileHandle, filp, - nd->path.mnt, oflags); + nd->path.mnt, oflags, oplock); if (pfile_info == NULL) { fput(filp); CIFSSMBClose(xid, tcon, fileHandle); @@ -738,7 +735,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, cfile = cifs_new_fileinfo(newInode, fileHandle, filp, nd->path.mnt, - nd->intent.open.flags); + nd->intent.open.flags, + oplock); if (cfile == NULL) { fput(filp); CIFSSMBClose(xid, pTcon, fileHandle); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 409e4f5..8a9e688 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -277,7 +277,7 @@ int cifs_open(struct inode *inode, struct file *file) pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt, - oflags); + oflags, oplock); if (pCifsFile == NULL) { CIFSSMBClose(xid, tcon, netfid); rc = -ENOMEM; @@ -367,7 +367,7 @@ int cifs_open(struct inode *inode, struct file *file) goto out; pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt, - file->f_flags); + file->f_flags, oplock); if (pCifsFile == NULL) { rc = -ENOMEM; goto out; @@ -796,12 +796,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) /* BB we could chain these into one lock request BB */ rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, - 0, 1, lockType, 0 /* wait flag */ ); + 0, 1, lockType, 0 /* wait flag */, 0); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1 /* numUnlock */ , 0 /* numLock */ , lockType, - 0 /* wait flag */ ); + 0 /* wait flag */, 0); pfLock->fl_type = F_UNLCK; if (rc != 0) cERROR(1, "Error unlocking previously locked " @@ -818,13 +818,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, 1, lockType | LOCKING_ANDX_SHARED_LOCK, - 0 /* wait flag */); + 0 /* wait flag */, 0); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1, 0, lockType | 
LOCKING_ANDX_SHARED_LOCK, - 0 /* wait flag */); + 0 /* wait flag */, 0); pfLock->fl_type = F_RDLCK; if (rc != 0) cERROR(1, "Error unlocking " @@ -868,8 +868,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) if (numLock) { rc = CIFSSMBLock(xid, tcon, netfid, length, - pfLock->fl_start, - 0, numLock, lockType, wait_flag); + pfLock->fl_start, 0, numLock, lockType, + wait_flag, 0); if (rc == 0) { /* For Windows locks we must store them. */ @@ -889,9 +889,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) (pfLock->fl_start + length) >= (li->offset + li->length)) { stored_rc = CIFSSMBLock(xid, tcon, - netfid, - li->length, li->offset, - 1, 0, li->type, false); + netfid, li->length, + li->offset, 1, 0, + li->type, false, 0); if (stored_rc) rc = stored_rc; else { @@ -2300,7 +2300,8 @@ cifs_oplock_break(struct slow_work *work) */ if (!cfile->closePend && !cfile->oplock_break_cancelled) { rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0, - LOCKING_ANDX_OPLOCK_RELEASE, false); + LOCKING_ANDX_OPLOCK_RELEASE, false, + cinode->clientCanCacheRead ? 1 : 0); cFYI(1, "Oplock release rc = %d", rc); } } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 6f0683c..b5dbf82 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -804,8 +804,10 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) rc = cifs_get_inode_info(&inode, full_path, NULL, sb, xid, NULL); - if (!inode) - return ERR_PTR(-ENOMEM); + if (!inode) { + inode = ERR_PTR(rc); + goto out; + } if (rc && cifs_sb->tcon->ipc) { cFYI(1, "ipc connection - fake read inode"); @@ -816,13 +818,11 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino) inode->i_uid = cifs_sb->mnt_uid; inode->i_gid = cifs_sb->mnt_gid; } else if (rc) { - kfree(full_path); - _FreeXid(xid); iget_failed(inode); - return ERR_PTR(rc); + inode = ERR_PTR(rc); } - +out: kfree(full_path); /* can not call macro FreeXid here since in a void func * TODO: This is no longer true diff --git a/fs/compat.c b/fs/compat.c index 6490d21..34bf9fc 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -1150,7 +1150,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, { compat_ssize_t tot_len; struct iovec iovstack[UIO_FASTIOV]; - struct iovec *iov; + struct iovec *iov = iovstack; ssize_t ret; io_fn_t fn; iov_fn_t fnv; @@ -1376,6 +1376,10 @@ static int compat_count(compat_uptr_t __user *argv, int max) argv++; if (i++ >= max) return -E2BIG; + + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; + cond_resched(); } } return i; @@ -1417,6 +1421,12 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv, while (len > 0) { int offset, bytes_to_copy; + if (fatal_signal_pending(current)) { + ret = -ERESTARTNOHAND; + goto out; + } + cond_resched(); + offset = pos % PAGE_SIZE; if (offset == 0) offset = PAGE_SIZE; @@ -1433,18 +1443,8 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv, if (!kmapped_page || kpos != (pos & PAGE_MASK)) { struct page *page; -#ifdef CONFIG_STACK_GROWSUP - ret = expand_stack_downwards(bprm->vma, pos); - if (ret < 0) { - /* We've exceed the stack rlimit. */ - ret = -E2BIG; - goto out; - } -#endif - ret = get_user_pages(current, bprm->mm, pos, - 1, 1, 1, &page, NULL); - if (ret <= 0) { - /* We've exceed the stack rlimit. 
*/ + page = get_arg_page(bprm, pos, 1); + if (!page) { ret = -E2BIG; goto out; } @@ -1565,8 +1565,10 @@ int compat_do_execve(char * filename, return retval; out: - if (bprm->mm) + if (bprm->mm) { + acct_arg_size(bprm, 0); mmput(bprm->mm); + } out_file: if (bprm->file) { diff --git a/fs/dcache.c b/fs/dcache.c index 86d4db1..c1f86c7 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1186,9 +1186,12 @@ struct dentry *d_obtain_alias(struct inode *inode) spin_unlock(&tmp->d_lock); spin_unlock(&dcache_lock); + security_d_instantiate(tmp, inode); return tmp; out_iput: + if (res && !IS_ERR(res)) + security_d_instantiate(res, inode); iput(inode); return res; } diff --git a/fs/direct-io.c b/fs/direct-io.c index 7600aac..458fdd3 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio) * filesystems can use it to hold additional state between get_block calls and * dio_complete. */ -static int dio_complete(struct dio *dio, loff_t offset, int ret) +static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async) { ssize_t transferred = 0; @@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) transferred = dio->i_size - offset; } - if (dio->end_io && dio->result) - dio->end_io(dio->iocb, offset, transferred, - dio->map_bh.b_private); - - if (dio->flags & DIO_LOCKING) - /* lockdep: non-owner release */ - up_read_non_owner(&dio->inode->i_alloc_sem); - if (ret == 0) ret = dio->page_errors; if (ret == 0) @@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret) if (ret == 0) ret = transferred; + if (dio->end_io && dio->result) { + dio->end_io(dio->iocb, offset, transferred, + dio->map_bh.b_private, ret, is_async); + } else if (is_async) { + aio_complete(dio->iocb, ret, 0); + } + + if (dio->flags & DIO_LOCKING) + /* lockdep: non-owner release */ + up_read_non_owner(&dio->inode->i_alloc_sem); + return ret; } @@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error) spin_unlock_irqrestore(&dio->bio_lock, flags); if (remaining == 0) { - int ret = dio_complete(dio, dio->iocb->ki_pos, 0); - aio_complete(dio->iocb, ret, 0); + dio_complete(dio, dio->iocb->ki_pos, 0, true); kfree(dio); } } @@ -632,7 +634,7 @@ static int dio_send_cur_page(struct dio *dio) int ret = 0; if (dio->bio) { - loff_t cur_offset = dio->block_in_file << dio->blkbits; + loff_t cur_offset = dio->cur_page_fs_offset; loff_t bio_next_offset = dio->logical_offset_in_bio + dio->bio->bi_size; @@ -657,7 +659,7 @@ static int dio_send_cur_page(struct dio *dio) * Submit now if the underlying fs is about to perform a * metadata read */ - if (dio->boundary) + else if (dio->boundary) dio_bio_submit(dio); } @@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, spin_unlock_irqrestore(&dio->bio_lock, flags); if (ret2 == 0) { - ret = dio_complete(dio, offset, ret); + ret = dio_complete(dio, offset, ret, false); kfree(dio); } else BUG_ON(ret != -EIOCBQUEUED); diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index e8fcf4e..622c9514 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -199,7 +199,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file) "the persistent file for the dentry with name " "[%s]; rc = [%d]\n", __func__, ecryptfs_dentry->d_name.name, rc); - goto out; + goto out_free; } } if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) @@ -207,7 +207,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file) rc = 
-EPERM; printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " "file must hence be opened RO\n", __func__); - goto out; + goto out_free; } ecryptfs_set_file_lower( file, ecryptfs_inode_to_private(inode)->lower_file); @@ -292,12 +292,40 @@ static int ecryptfs_fasync(int fd, struct file *file, int flag) return rc; } -static int ecryptfs_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg); +static long +ecryptfs_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct file *lower_file = NULL; + long rc = -ENOTTY; + + if (ecryptfs_file_to_private(file)) + lower_file = ecryptfs_file_to_lower(file); + if (lower_file && lower_file->f_op && lower_file->f_op->unlocked_ioctl) + rc = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg); + return rc; +} + +#ifdef CONFIG_COMPAT +static long +ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct file *lower_file = NULL; + long rc = -ENOIOCTLCMD; + + if (ecryptfs_file_to_private(file)) + lower_file = ecryptfs_file_to_lower(file); + if (lower_file && lower_file->f_op && lower_file->f_op->compat_ioctl) + rc = lower_file->f_op->compat_ioctl(lower_file, cmd, arg); + return rc; +} +#endif const struct file_operations ecryptfs_dir_fops = { .readdir = ecryptfs_readdir, - .ioctl = ecryptfs_ioctl, + .unlocked_ioctl = ecryptfs_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ecryptfs_compat_ioctl, +#endif .open = ecryptfs_open, .flush = ecryptfs_flush, .release = ecryptfs_release, @@ -313,7 +341,10 @@ const struct file_operations ecryptfs_main_fops = { .write = do_sync_write, .aio_write = generic_file_aio_write, .readdir = ecryptfs_readdir, - .ioctl = ecryptfs_ioctl, + .unlocked_ioctl = ecryptfs_unlocked_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ecryptfs_compat_ioctl, +#endif .mmap = generic_file_mmap, .open = ecryptfs_open, .flush = ecryptfs_flush, @@ -322,20 +353,3 @@ const struct file_operations ecryptfs_main_fops = { .fasync = ecryptfs_fasync, .splice_read = generic_file_splice_read, }; - -static int -ecryptfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd, - unsigned long arg) -{ - int rc = 0; - struct file *lower_file = NULL; - - if (ecryptfs_file_to_private(file)) - lower_file = ecryptfs_file_to_lower(file); - if (lower_file && lower_file->f_op && lower_file->f_op->ioctl) - rc = lower_file->f_op->ioctl(ecryptfs_inode_to_lower(inode), - lower_file, cmd, arg); - else - rc = -ENOTTY; - return rc; -} diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 31ef525..1681c62 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include "ecryptfs_kernel.h" @@ -70,15 +71,19 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); struct dentry *dentry_save; struct vfsmount *vfsmount_save; + unsigned int flags_save; int rc; dentry_save = nd->path.dentry; vfsmount_save = nd->path.mnt; + flags_save = nd->flags; nd->path.dentry = lower_dentry; nd->path.mnt = lower_mnt; + nd->flags &= ~LOOKUP_OPEN; rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); nd->path.dentry = dentry_save; nd->path.mnt = vfsmount_save; + nd->flags = flags_save; return rc; } @@ -264,7 +269,7 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, printk(KERN_ERR "%s: Out of memory whilst attempting " "to allocate ecryptfs_dentry_info struct\n", __func__); - goto out_dput; + goto out_put; } 
ecryptfs_set_dentry_lower(ecryptfs_dentry, lower_dentry); ecryptfs_set_dentry_lower_mnt(ecryptfs_dentry, lower_mnt); @@ -339,8 +344,9 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, out_free_kmem: kmem_cache_free(ecryptfs_header_cache_2, page_virt); goto out; -out_dput: +out_put: dput(lower_dentry); + mntput(lower_mnt); d_drop(ecryptfs_dentry); out: return rc; @@ -997,6 +1003,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), ecryptfs_dentry_to_lower(dentry), &lower_stat); if (!rc) { + fsstack_copy_attr_all(dentry->d_inode, + ecryptfs_inode_to_lower(dentry->d_inode)); generic_fillattr(dentry->d_inode, stat); stat->blocks = lower_stat.blocks; } @@ -1015,10 +1023,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, rc = -EOPNOTSUPP; goto out; } - mutex_lock(&lower_dentry->d_inode->i_mutex); - rc = lower_dentry->d_inode->i_op->setxattr(lower_dentry, name, value, - size, flags); - mutex_unlock(&lower_dentry->d_inode->i_mutex); + + rc = vfs_setxattr(lower_dentry, name, value, size, flags); out: return rc; } diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 89c5476..d6e9355 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1543,6 +1543,7 @@ int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key, printk(KERN_ERR "Could not find key with description: [%s]\n", sig); rc = process_request_key_err(PTR_ERR(*auth_tok_key)); + (*auth_tok_key) = NULL; goto out; } (*auth_tok) = ecryptfs_get_key_payload_data(*auth_tok_key); diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index b1d8275..b530d9c 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -368,6 +368,11 @@ static int ecryptfs_write_begin(struct file *file, && (pos != 0)) zero_user(page, 0, PAGE_CACHE_SIZE); out: + if (unlikely(rc)) { + unlock_page(page); + page_cache_release(page); + *pagep = NULL; + } return rc; } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 3817149..25bcbd1 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -63,6 +63,13 @@ * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call toepoll_ctl(EPOLL_CTL_DEL). + * It is also acquired when inserting an epoll fd onto another epoll + * fd. We do this so that we walk the epoll tree and ensure that this + * insertion does not create a cycle of epoll file descriptors, which + * could lead to deadlock. We need a global mutex to prevent two + * simultaneous inserts (A into B and B into A) from racing and + * constructing a cycle without either insert observing that it is + * going to. * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. 
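The eventpoll.c comment above motivates the epmutex/ep_loop_check() logic added in the hunks that follow: before one epoll fd may be inserted into another, the whole watch chain has to be walked so that no cycle and no over-deep nesting can be created. Below is a minimal standalone sketch of that idea; struct epnode, loop_check(), ep_insert_checked() and the MAX_* limits are made-up illustration names, not the kernel API, and only the depth-limited recursion pattern mirrors ep_loop_check_proc().

/*
 * Illustration only -- not fs/eventpoll.c. Each "epoll file" is a node
 * that may watch other nodes; an insertion is refused when the file being
 * inserted can already reach its new parent, or when the chain would get
 * deeper than a fixed nesting limit.
 */
#include <stdio.h>

#define MAX_WATCHES	8
#define MAX_NESTS	4	/* plays the role of EP_MAX_NESTS */

struct epnode {
	const char *name;
	struct epnode *watched[MAX_WATCHES];
	int nwatched;
};

/* Return non-zero if 'target' is reachable from 'ep' within MAX_NESTS hops. */
static int loop_check(const struct epnode *ep, const struct epnode *target,
		      int depth)
{
	int i;

	if (depth > MAX_NESTS)
		return 1;		/* chain too deep, refuse as well */
	for (i = 0; i < ep->nwatched; i++) {
		if (ep->watched[i] == target ||
		    loop_check(ep->watched[i], target, depth + 1))
			return 1;
	}
	return 0;
}

/* Let 'parent' watch 'child' only if that cannot close a cycle. */
static int ep_insert_checked(struct epnode *parent, struct epnode *child)
{
	if (parent->nwatched >= MAX_WATCHES ||
	    loop_check(child, parent, 0))
		return -1;		/* epoll_ctl() returns -ELOOP in this case */
	parent->watched[parent->nwatched++] = child;
	return 0;
}

int main(void)
{
	struct epnode a = { .name = "A" };
	struct epnode b = { .name = "B" };

	printf("add B to A: %s\n", ep_insert_checked(&a, &b) ? "rejected" : "ok");
	printf("add A to B: %s\n", ep_insert_checked(&b, &a) ? "rejected" : "ok");
	return 0;
}

Holding one global lock across the check and the insert, as the epoll_ctl() hunk below does with epmutex, is what keeps two concurrent inserts (A into B and B into A) from both passing the loop check before either edge exists.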
@@ -227,6 +234,9 @@ static int max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +/* Used to check for epoll file descriptor inclusion loops */ +static struct nested_calls poll_loop_ncalls; + /* Used for safe wake up implementation */ static struct nested_calls poll_safewake_ncalls; @@ -1181,6 +1191,62 @@ retry: return res; } +/** + * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() + * API, to verify that adding an epoll file inside another + * epoll structure, does not violate the constraints, in + * terms of closed loops, or too deep chains (which can + * result in excessive stack usage). + * + * @priv: Pointer to the epoll file to be currently checked. + * @cookie: Original cookie for this call. This is the top-of-the-chain epoll + * data structure pointer. + * @call_nests: Current dept of the @ep_call_nested() call stack. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) +{ + int error = 0; + struct file *file = priv; + struct eventpoll *ep = file->private_data; + struct rb_node *rbp; + struct epitem *epi; + + mutex_lock(&ep->mtx); + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, epi->ffd.file, + epi->ffd.file->private_data, current); + if (error != 0) + break; + } + } + mutex_unlock(&ep->mtx); + + return error; +} + +/** + * ep_loop_check - Performs a check to verify that adding an epoll file (@file) + * another epoll file (represented by @ep) does not create + * closed loops or too deep chains. + * + * @ep: Pointer to the epoll private data structure. + * @file: Pointer to the epoll file to be checked. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check(struct eventpoll *ep, struct file *file) +{ + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, file, ep, current); +} + /* * Open an eventpoll file descriptor. */ @@ -1229,6 +1295,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; + int did_lock_epmutex = 0; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; @@ -1270,6 +1337,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, */ ep = file->private_data; + /* + * When we insert an epoll file descriptor, inside another epoll file + * descriptor, there is the change of creating closed loops, which are + * better be handled here, than in more critical paths. + * + * We hold epmutex across the loop check and the insert in this case, in + * order to prevent two separate inserts from racing and each doing the + * insert "at the same time" such that ep_loop_check passes on both + * before either one does the insert, thereby creating a cycle. 
+ */ + if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { + mutex_lock(&epmutex); + did_lock_epmutex = 1; + error = -ELOOP; + if (ep_loop_check(ep, tfile) != 0) + goto error_tgt_fput; + } + + mutex_lock(&ep->mtx); /* @@ -1305,6 +1391,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: + if (unlikely(did_lock_epmutex)) + mutex_unlock(&epmutex); + fput(tfile); error_fput: fput(file); @@ -1423,6 +1512,12 @@ static int __init eventpoll_init(void) max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / EP_ITEM_COST; + /* + * Initialize the structure used to perform epoll file descriptor + * inclusion loops checks. + */ + ep_nested_calls_init(&poll_loop_ncalls); + /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); diff --git a/fs/exec.c b/fs/exec.c index e19de6a..e03ddd4 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -158,7 +158,22 @@ out: #ifdef CONFIG_MMU -static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) +{ + struct mm_struct *mm = current->mm; + long diff = (long)(pages - bprm->vma_pages); + + if (!mm || !diff) + return; + + bprm->vma_pages = pages; + + down_write(&mm->mmap_sem); + mm->total_vm += diff; + up_write(&mm->mmap_sem); +} + +struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; @@ -180,6 +195,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; struct rlimit *rlim; + acct_arg_size(bprm, size / PAGE_SIZE); + /* * We've historically supported up to 32 pages (ARG_MAX) * of argument strings even with small stacks @@ -248,6 +265,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm) vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); INIT_LIST_HEAD(&vma->anon_vma_chain); + + err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); + if (err) + goto err; + err = insert_vm_struct(mm, vma); if (err) goto err; @@ -270,7 +292,11 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len) #else -static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, +void acct_arg_size(struct linux_binprm *bprm, unsigned long pages) +{ +} + +struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; @@ -377,6 +403,9 @@ static int count(char __user * __user * argv, int max) argv++; if (i++ >= max) return -E2BIG; + + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; cond_resched(); } } @@ -420,6 +449,12 @@ static int copy_strings(int argc, char __user * __user * argv, while (len > 0) { int offset, bytes_to_copy; + if (fatal_signal_pending(current)) { + ret = -ERESTARTNOHAND; + goto out; + } + cond_resched(); + offset = pos % PAGE_SIZE; if (offset == 0) offset = PAGE_SIZE; @@ -594,6 +629,11 @@ int setup_arg_pages(struct linux_binprm *bprm, #else stack_top = arch_align_stack(stack_top); stack_top = PAGE_ALIGN(stack_top); + + if (unlikely(stack_top < mmap_min_addr) || + unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr)) + return -ENOMEM; + stack_shift = vma->vm_end - stack_top; bprm->p -= stack_shift; @@ -977,6 +1017,7 @@ int flush_old_exec(struct linux_binprm * bprm) /* * Release all of the old mmap stuff */ + acct_arg_size(bprm, 0); retval = exec_mmap(bprm->mm); if 
(retval) goto out; @@ -1401,8 +1442,10 @@ int do_execve(char * filename, return retval; out: - if (bprm->mm) - mmput (bprm->mm); + if (bprm->mm) { + acct_arg_size(bprm, 0); + mmput(bprm->mm); + } out_file: if (bprm->file) { diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 71efb0e..b5d9028 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode, 1); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = ext2_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * inode_dec_link_count() will mark the inode dirty. */ old_inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(old_inode); ext2_delete_entry (old_de, old_page); - inode_dec_link_count(old_inode); if (dir_de) { if (old_dir != new_dir) diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index ee18408..827b573 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1550,8 +1550,8 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, goto cleanup; node2 = (struct dx_node *)(bh2->b_data); entries2 = node2->entries; + memset(&node2->fake, 0, sizeof(struct fake_dirent)); node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize); - node2->fake.inode = 0; BUFFER_TRACE(frame->bh, "get_write_access"); err = ext3_journal_get_write_access(handle, frame->bh); if (err) diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 6c953bb..6c7efb6 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1459,6 +1459,13 @@ static void ext3_orphan_cleanup (struct super_block * sb, return; } + /* Check if feature set allows readwrite operations */ + if (EXT3_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) { + printk(KERN_INFO "EXT3-fs: %s: Skipping orphan cleanup due to " + "unknown ROCOMPAT features\n", sb->s_id); + return; + } + if (EXT3_SB(sb)->s_mount_state & EXT3_ERROR_FS) { if (es->s_last_orphan) jbd_debug(1, "Errors on filesystem, " diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 19a4de5..4c9f05d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -167,13 +167,15 @@ struct mpage_da_data { }; #define EXT4_IO_UNWRITTEN 0x1 typedef struct ext4_io_end { - struct list_head list; /* per-file finished AIO list */ + struct list_head list; /* per-file finished IO list */ struct inode *inode; /* file being written to */ unsigned int flag; /* unwritten or not */ struct page *page; /* page struct for buffer write */ loff_t offset; /* offset in the file */ ssize_t size; /* size of the extent */ struct work_struct work; /* data work queue */ + struct kiocb *iocb; /* iocb struct for AIO */ + int result; /* error value for AIO */ } ext4_io_end_t; /* diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 42272d6..0595b8c 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3668,6 +3668,8 @@ static int ext4_end_io_nolock(ext4_io_end_t *io) return ret; } + if (io->iocb) + aio_complete(io->iocb, io->result, 0); /* clear 
the DIO AIO unwritten flag */ io->flag = 0; return ret; @@ -3767,6 +3769,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) io->offset = 0; io->size = 0; io->page = NULL; + io->iocb = NULL; + io->result = 0; INIT_WORK(&io->work, ext4_end_io_work); INIT_LIST_HEAD(&io->list); } @@ -3775,7 +3779,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags) } static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, - ssize_t size, void *private) + ssize_t size, void *private, int ret, + bool is_async) { ext4_io_end_t *io_end = iocb->private; struct workqueue_struct *wq; @@ -3784,7 +3789,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, /* if not async direct IO or dio with 0 bytes write, just return */ if (!io_end || !size) - return; + goto out; ext_debug("ext4_end_io_dio(): io_end 0x%p" "for inode %lu, iocb 0x%p, offset %llu, size %llu\n", @@ -3795,12 +3800,18 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset, if (io_end->flag != EXT4_IO_UNWRITTEN){ ext4_free_io_end(io_end); iocb->private = NULL; +out: + if (is_async) + aio_complete(iocb, ret, 0); return; } io_end->offset = offset; io_end->size = size; - io_end->flag = EXT4_IO_UNWRITTEN; + if (is_async) { + io_end->iocb = iocb; + io_end->result = ret; + } wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq; /* queue the work to convert unwritten extents to written */ @@ -5582,13 +5593,12 @@ static int ext4_indirect_trans_blocks(struct inode *inode, int nrblocks, /* if nrblocks are contiguous */ if (chunk) { /* - * With N contiguous data blocks, it need at most - * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) indirect blocks - * 2 dindirect blocks - * 1 tindirect block + * With N contiguous data blocks, we need at most + * N/EXT4_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks, + * 2 dindirect blocks, and 1 tindirect block */ - indirects = nrblocks / EXT4_ADDR_PER_BLOCK(inode->i_sb); - return indirects + 3; + return DIV_ROUND_UP(nrblocks, + EXT4_ADDR_PER_BLOCK(inode->i_sb)) + 4; } /* * if nrblocks are not contiguous, worse case, each block touch diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4e8983a..a45ced9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -241,7 +241,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) if (sb->s_flags & MS_RDONLY) return ERR_PTR(-EROFS); - vfs_check_frozen(sb, SB_FREEZE_WRITE); + vfs_check_frozen(sb, SB_FREEZE_TRANS); /* Special case here: if the journal has aborted behind our * backs (eg. EIO in the commit thread), then we still need to * take the FS itself readonly cleanly. 
*/ @@ -3491,7 +3491,7 @@ int ext4_force_commit(struct super_block *sb) journal = EXT4_SB(sb)->s_journal; if (journal) { - vfs_check_frozen(sb, SB_FREEZE_WRITE); + vfs_check_frozen(sb, SB_FREEZE_TRANS); ret = ext4_journal_force_commit(journal); } diff --git a/fs/file_table.c b/fs/file_table.c index 5c7d10e..684ffe3 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -123,13 +123,13 @@ struct file *get_empty_filp(void) goto fail; percpu_counter_inc(&nr_files); + f->f_cred = get_cred(cred); if (security_file_alloc(f)) goto fail_sec; INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); - f->f_cred = get_cred(cred); spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 9424796..e5cdabf 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -1552,6 +1552,14 @@ __acquires(&fc->lock) } } +static void end_queued_requests(struct fuse_conn *fc) +{ + fc->max_background = UINT_MAX; + flush_bg_queue(fc); + end_requests(fc, &fc->pending); + end_requests(fc, &fc->processing); +} + /* * Abort all requests. * @@ -1578,8 +1586,7 @@ void fuse_abort_conn(struct fuse_conn *fc) fc->connected = 0; fc->blocked = 0; end_io_requests(fc); - end_requests(fc, &fc->pending); - end_requests(fc, &fc->processing); + end_queued_requests(fc); wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); kill_fasync(&fc->fasync, SIGIO, POLL_IN); @@ -1594,8 +1601,9 @@ int fuse_dev_release(struct inode *inode, struct file *file) if (fc) { spin_lock(&fc->lock); fc->connected = 0; - end_requests(fc, &fc->pending); - end_requests(fc, &fc->processing); + fc->blocked = 0; + end_queued_requests(fc); + wake_up_all(&fc->blocked_waitq); spin_unlock(&fc->lock); fuse_conn_put(fc); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ada0ade..9576aed 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -13,6 +13,7 @@ #include #include #include +#include static const struct file_operations fuse_direct_io_file_operations; @@ -85,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) return ff; } +static void fuse_release_async(struct work_struct *work) +{ + struct fuse_req *req; + struct fuse_conn *fc; + struct path path; + + req = container_of(work, struct fuse_req, misc.release.work); + path = req->misc.release.path; + fc = get_fuse_conn(path.dentry->d_inode); + + fuse_put_request(fc, req); + path_put(&path); +} + static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { - path_put(&req->misc.release.path); + if (fc->destroy_req) { + /* + * If this is a fuseblk mount, then it's possible that + * releasing the path will result in releasing the + * super block and sending the DESTROY request. If + * the server is single threaded, this would hang. + * For this reason do the path_put() in a separate + * thread. 
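Editor's note, not part of the patch: the fuse_abort_conn()/fuse_dev_release() hunks above fold the duplicated teardown into a new end_queued_requests() helper that first lifts the background limit so queued background requests reach the pending list, then fails both lists in one place. The toy model below sketches that flow; struct req, struct conn and the list handling are illustrative, and -ENOTCONN is used only for demonstration.

#include <stdio.h>
#include <stddef.h>
#include <limits.h>

struct req {
        int id;
        int error;
        struct req *next;
};

struct conn {
        struct req *bg_queue;
        struct req *pending;
        struct req *processing;
        unsigned int max_background;
        unsigned int num_background;
};

static void end_requests(struct req **list)
{
        while (*list) {
                struct req *r = *list;

                *list = r->next;
                r->next = NULL;
                r->error = -107;        /* -ENOTCONN, for illustration */
                printf("ended request %d with %d\n", r->id, r->error);
        }
}

/* Move background requests to pending while below max_background. */
static void flush_bg_queue(struct conn *c)
{
        while (c->bg_queue && c->num_background < c->max_background) {
                struct req *r = c->bg_queue;

                c->bg_queue = r->next;
                r->next = c->pending;
                c->pending = r;
                c->num_background++;
        }
}

/* Counterpart of the new end_queued_requests(): raise the limit, flush the
 * background queue, then fail pending and processing in one place. */
static void end_queued_requests(struct conn *c)
{
        c->max_background = UINT_MAX;
        flush_bg_queue(c);
        end_requests(&c->pending);
        end_requests(&c->processing);
}

int main(void)
{
        struct req a = { 1, 0, NULL }, b = { 2, 0, &a };
        struct conn c = { .bg_queue = &b, .max_background = 0 };

        end_queued_requests(&c);
        return 0;
}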
+ */ + atomic_inc(&req->count); + INIT_WORK(&req->misc.release.work, fuse_release_async); + schedule_work(&req->misc.release.work); + } else { + path_put(&req->misc.release.path); + } } -static void fuse_file_put(struct fuse_file *ff) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (atomic_dec_and_test(&ff->count)) { struct fuse_req *req = ff->reserved_req; - req->end = fuse_release_end; - fuse_request_send_background(ff->fc, req); + if (sync) { + fuse_request_send(ff->fc, req); + path_put(&req->misc.release.path); + fuse_put_request(ff->fc, req); + } else { + req->end = fuse_release_end; + fuse_request_send_background(ff->fc, req); + } kfree(ff); } } @@ -134,6 +169,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open); void fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; + struct fuse_conn *fc = get_fuse_conn(inode); if (ff->open_flags & FOPEN_DIRECT_IO) file->f_op = &fuse_direct_io_file_operations; @@ -141,6 +177,15 @@ void fuse_finish_open(struct inode *inode, struct file *file) invalidate_inode_pages2(inode->i_mapping); if (ff->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); + if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fc->lock); + fi->attr_version = ++fc->attr_version; + i_size_write(inode, 0); + spin_unlock(&fc->lock); + fuse_invalidate_attr(inode); + } } int fuse_open_common(struct inode *inode, struct file *file, bool isdir) @@ -208,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode) * Normally this will send the RELEASE request, however if * some asynchronous READ or WRITE requests are outstanding, * the sending will be delayed. + * + * Make the release synchronous if this is a fuseblk mount, + * synchronous RELEASE is allowed (and desirable) in this case + * because the server can be trusted not to screw up. */ - fuse_file_put(ff); + fuse_file_put(ff, ff->fc->destroy_req != NULL); } static int fuse_open(struct inode *inode, struct file *file) @@ -547,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) page_cache_release(page); } if (req->ff) - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_send_readpages(struct fuse_req *req, struct file *file) @@ -1126,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) { __free_page(req->pages[0]); - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) @@ -1617,6 +1666,58 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + +/* + * CUSE servers compiled on 32bit broke on 64bit kernels because the + * ABI was defined to be 'struct iovec' which is different on 32bit + * and 64bit. Fortunately we can determine which structure the server + * used from the size of the reply. 
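Editor's note, not part of the patch: fuse_verify_ioctl_iov() in the hunk above bounds the total iovec length by subtracting each element from a remaining budget instead of summing them, which cannot overflow. A self-contained userspace sketch of the same idea, assuming a made-up MAX_IOCTL_BYTES limit in place of FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT:

#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>

#define MAX_IOCTL_BYTES (32u * 4096u)   /* assumed limit for the sketch */

/* Reject any element larger than what is left of the budget, so the running
 * total can never wrap around. */
static int verify_iov(const struct iovec *iov, size_t count)
{
        size_t budget = MAX_IOCTL_BYTES;
        size_t n;

        for (n = 0; n < count; n++) {
                if (iov[n].iov_len > budget)
                        return -ENOMEM;
                budget -= iov[n].iov_len;
        }
        return 0;
}

int main(void)
{
        struct iovec ok[2]  = { { 0, 4096 }, { 0, 8192 } };
        struct iovec bad[1] = { { 0, (size_t)-1 } };

        printf("ok:  %d\n", verify_iov(ok, 2));
        printf("bad: %d\n", verify_iov(bad, 1));
        return 0;
}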
+ */ +static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, + size_t transferred, unsigned count, + bool is_compat) +{ +#ifdef CONFIG_COMPAT + if (count * sizeof(struct compat_iovec) == transferred) { + struct compat_iovec *ciov = src; + unsigned i; + + /* + * With this interface a 32bit server cannot support + * non-compat (i.e. ones coming from 64bit apps) ioctl + * requests + */ + if (!is_compat) + return -EINVAL; + + for (i = 0; i < count; i++) { + dst[i].iov_base = compat_ptr(ciov[i].iov_base); + dst[i].iov_len = ciov[i].iov_len; + } + return 0; + } +#endif + + if (count * sizeof(struct iovec) != transferred) + return -EIO; + + memcpy(dst, src, transferred); + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1798,18 +1899,25 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) goto out; - err = -EIO; - if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred) - goto out; - - /* okay, copy in iovs and retry */ vaddr = kmap_atomic(pages[0], KM_USER0); - memcpy(page_address(iov_page), vaddr, transferred); + err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr, + transferred, in_iovs + out_iovs, + (flags & FUSE_IOCTL_COMPAT) != 0); kunmap_atomic(vaddr, KM_USER0); + if (err) + goto out; in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 8f309f0..6b9a746 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -21,6 +21,7 @@ #include #include #include +#include /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 @@ -257,7 +258,10 @@ struct fuse_req { union { struct fuse_forget_in forget_in; struct { - struct fuse_release_in in; + union { + struct fuse_release_in in; + struct work_struct work; + }; struct path path; } release; struct fuse_init_in init_in; diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 6a857e2..83917b5 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -932,7 +932,7 @@ int gfs2_logd(void *data) do { prepare_to_wait(&sdp->sd_logd_waitq, &wait, - TASK_UNINTERRUPTIBLE); + TASK_INTERRUPTIBLE); if (!gfs2_ail_flush_reqd(sdp) && !gfs2_jrnl_flush_reqd(sdp) && !kthread_should_stop()) diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index fa96bbb..2d7f165 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -86,46 +86,25 @@ struct ea_buffer { #define EA_MALLOC 0x0008 +static int is_known_namespace(const char *name) +{ + if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) && + strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) && + strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) && + strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) + return false; + + return true; +} + /* * These three routines are used to recognize on-disk extended attributes * that are in a recognized namespace. If the attribute is not recognized, * "os2." is prepended to the name */ -static inline int is_os2_xattr(struct jfs_ea *ea) +static int is_os2_xattr(struct jfs_ea *ea) { - /* - * Check for "system." - */ - if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) && - !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) - return false; - /* - * Check for "user." 
- */ - if ((ea->namelen >= XATTR_USER_PREFIX_LEN) && - !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) - return false; - /* - * Check for "security." - */ - if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) && - !strncmp(ea->name, XATTR_SECURITY_PREFIX, - XATTR_SECURITY_PREFIX_LEN)) - return false; - /* - * Check for "trusted." - */ - if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) && - !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) - return false; - /* - * Add any other valid namespace prefixes here - */ - - /* - * We assume it's OS/2's flat namespace - */ - return true; + return !is_known_namespace(ea->name); } static inline int name_size(struct jfs_ea *ea) @@ -764,13 +743,23 @@ static int can_set_xattr(struct inode *inode, const char *name, if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return can_set_system_xattr(inode, name, value, value_len); + if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) { + /* + * This makes sure that we aren't trying to set an + * attribute in a different namespace by prefixing it + * with "os2." + */ + if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN)) + return -EOPNOTSUPP; + return 0; + } + /* * Don't allow setting an attribute in an unknown namespace. */ if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) && strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) && - strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) && - strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) + strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) return -EOPNOTSUPP; return 0; @@ -952,19 +941,8 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data, int xattr_size; ssize_t size; int namelen = strlen(name); - char *os2name = NULL; char *value; - if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) { - os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1, - GFP_KERNEL); - if (!os2name) - return -ENOMEM; - strcpy(os2name, name + XATTR_OS2_PREFIX_LEN); - name = os2name; - namelen -= XATTR_OS2_PREFIX_LEN; - } - down_read(&JFS_IP(inode)->xattr_sem); xattr_size = ea_get(inode, &ea_buf, 0); @@ -1002,8 +980,6 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data, out: up_read(&JFS_IP(inode)->xattr_sem); - kfree(os2name); - return size; } @@ -1012,6 +988,19 @@ ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data, { int err; + if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) { + /* + * skip past "os2." prefix + */ + name += XATTR_OS2_PREFIX_LEN; + /* + * Don't allow retrieving properly prefixed attributes + * by prepending them with "os2." 
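Editor's note, not part of the patch: the jfs xattr hunks above centralize the namespace test in is_known_namespace() and refuse an "os2."-prefixed name whose remainder is itself a recognized namespace, so "os2.user.foo" can no longer alias "user.foo". A standalone sketch of that rule, with the prefix strings copied in so it compiles on its own and -EOPNOTSUPP returned as in the patch:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define XATTR_SYSTEM_PREFIX   "system."
#define XATTR_USER_PREFIX     "user."
#define XATTR_SECURITY_PREFIX "security."
#define XATTR_TRUSTED_PREFIX  "trusted."
#define XATTR_OS2_PREFIX      "os2."

static bool is_known_namespace(const char *name)
{
        return !strncmp(name, XATTR_SYSTEM_PREFIX, strlen(XATTR_SYSTEM_PREFIX)) ||
               !strncmp(name, XATTR_USER_PREFIX, strlen(XATTR_USER_PREFIX)) ||
               !strncmp(name, XATTR_SECURITY_PREFIX, strlen(XATTR_SECURITY_PREFIX)) ||
               !strncmp(name, XATTR_TRUSTED_PREFIX, strlen(XATTR_TRUSTED_PREFIX));
}

/* "os2." may only wrap names that are NOT already in a known namespace. */
static int check_os2_name(const char *name)
{
        if (strncmp(name, XATTR_OS2_PREFIX, strlen(XATTR_OS2_PREFIX)) != 0)
                return 0;
        if (is_known_namespace(name + strlen(XATTR_OS2_PREFIX)))
                return -EOPNOTSUPP;
        return 0;
}

int main(void)
{
        printf("os2.flatname     -> %d\n", check_os2_name("os2.flatname"));
        printf("os2.user.comment -> %d\n", check_os2_name("os2.user.comment"));
        return 0;
}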
+ */ + if (is_known_namespace(name)) + return -EOPNOTSUPP; + } + err = __jfs_getxattr(dentry->d_inode, name, data, buf_size); return err; diff --git a/fs/minix/namei.c b/fs/minix/namei.c index e20ee85..f3f3578 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -115,7 +115,7 @@ static int minix_mkdir(struct inode * dir, struct dentry *dentry, int mode) inode_inc_link_count(dir); - inode = minix_new_inode(dir, mode, &err); + inode = minix_new_inode(dir, S_IFDIR | mode, &err); if (!inode) goto out_dir; diff --git a/fs/namespace.c b/fs/namespace.c index 88058de..32dcd24 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1984,7 +1984,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, if (flags & MS_RDONLY) mnt_flags |= MNT_READONLY; - flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | + flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN | MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | MS_STRICTATIME); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d25b525..e006770 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -274,7 +274,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, sin1->sin6_scope_id != sin2->sin6_scope_id) return 0; - return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr); + return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr); } #else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e60416d..d69551e 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1103,7 +1103,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) goto no_open_dput; /* We can't create new files, or truncate existing ones here */ - openflags &= ~(O_CREAT|O_TRUNC); + openflags &= ~(O_CREAT|O_EXCL|O_TRUNC); /* * Note: we're not holding inode->i_mutex and so may be racing with diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index ad4cd31..b1c23b7 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -402,15 +402,18 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, pos += vec->iov_len; } + /* + * If no bytes were started, return the error, and let the + * generic layer handle the completion. + */ + if (requested_bytes == 0) { + nfs_direct_req_release(dreq); + return result < 0 ? result : -EIO; + } + if (put_dreq(dreq)) nfs_direct_complete(dreq); - - if (requested_bytes != 0) - return 0; - - if (result < 0) - return result; - return -EIO; + return 0; } static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov, @@ -830,15 +833,18 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, pos += vec->iov_len; } + /* + * If no bytes were started, return the error, and let the + * generic layer handle the completion. + */ + if (requested_bytes == 0) { + nfs_direct_req_release(dreq); + return result < 0 ? 
result : -EIO; + } + if (put_dreq(dreq)) nfs_direct_write_complete(dreq, dreq->inode); - - if (requested_bytes != 0) - return 0; - - if (result < 0) - return result; - return -EIO; + return 0; } static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov, diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f036153..6fd4fba 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -222,7 +222,7 @@ static int nfs_do_fsync(struct nfs_open_context *ctx, struct inode *inode) have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); if (have_error) ret = xchg(&ctx->error, 0); - if (!ret) + if (!ret && status < 0) ret = status; return ret; } @@ -560,7 +560,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct file *filp = vma->vm_file; struct dentry *dentry = filp->f_path.dentry; unsigned pagelen; - int ret = -EINVAL; + int ret = VM_FAULT_NOPAGE; struct address_space *mapping; dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n", @@ -576,21 +576,20 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (mapping != dentry->d_inode->i_mapping) goto out_unlock; - ret = 0; pagelen = nfs_page_length(page); if (pagelen == 0) goto out_unlock; - ret = nfs_flush_incompatible(filp, page); - if (ret != 0) - goto out_unlock; + ret = VM_FAULT_LOCKED; + if (nfs_flush_incompatible(filp, page) == 0 && + nfs_updatepage(filp, page, 0, pagelen) == 0) + goto out; - ret = nfs_updatepage(filp, page, 0, pagelen); + ret = VM_FAULT_SIGBUS; out_unlock: - if (!ret) - return VM_FAULT_LOCKED; unlock_page(page); - return VM_FAULT_SIGBUS; +out: + return ret; } static const struct vm_operations_struct nfs_file_vm_ops = { @@ -697,6 +696,7 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl) { struct inode *inode = filp->f_mapping->host; int status = 0; + unsigned int saved_type = fl->fl_type; /* Try local locking first */ posix_test_lock(filp, fl); @@ -704,6 +704,7 @@ static int do_getlk(struct file *filp, int cmd, struct file_lock *fl) /* found a conflict */ goto out; } + fl->fl_type = saved_type; if (nfs_have_delegation(inode, FMODE_READ)) goto out_noconflict; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 099b351..0cafbdb 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -811,9 +811,10 @@ out: return ret; } -static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) +static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) { struct nfs_inode *nfsi = NFS_I(inode); + unsigned long ret = 0; if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE) && (fattr->valid & NFS_ATTR_FATTR_CHANGE) @@ -821,25 +822,32 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfsi->change_attr = fattr->change_attr; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; + ret |= NFS_INO_INVALID_ATTR; } /* If we have atomic WCC data, we may update some attributes */ if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME) && (fattr->valid & NFS_ATTR_FATTR_CTIME) - && timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) - memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); + && timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) { + memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); + ret |= NFS_INO_INVALID_ATTR; + } if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME) && (fattr->valid & NFS_ATTR_FATTR_MTIME) && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) { - memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); - if 
(S_ISDIR(inode->i_mode)) - nfsi->cache_validity |= NFS_INO_INVALID_DATA; + memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); + if (S_ISDIR(inode->i_mode)) + nfsi->cache_validity |= NFS_INO_INVALID_DATA; + ret |= NFS_INO_INVALID_ATTR; } if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE) && (fattr->valid & NFS_ATTR_FATTR_SIZE) && i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size) - && nfsi->npages == 0) - i_size_write(inode, nfs_size_to_loff_t(fattr->size)); + && nfsi->npages == 0) { + i_size_write(inode, nfs_size_to_loff_t(fattr->size)); + ret |= NFS_INO_INVALID_ATTR; + } + return ret; } /** @@ -1153,7 +1161,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | NFS_INO_REVAL_PAGECACHE); /* Do atomic weak cache consistency updates */ - nfs_wcc_update_inode(inode, fattr); + invalid |= nfs_wcc_update_inode(inode, fattr); /* More cache consistency checks */ if (fattr->valid & NFS_ATTR_FATTR_CHANGE) { diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 59047f8..3dde50c 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -503,13 +503,13 @@ static struct rpc_procinfo mnt3_procedures[] = { static struct rpc_version mnt_version1 = { .number = 1, - .nrprocs = 2, + .nrprocs = ARRAY_SIZE(mnt_procedures), .procs = mnt_procedures, }; static struct rpc_version mnt_version3 = { .number = 3, - .nrprocs = 2, + .nrprocs = ARRAY_SIZE(mnt3_procedures), .procs = mnt3_procedures, }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 70015dd..f7b7698 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -255,9 +255,6 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, nfs4_state_mark_reclaim_nograce(clp, state); goto do_state_recovery; case -NFS4ERR_STALE_STATEID: - if (state == NULL) - break; - nfs4_state_mark_reclaim_reboot(clp, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_EXPIRED: goto do_state_recovery; @@ -1102,6 +1099,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * clear_bit(NFS_DELEGATED_STATE, &state->flags); smp_rmb(); if (state->n_rdwr != 0) { + clear_bit(NFS_O_RDWR_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate); if (ret != 0) return ret; @@ -1109,6 +1107,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * return -ESTALE; } if (state->n_wronly != 0) { + clear_bit(NFS_O_WRONLY_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate); if (ret != 0) return ret; @@ -1116,6 +1115,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * return -ESTALE; } if (state->n_rdonly != 0) { + clear_bit(NFS_O_RDONLY_STATE, &state->flags); ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate); if (ret != 0) return ret; @@ -2023,7 +2023,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) struct rpc_cred *cred; struct nfs4_state *state; struct dentry *res; - fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); + int open_flags = nd->intent.open.flags; + fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); if (nd->flags & LOOKUP_CREATE) { attr.ia_mode = nd->intent.open.create_mode; @@ -2031,8 +2032,9 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) if (!IS_POSIXACL(dir)) attr.ia_mode &= ~current_umask(); } else { + open_flags &= ~O_EXCL; attr.ia_valid = 0; - BUG_ON(nd->intent.open.flags & O_CREAT); + 
BUG_ON(open_flags & O_CREAT); } cred = rpc_lookup_cred(); @@ -2041,7 +2043,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) parent = dentry->d_parent; /* Protect against concurrent sillydeletes */ nfs_block_sillyrename(parent); - state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred); + state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred); put_rpccred(cred); if (IS_ERR(state)) { if (PTR_ERR(state) == -ENOENT) { @@ -3477,9 +3479,6 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, nfs4_state_mark_reclaim_nograce(clp, state); goto do_state_recovery; case -NFS4ERR_STALE_STATEID: - if (state == NULL) - break; - nfs4_state_mark_reclaim_reboot(clp, state); case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_EXPIRED: goto do_state_recovery; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 34acf59..53f05dd 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1111,17 +1111,14 @@ static void nfs4_reclaim_complete(struct nfs_client *clp, (void)ops->reclaim_complete(clp); } -static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) +static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp) { struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) - return; - - nfs4_reclaim_complete(clp, - nfs4_reboot_recovery_ops[clp->cl_minorversion]); + return 0; for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); @@ -1135,6 +1132,14 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) } nfs_delegation_reap_unclaimed(clp); + return 1; +} + +static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) +{ + if (!nfs4_state_clear_reclaim_reboot(clp)) + return; + nfs4_reclaim_complete(clp, nfs4_reboot_recovery_ops[clp->cl_minorversion]); } static void nfs_delegation_clear_all(struct nfs_client *clp) @@ -1161,7 +1166,7 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_LEASE_MOVED: set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); - nfs4_state_end_reclaim_reboot(clp); + nfs4_state_clear_reclaim_reboot(clp); nfs4_state_start_reclaim_reboot(clp); break; case -NFS4ERR_EXPIRED: diff --git a/fs/nfs/super.c b/fs/nfs/super.c index f9df16d..381d929 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -431,7 +431,15 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf) goto out_err; error = server->nfs_client->rpc_ops->statfs(server, fh, &res); + if (unlikely(error == -ESTALE)) { + struct dentry *pd_dentry; + pd_dentry = dget_parent(dentry); + if (pd_dentry != NULL) { + nfs_zap_caches(pd_dentry->d_inode); + dput(pd_dentry); + } + } nfs_free_fattr(res.fattr); if (error < 0) goto out_err; @@ -652,6 +660,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, if (nfss->options & NFS_OPTION_FSCACHE) seq_printf(m, ",fsc"); + + if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) { + if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE) + seq_printf(m, ",lookupcache=none"); + else + seq_printf(m, ",lookupcache=pos"); + } } /* diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c index 0c6d816..7c831a2 100644 --- a/fs/nfsd/lockd.c +++ b/fs/nfsd/lockd.c @@ -38,7 +38,6 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp) exp_readlock(); nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp); 
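Editor's note, not part of the patch: the nfs4state.c hunk above splits the old nfs4_state_end_reclaim_reboot() into a clear helper that reports whether the reclaim flag was actually set and a wrapper that only sends RECLAIM_COMPLETE in that case, so the error path can call the clear helper alone. A toy restatement of that split, with illustrative names and a bool in place of the cl_state bit:

#include <stdbool.h>
#include <stdio.h>

struct client {
        bool reclaim_reboot;
};

/* Returns nonzero only if the flag was set, clearing it on the way. */
static int clear_reclaim_reboot(struct client *clp)
{
        if (!clp->reclaim_reboot)
                return 0;
        clp->reclaim_reboot = false;
        /* the kernel walks the state owners and clears per-state bits here */
        return 1;
}

/* Only run the completion step when the clear actually did something. */
static void end_reclaim_reboot(struct client *clp)
{
        if (!clear_reclaim_reboot(clp))
                return;
        printf("sending RECLAIM_COMPLETE\n");
}

int main(void)
{
        struct client clp = { .reclaim_reboot = true };

        end_reclaim_reboot(&clp);       /* completes once */
        end_reclaim_reboot(&clp);       /* second call is a no-op */
        return 0;
}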
fh_put(&fh); - rqstp->rq_client = NULL; exp_readunlock(); /* We return nlm error codes as nlm doesn't know * about nfsd, but nfsd does know about nlm.. diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 2a533a0..7e84a85 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -260,9 +260,11 @@ void fill_post_wcc(struct svc_fh *fhp) err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, &fhp->fh_post_attr); fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version; - if (err) + if (err) { fhp->fh_post_saved = 0; - else + /* Grab the ctime anyway - set_change_info might use it */ + fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime; + } else fhp->fh_post_saved = 1; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 59ec449..bb775a5 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -954,8 +954,8 @@ typedef __be32(*nfsd4op_func)(struct svc_rqst *, struct nfsd4_compound_state *, void *); enum nfsd4_op_flags { ALLOWED_WITHOUT_FH = 1 << 0, /* No current filehandle required */ - ALLOWED_ON_ABSENT_FS = 2 << 0, /* ops processed on absent fs */ - ALLOWED_AS_FIRST_OP = 3 << 0, /* ops reqired first in compound */ + ALLOWED_ON_ABSENT_FS = 1 << 1, /* ops processed on absent fs */ + ALLOWED_AS_FIRST_OP = 1 << 2, /* ops reqired first in compound */ }; struct nfsd4_operation { diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index ac17a70..d88539d 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -316,8 +316,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) + return status; iattr->ia_valid |= ATTR_UID; } if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { @@ -327,8 +327,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) + return status; iattr->ia_valid |= ATTR_GID; } if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { @@ -1180,8 +1180,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy * 4); - for (i = 0; i < dummy; ++i) - READ32(dummy); break; case RPC_AUTH_GSS: dprintk("RPC_AUTH_GSS callback secflavor " @@ -1197,7 +1195,6 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, READ_BUF(4); READ32(dummy); READ_BUF(dummy); - p += XDR_QUADLEN(dummy); break; default: dprintk("Illegal callback secflavor\n"); diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 3c11112..4096277 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -819,7 +819,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino) if (ra->p_count == 0) frap = rap; } - depth = nfsdstats.ra_size*11/10; + depth = nfsdstats.ra_size; if (!frap) { spin_unlock(&rab->pb_lock); return NULL; diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 4d476ff..60fce3d 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -484,18 +484,17 @@ static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) static inline void set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) { - BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved); - cinfo->atomic = 1; + BUG_ON(!fhp->fh_pre_saved); + cinfo->atomic = 
fhp->fh_post_saved; cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); - if (cinfo->change_supported) { - cinfo->before_change = fhp->fh_pre_change; - cinfo->after_change = fhp->fh_post_change; - } else { - cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; - cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; - cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; - cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; - } + + cinfo->before_change = fhp->fh_pre_change; + cinfo->after_change = fhp->fh_post_change; + cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; + cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; + cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; + cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; + } int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *); diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index c9a30d7..d9d5d81 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -72,10 +72,9 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) /* * check to see if the page is mapped already (no holes) */ - if (PageMappedToDisk(page)) { - unlock_page(page); + if (PageMappedToDisk(page)) goto mapped; - } + if (page_has_buffers(page)) { struct buffer_head *bh, *head; int fully_mapped = 1; @@ -90,7 +89,6 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (fully_mapped) { SetPageMappedToDisk(page); - unlock_page(page); goto mapped; } } @@ -105,16 +103,18 @@ static int nilfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; ret = block_page_mkwrite(vma, vmf, nilfs_get_block); - if (unlikely(ret)) { + if (ret != VM_FAULT_LOCKED) { nilfs_transaction_abort(inode->i_sb); return ret; } + nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, + 1 << (PAGE_SHIFT - inode->i_blkbits)); nilfs_transaction_commit(inode->i_sb); mapped: SetPageChecked(page); wait_on_page_writeback(page); - return 0; + return VM_FAULT_LOCKED; } static const struct vm_operations_struct nilfs_file_vm_ops = { diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 414ef68..fbb354c 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -336,9 +336,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno) list_add(&sbi->s_list, &nilfs->ns_supers); up_write(&nilfs->ns_super_sem); + err = -ENOMEM; sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size); if (!sbi->s_ifile) - return -ENOMEM; + goto delist; down_read(&nilfs->ns_segctor_sem); err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp, @@ -369,6 +370,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno) nilfs_mdt_destroy(sbi->s_ifile); sbi->s_ifile = NULL; + delist: down_write(&nilfs->ns_super_sem); list_del_init(&sbi->s_list); up_write(&nilfs->ns_super_sem); diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index e46ca68..72f8825 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -96,8 +96,11 @@ static inline __u32 inotify_arg_to_mask(u32 arg) { __u32 mask; - /* everything should accept their own ignored and cares about children */ - mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD); + /* + * everything should accept their own ignored, cares about children, + * and should receive events when the inode is unmounted + */ + mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT); /* mask off the flags used to open the fd */ mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT)); @@ -671,6 +674,7 @@ 
SYSCALL_DEFINE1(inotify_init1, int, flags) if (ret >= 0) return ret; + fsnotify_put_group(group); atomic_dec(&user->inotify_devs); out_free_uid: free_uid(user); diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index da70229..a76e0aa 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handle, int ocfs2_check_acl(struct inode *inode, int mask) { - struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *di_bh = NULL; + struct posix_acl *acl; + int ret = -EAGAIN; - if (IS_ERR(acl)) + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return ret; + + ret = ocfs2_read_inode_block(inode, &di_bh); + if (ret < 0) { + mlog_errno(ret); + return ret; + } + + acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh); + + brelse(di_bh); + + if (IS_ERR(acl)) { + mlog_errno(PTR_ERR(acl)); return PTR_ERR(acl); + } if (acl) { - int ret = posix_acl_permission(inode, acl, mask); + ret = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); return ret; } @@ -344,7 +362,7 @@ int ocfs2_init_acl(handle_t *handle, { struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct posix_acl *acl = NULL; - int ret = 0; + int ret = 0, ret2; mode_t mode; if (!S_ISLNK(inode->i_mode)) { @@ -381,7 +399,12 @@ int ocfs2_init_acl(handle_t *handle, mode = inode->i_mode; ret = posix_acl_create_masq(clone, &mode); if (ret >= 0) { - ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); + ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode); + if (ret2) { + mlog_errno(ret2); + ret = ret2; + goto cleanup; + } if (ret > 0) { ret = ocfs2_set_acl(handle, inode, di_bh, ACL_TYPE_ACCESS, diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 356e976..1514e27 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -578,7 +578,9 @@ bail: static void ocfs2_dio_end_io(struct kiocb *iocb, loff_t offset, ssize_t bytes, - void *private) + void *private, + int ret, + bool is_async) { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; int level; @@ -592,6 +594,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb, if (!level) up_read(&inode->i_alloc_sem); ocfs2_rw_unlock(inode, level); + + if (is_async) + aio_complete(iocb, ret, 0); } /* @@ -1034,6 +1039,12 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, ocfs2_figure_cluster_boundaries(OCFS2_SB(inode->i_sb), cpos, &cluster_start, &cluster_end); + /* treat the write as new if the a hole/lseek spanned across + * the page boundary. 
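Editor's note, not part of the patch: the ocfs2_prepare_page_for_write() hunk above additionally treats a write as new when the target page starts at or past i_size and the write position falls on that page, i.e. when an lseek() or a hole crossed the page boundary. The predicate in isolation, with sample numbers (types and names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool write_is_new(long long i_size, long long page_offset,
                         long long user_pos)
{
        /* page lies entirely beyond EOF and the write lands on it */
        return i_size <= page_offset && page_offset <= user_pos;
}

int main(void)
{
        /* 4 KiB pages; file is 1000 bytes long, write lands at offset 5000 */
        printf("write at 5000 into page at 4096: %d\n",
               write_is_new(1000, 4096, 5000));
        /* write inside the first, already populated page */
        printf("write at 500 into page at 0:     %d\n",
               write_is_new(1000, 0, 500));
        return 0;
}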
+ */ + new = new | ((i_size_read(inode) <= page_offset(page)) && + (page_offset(page) <= user_pos)); + if (page == wc->w_target_page) { map_from = user_pos & (PAGE_CACHE_SIZE - 1); map_to = map_from + user_len; diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 94b97fc..ffb4c68 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -511,8 +511,6 @@ static void dlm_lockres_release(struct kref *kref) atomic_dec(&dlm->res_cur_count); - dlm_put(dlm); - if (!hlist_unhashed(&res->hash_node) || !list_empty(&res->granted) || !list_empty(&res->converting) || @@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm, res->migration_pending = 0; res->inflight_locks = 0; - /* put in dlm_lockres_release */ - dlm_grab(dlm); res->dlm = dlm; kref_init(&res->refs); @@ -3050,8 +3046,6 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, /* check for pre-existing lock */ spin_lock(&dlm->spinlock); res = __dlm_lookup_lockres(dlm, name, namelen, hash); - spin_lock(&dlm->master_lock); - if (res) { spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { @@ -3069,14 +3063,15 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&res->spinlock); } + spin_lock(&dlm->master_lock); /* ignore status. only nonzero status would BUG. */ ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, namelen, migrate->new_master, migrate->master); -unlock: spin_unlock(&dlm->master_lock); +unlock: spin_unlock(&dlm->spinlock); if (oldmle) { diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 9dfaac7..aaaffbc 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -1997,6 +1997,8 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, struct list_head *queue; struct dlm_lock *lock, *next; + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&res->spinlock); res->state |= DLM_LOCK_RES_RECOVERING; if (!list_empty(&res->recovering)) { mlog(0, @@ -2326,19 +2328,15 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) /* zero the lvb if necessary */ dlm_revalidate_lvb(dlm, res, dead_node); if (res->owner == dead_node) { - if (res->state & DLM_LOCK_RES_DROPPING_REF) - mlog(0, "%s:%.*s: owned by " - "dead node %u, this node was " - "dropping its ref when it died. " - "continue, dropping the flag.\n", - dlm->name, res->lockname.len, - res->lockname.name, dead_node); - - /* the wake_up for this will happen when the - * RECOVERING flag is dropped later */ - res->state &= ~DLM_LOCK_RES_DROPPING_REF; + if (res->state & DLM_LOCK_RES_DROPPING_REF) { + mlog(ML_NOTICE, "Ignore %.*s for " + "recovery as it is being freed\n", + res->lockname.len, + res->lockname.name); + } else + dlm_move_lockres_to_recovery_list(dlm, + res); - dlm_move_lockres_to_recovery_list(dlm, res); } else if (res->owner == dlm->node_num) { dlm_free_dead_locks(dlm, res, dead_node); __dlm_lockres_calc_usage(dlm, res); diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index d4f73ca..2211acf 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res) * truly ready to be freed. 
*/ int __dlm_lockres_unused(struct dlm_lock_resource *res) { - if (!__dlm_lockres_has_locks(res) && - (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) { - /* try not to scan the bitmap unless the first two - * conditions are already true */ - int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); - if (bit >= O2NM_MAX_NODES) { - /* since the bit for dlm->node_num is not - * set, inflight_locks better be zero */ - BUG_ON(res->inflight_locks != 0); - return 1; - } - } - return 0; + int bit; + + if (__dlm_lockres_has_locks(res)) + return 0; + + if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) + return 0; + + if (res->state & DLM_LOCK_RES_RECOVERING) + return 0; + + bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); + if (bit < O2NM_MAX_NODES) + return 0; + + /* + * since the bit for dlm->node_num is not set, inflight_locks better + * be zero + */ + BUG_ON(res->inflight_locks != 0); + return 1; } @@ -152,45 +160,25 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, spin_unlock(&dlm->spinlock); } -static int dlm_purge_lockres(struct dlm_ctxt *dlm, +static void dlm_purge_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { int master; int ret = 0; - spin_lock(&res->spinlock); - if (!__dlm_lockres_unused(res)) { - mlog(0, "%s:%.*s: tried to purge but not unused\n", - dlm->name, res->lockname.len, res->lockname.name); - __dlm_print_one_lock_resource(res); - spin_unlock(&res->spinlock); - BUG(); - } - - if (res->state & DLM_LOCK_RES_MIGRATING) { - mlog(0, "%s:%.*s: Delay dropref as this lockres is " - "being remastered\n", dlm->name, res->lockname.len, - res->lockname.name); - /* Re-add the lockres to the end of the purge list */ - if (!list_empty(&res->purge)) { - list_del_init(&res->purge); - list_add_tail(&res->purge, &dlm->purge_list); - } - spin_unlock(&res->spinlock); - return 0; - } + assert_spin_locked(&dlm->spinlock); + assert_spin_locked(&res->spinlock); master = (res->owner == dlm->node_num); - if (!master) - res->state |= DLM_LOCK_RES_DROPPING_REF; - spin_unlock(&res->spinlock); mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, res->lockname.name, master); if (!master) { + res->state |= DLM_LOCK_RES_DROPPING_REF; /* drop spinlock... retake below */ + spin_unlock(&res->spinlock); spin_unlock(&dlm->spinlock); spin_lock(&res->spinlock); @@ -208,31 +196,35 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm, mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", dlm->name, res->lockname.len, res->lockname.name, ret); spin_lock(&dlm->spinlock); + spin_lock(&res->spinlock); } - spin_lock(&res->spinlock); if (!list_empty(&res->purge)) { mlog(0, "removing lockres %.*s:%p from purgelist, " "master = %d\n", res->lockname.len, res->lockname.name, res, master); list_del_init(&res->purge); - spin_unlock(&res->spinlock); dlm_lockres_put(res); dlm->purge_count--; - } else - spin_unlock(&res->spinlock); + } + + if (!__dlm_lockres_unused(res)) { + mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n", + dlm->name, res->lockname.len, res->lockname.name); + __dlm_print_one_lock_resource(res); + BUG(); + } __dlm_unhash_lockres(res); /* lockres is not in the hash now. drop the flag and wake up * any processes waiting in dlm_get_lock_resource. 
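Editor's note, not part of the patch: the rewritten __dlm_lockres_unused() above turns one nested condition into a series of early returns, leaving the refmap bitmap scan until the cheap checks have passed. A userspace sketch of the same guard-clause structure, with an assumed struct lockres and a plain byte-array refmap instead of find_next_bit():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_NODES 255

struct lockres {
        int nr_locks;
        bool dirty;
        bool recovering;
        unsigned char refmap[MAX_NODES / 8 + 1];
};

static bool refmap_empty(const struct lockres *res)
{
        size_t i;

        for (i = 0; i < sizeof(res->refmap); i++)
                if (res->refmap[i])
                        return false;
        return true;
}

static bool lockres_unused(const struct lockres *res)
{
        if (res->nr_locks)
                return false;
        if (res->dirty)
                return false;
        if (res->recovering)
                return false;
        return refmap_empty(res);
}

int main(void)
{
        struct lockres res;

        memset(&res, 0, sizeof(res));
        printf("fresh resource unused:      %d\n", lockres_unused(&res));
        res.refmap[0] = 1;      /* some node still holds a reference */
        printf("referenced resource unused: %d\n", lockres_unused(&res));
        return 0;
}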
*/ if (!master) { - spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_DROPPING_REF; spin_unlock(&res->spinlock); wake_up(&res->wq); - } - return 0; + } else + spin_unlock(&res->spinlock); } static void dlm_run_purge_list(struct dlm_ctxt *dlm, @@ -251,17 +243,7 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm, lockres = list_entry(dlm->purge_list.next, struct dlm_lock_resource, purge); - /* Status of the lockres *might* change so double - * check. If the lockres is unused, holding the dlm - * spinlock will prevent people from getting and more - * refs on it -- there's no need to keep the lockres - * spinlock. */ spin_lock(&lockres->spinlock); - unused = __dlm_lockres_unused(lockres); - spin_unlock(&lockres->spinlock); - - if (!unused) - continue; purge_jiffies = lockres->last_used + msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); @@ -273,15 +255,29 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm, * in tail order, we can stop at the first * unpurgable resource -- anyone added after * him will have a greater last_used value */ + spin_unlock(&lockres->spinlock); break; } + /* Status of the lockres *might* change so double + * check. If the lockres is unused, holding the dlm + * spinlock will prevent people from getting and more + * refs on it. */ + unused = __dlm_lockres_unused(lockres); + if (!unused || + (lockres->state & DLM_LOCK_RES_MIGRATING)) { + mlog(0, "lockres %s:%.*s: is in use or " + "being remastered, used %d, state %d\n", + dlm->name, lockres->lockname.len, + lockres->lockname.name, !unused, lockres->state); + list_move_tail(&dlm->purge_list, &lockres->purge); + spin_unlock(&lockres->spinlock); + continue; + } + dlm_lockres_get(lockres); - /* This may drop and reacquire the dlm spinlock if it - * has to do migration. */ - if (dlm_purge_lockres(dlm, lockres)) - BUG(); + dlm_purge_lockres(dlm, lockres); dlm_lockres_put(lockres); diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index abb0a95..201e7bc 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -488,7 +488,11 @@ static int ocfs2_read_locked_inode(struct inode *inode, OCFS2_BH_IGNORE_CACHE); } else { status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh); - if (!status) + /* + * If buffer is in jbd, then its checksum may not have been + * computed as yet. + */ + if (!status && !buffer_jbd(bh)) status = ocfs2_validate_inode_block(osb->sb, bh); } if (status < 0) { diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 3ac5aa7..c90b2dd 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2436,16 +2436,26 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + le32_to_cpu(rec.r_clusters)) - cpos; /* - * If the refcount rec already exist, cool. We just need - * to check whether there is a split. Otherwise we just need - * to increase the refcount. - * If we will insert one, increases recs_add. - * * We record all the records which will be inserted to the * same refcount block, so that we can tell exactly whether * we need a new refcount block or not. + * + * If we will insert a new one, this is easy and only happens + * during adding refcounted flag to the extent, so we don't + * have a chance of spliting. We just need one record. + * + * If the refcount rec already exists, that would be a little + * complicated. we may have to: + * 1) split at the beginning if the start pos isn't aligned. + * we need 1 more record in this case. + * 2) split int the end if the end pos isn't aligned. 
+ * we need 1 more record in this case. + * 3) split in the middle because of file system fragmentation. + * we need 2 more records in this case(we can't detect this + * beforehand, so always think of the worst case). */ if (rec.r_refcount) { + recs_add += 2; /* Check whether we need a split at the beginning. */ if (cpos == start_cpos && cpos != le64_to_cpu(rec.r_cpos)) @@ -3205,7 +3215,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, u32 num_clusters, unsigned int e_flags) { int ret, delete, index, credits = 0; - u32 new_bit, new_len; + u32 new_bit, new_len, orig_num_clusters; unsigned int set_len; struct ocfs2_super *osb = OCFS2_SB(sb); handle_t *handle; @@ -3238,6 +3248,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, goto out; } + orig_num_clusters = num_clusters; + while (num_clusters) { ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, p_cluster, num_clusters, @@ -3325,7 +3337,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, * in write-back mode. */ if (context->get_clusters == ocfs2_di_get_clusters) { - ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); + ret = ocfs2_cow_sync_writeback(sb, context, cpos, + orig_num_clusters); if (ret) mlog_errno(ret); } diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c index 2dc57bc..22db114 100644 --- a/fs/ocfs2/stack_user.c +++ b/fs/ocfs2/stack_user.c @@ -191,7 +191,7 @@ static struct ocfs2_live_connection *ocfs2_connection_find(const char *name) return c; } - return c; + return NULL; } /* diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index 32499d21..9975457 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -128,7 +128,7 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry, } /* Fast symlinks can't be large */ - len = strlen(target); + len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb)); link = kzalloc(len + 1, GFP_NOFS); if (!link) { status = -ENOMEM; diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 648c9d8..5638c19 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) } vm->vblk_size = get_unaligned_be32(data + 0x08); + if (vm->vblk_size == 0) { + ldm_error ("Illegal VBLK size"); + return false; + } + vm->vblk_offset = get_unaligned_be32(data + 0x0C); vm->last_vblk_seq = get_unaligned_be32(data + 0x04); diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 74465ff..0984d92 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c @@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) int mac_partition(struct parsed_partitions *state) { - int slot = 1; Sector sect; unsigned char *data; - int blk, blocks_in_map; + int slot, blocks_in_map; unsigned secsize; #ifdef CONFIG_PPC_PMAC int found_root = 0; @@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) put_dev_sector(sect); return 0; /* not a MacOS disk */ } - printk(" [mac]"); blocks_in_map = be32_to_cpu(part->map_count); - for (blk = 1; blk <= blocks_in_map; ++blk) { - int pos = blk * secsize; + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { + put_dev_sector(sect); + return 0; + } + printk(" [mac]"); + for (slot = 1; slot <= blocks_in_map; ++slot) { + int pos = slot * secsize; put_dev_sector(sect); data = read_part_sector(state, pos/512, §); if (!data) @@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) } if (goodness > found_root_goodness) { - found_root = blk; + found_root = slot; 
found_root_goodness = goodness; } } #endif /* CONFIG_PPC_PMAC */ - - ++slot; } #ifdef CONFIG_PPC_PMAC if (found_root_goodness) diff --git a/fs/pipe.c b/fs/pipe.c index 279eef9..a58d7ee 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -382,7 +382,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov, error = ops->confirm(pipe, buf); if (error) { if (!ret) - error = ret; + ret = error; break; } @@ -1197,12 +1197,24 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf, return ret; } +/* + * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same + * location, so checking ->i_pipe is not enough to verify that this is a + * pipe. + */ +struct pipe_inode_info *get_pipe_info(struct file *file) +{ + struct inode *i = file->f_path.dentry->d_inode; + + return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL; +} + long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct pipe_inode_info *pipe; long ret; - pipe = file->f_path.dentry->d_inode->i_pipe; + pipe = get_pipe_info(file); if (!pipe) return -EBADF; diff --git a/fs/proc/array.c b/fs/proc/array.c index fff6572..9e5f430 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, task_cap(m, task); task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); -#if defined(CONFIG_S390) - task_show_regs(m, task); -#endif task_context_switch_counts(m, task); return 0; } @@ -492,8 +489,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, vsize, mm ? get_mm_rss(mm) : 0, rsslim, - mm ? mm->start_code : 0, - mm ? mm->end_code : 0, + mm ? (permitted ? mm->start_code : 1) : 0, + mm ? (permitted ? mm->end_code : 1) : 0, (permitted && mm) ? mm->start_stack : 0, esp, eip, diff --git a/fs/proc/base.c b/fs/proc/base.c index acb7ef8..d073b90 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2867,11 +2867,16 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi /* for the /proc/ directory itself, after non-process stuff has been done */ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir) { - unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; - struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); + unsigned int nr; + struct task_struct *reaper; struct tgid_iter iter; struct pid_namespace *ns; + if (filp->f_pos >= PID_MAX_LIMIT + TGID_OFFSET) + goto out_no_task; + nr = filp->f_pos - FIRST_PROCESS_ENTRY; + + reaper = get_proc_task(filp->f_path.dentry->d_inode); if (!reaper) goto out_no_task; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index aea1d3f..0dfd815 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -210,6 +210,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) int flags = vma->vm_flags; unsigned long ino = 0; unsigned long long pgoff = 0; + unsigned long start; dev_t dev = 0; int len; @@ -220,8 +221,14 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; } + /* We don't show the stack guard page in /proc/maps */ + start = vma->vm_start; + if (vma->vm_flags & VM_GROWSDOWN) + if (!vma_stack_continue(vma->vm_prev, vma->vm_start)) + start += PAGE_SIZE; + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", - vma->vm_start, + start, vma->vm_end, flags & VM_READ ? 'r' : '-', flags & VM_WRITE ? 
'w' : '-', @@ -241,8 +248,8 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) const char *name = arch_vma_name(vma); if (!name) { if (mm) { - if (vma->vm_start <= mm->start_brk && - vma->vm_end >= mm->brk) { + if (vma->vm_start <= mm->brk && + vma->vm_end >= mm->start_brk) { name = "[heap]"; } else if (vma->vm_start <= mm->start_stack && vma->vm_end >= mm->start_stack) { diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 91c817f..2367fb3 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -163,7 +163,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer, static const struct file_operations proc_vmcore_operations = { .read = read_vmcore, - .llseek = generic_file_llseek, + .llseek = default_llseek, }; static struct vmcore* __init get_new_element(void) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 437d2ca..42b0881 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -422,7 +422,7 @@ EXPORT_SYMBOL(dquot_acquire); */ int dquot_commit(struct dquot *dquot) { - int ret = 0, ret2 = 0; + int ret = 0; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dqopt->dqio_mutex); @@ -434,15 +434,10 @@ int dquot_commit(struct dquot *dquot) spin_unlock(&dq_list_lock); /* Inactive dquot can be only if there was error during read/init * => we have better not writing it */ - if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) { + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot); - if (info_dirty(&dqopt->info[dquot->dq_type])) { - ret2 = dqopt->ops[dquot->dq_type]->write_file_info( - dquot->dq_sb, dquot->dq_type); - } - if (ret >= 0) - ret = ret2; - } + else + ret = -EIO; out_sem: mutex_unlock(&dqopt->dqio_mutex); return ret; diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index d532c20..9bfb81b 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -112,6 +112,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize) SetPageDirty(page); unlock_page(page); + put_page(page); } return 0; diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index f53505d..4131f4a 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c @@ -170,6 +170,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page, int reiserfs_unpack(struct inode *inode, struct file *filp) { int retval = 0; + int depth; int index; struct page *page; struct address_space *mapping; @@ -185,11 +186,10 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) return 0; } - /* we need to make sure nobody is changing the file size beneath - ** us - */ - mutex_lock(&inode->i_mutex); - reiserfs_write_lock(inode->i_sb); + depth = reiserfs_write_lock_once(inode->i_sb); + + /* we need to make sure nobody is changing the file size beneath us */ + reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); write_from = inode->i_size & (blocksize - 1); /* if we are on a block boundary, we are already unpacked. 
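Editor's note, not part of the patch: the fs/proc/task_mmu.c hunk above changes the "[heap]" label from a containment test (the VMA must cover the whole brk region) to an overlap test, so a VMA covering only part of [start_brk, brk] is still reported as heap. The symmetric overlap condition on its own, with made-up addresses:

#include <stdbool.h>
#include <stdio.h>

/* Two inclusive ranges overlap iff each one starts before the other ends. */
static bool ranges_overlap(unsigned long a_start, unsigned long a_end,
                           unsigned long b_start, unsigned long b_end)
{
        return a_start <= b_end && a_end >= b_start;
}

int main(void)
{
        unsigned long start_brk = 0x1000, brk = 0x3000;

        /* a VMA covering only part of the brk range still counts as heap */
        printf("partial vma: %d\n",
               ranges_overlap(0x2000, 0x2fff, start_brk, brk));
        /* a VMA far away from the brk range does not */
        printf("distant vma: %d\n",
               ranges_overlap(0x8000, 0x9000, start_brk, brk));
        return 0;
}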
*/ @@ -224,6 +224,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) out: mutex_unlock(&inode->i_mutex); - reiserfs_write_unlock(inode->i_sb); + reiserfs_write_unlock_once(inode->i_sb, depth); return retval; } diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 536d697..90d2fcb 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -472,7 +472,9 @@ int reiserfs_acl_chmod(struct inode *inode) struct reiserfs_transaction_handle th; size_t size = reiserfs_xattr_nblocks(inode, reiserfs_acl_size(clone->a_count)); - reiserfs_write_lock(inode->i_sb); + int depth; + + depth = reiserfs_write_lock_once(inode->i_sb); error = journal_begin(&th, inode->i_sb, size * 2); if (!error) { int error2; @@ -482,7 +484,7 @@ int reiserfs_acl_chmod(struct inode *inode) if (error2) error = error2; } - reiserfs_write_unlock(inode->i_sb); + reiserfs_write_unlock_once(inode->i_sb, depth); } posix_acl_release(clone); return error; diff --git a/fs/signalfd.c b/fs/signalfd.c index f329849c..1c5a6ad 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -88,6 +88,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, err |= __put_user(kinfo->si_tid, &uinfo->ssi_tid); err |= __put_user(kinfo->si_overrun, &uinfo->ssi_overrun); err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr); + err |= __put_user(kinfo->si_int, &uinfo->ssi_int); break; case __SI_POLL: err |= __put_user(kinfo->si_band, &uinfo->ssi_band); @@ -111,6 +112,7 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo, err |= __put_user(kinfo->si_pid, &uinfo->ssi_pid); err |= __put_user(kinfo->si_uid, &uinfo->ssi_uid); err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr); + err |= __put_user(kinfo->si_int, &uinfo->ssi_int); break; default: /* diff --git a/fs/splice.c b/fs/splice.c index efdbfec..188f61a 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1323,18 +1323,6 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, struct pipe_inode_info *opipe, size_t len, unsigned int flags); -/* - * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same - * location, so checking ->i_pipe is not enough to verify that this is a - * pipe. - */ -static inline struct pipe_inode_info *pipe_info(struct inode *inode) -{ - if (S_ISFIFO(inode->i_mode)) - return inode->i_pipe; - - return NULL; -} /* * Determine where to splice to/from. 
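Editor's note, not part of the patch: get_pipe_info() in the pipe.c and splice.c hunks above checks S_ISFIFO() before trusting i_pipe, because i_pipe, i_bdev and i_cdev share storage after the inode slimming. A userspace analogue of the same precaution, checking the file type before treating a descriptor as a pipe; fd_is_pipe() is an illustrative helper, not a kernel interface.

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Ask the kernel what the descriptor really is instead of assuming. */
static int fd_is_pipe(int fd)
{
        struct stat st;

        if (fstat(fd, &st) != 0)
                return 0;
        return S_ISFIFO(st.st_mode);
}

int main(void)
{
        int fds[2], devnull;

        if (pipe(fds) != 0)
                return 1;
        devnull = open("/dev/null", O_RDONLY);

        printf("pipe read end: %d\n", fd_is_pipe(fds[0]));
        printf("/dev/null:     %d\n", fd_is_pipe(devnull));
        return 0;
}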
@@ -1348,8 +1336,8 @@ static long do_splice(struct file *in, loff_t __user *off_in, loff_t offset, *off; long ret; - ipipe = pipe_info(in->f_path.dentry->d_inode); - opipe = pipe_info(out->f_path.dentry->d_inode); + ipipe = get_pipe_info(in); + opipe = get_pipe_info(out); if (ipipe && opipe) { if (off_in || off_out) @@ -1567,7 +1555,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov, int error; long ret; - pipe = pipe_info(file->f_path.dentry->d_inode); + pipe = get_pipe_info(file); if (!pipe) return -EBADF; @@ -1654,7 +1642,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, }; long ret; - pipe = pipe_info(file->f_path.dentry->d_inode); + pipe = get_pipe_info(file); if (!pipe) return -EBADF; @@ -2034,8 +2022,8 @@ static int link_pipe(struct pipe_inode_info *ipipe, static long do_tee(struct file *in, struct file *out, size_t len, unsigned int flags) { - struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode); - struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode); + struct pipe_inode_info *ipipe = get_pipe_info(in); + struct pipe_inode_info *opipe = get_pipe_info(out); int ret = -EINVAL; /* diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c index 12b933a..a37d445 100644 --- a/fs/squashfs/dir.c +++ b/fs/squashfs/dir.c @@ -172,6 +172,11 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) length += sizeof(dirh); dir_count = le32_to_cpu(dirh.count) + 1; + + /* dir_count should never be larger than 256 */ + if (dir_count > 256) + goto failed_read; + while (dir_count--) { /* * Read directory entry. @@ -183,6 +188,10 @@ static int squashfs_readdir(struct file *file, void *dirent, filldir_t filldir) size = le16_to_cpu(dire->size) + 1; + /* size should never be larger than SQUASHFS_NAME_LEN */ + if (size > SQUASHFS_NAME_LEN) + goto failed_read; + err = squashfs_read_metadata(inode->i_sb, dire->name, &block, &offset, size); if (err < 0) diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c index 7a9464d..5d922a6 100644 --- a/fs/squashfs/namei.c +++ b/fs/squashfs/namei.c @@ -176,6 +176,11 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, length += sizeof(dirh); dir_count = le32_to_cpu(dirh.count) + 1; + + /* dir_count should never be larger than 256 */ + if (dir_count > 256) + goto data_error; + while (dir_count--) { /* * Read directory entry. 
@@ -187,6 +192,10 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry, size = le16_to_cpu(dire->size) + 1; + /* size should never be larger than SQUASHFS_NAME_LEN */ + if (size > SQUASHFS_NAME_LEN) + goto data_error; + err = squashfs_read_metadata(dir->i_sb, dire->name, &block, &offset, size); if (err < 0) @@ -228,6 +237,9 @@ exit_lookup: d_add(dentry, inode); return ERR_PTR(0); +data_error: + err = -EIO; + read_failure: ERROR("Unable to read directory block [%llx:%x]\n", squashfs_i(dir)->start + msblk->directory_table, diff --git a/fs/super.c b/fs/super.c index 938119a..c7765bd 100644 --- a/fs/super.c +++ b/fs/super.c @@ -305,8 +305,13 @@ retry: if (s) { up_write(&s->s_umount); destroy_super(s); + s = NULL; } down_write(&old->s_umount); + if (unlikely(!(old->s_flags & MS_BORN))) { + deactivate_locked_super(old); + goto retry; + } return old; } } @@ -909,6 +914,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void goto out_free_secdata; BUG_ON(!mnt->mnt_sb); WARN_ON(!mnt->mnt_sb->s_bdi); + mnt->mnt_sb->s_flags |= MS_BORN; error = security_sb_kern_mount(mnt->mnt_sb, flags, secdata); if (error) diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 1beaa73..cd79684 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file) char *p; p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file)); - if (p) + if (!IS_ERR(p)) memmove(last_sysfs_file, p, strlen(p) + 1); /* need attr_sd for attr and ops, its parent for kobj */ diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index 37fa7ed..de01f28 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -519,7 +519,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) size_t sz; if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX)) - goto out; + return 0; INIT_LIST_HEAD(&list); diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index c2a68ba..a0c09fc 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -961,11 +961,39 @@ void dbg_dump_index(struct ubifs_info *c) void dbg_save_space_info(struct ubifs_info *c) { struct ubifs_debug_info *d = c->dbg; - - ubifs_get_lp_stats(c, &d->saved_lst); + int freeable_cnt; spin_lock(&c->space_lock); + memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats)); + + /* + * We use a dirty hack here and zero out @c->freeable_cnt, because it + * affects the free space calculations, and UBIFS might not know about + * all freeable eraseblocks. Indeed, we know about freeable eraseblocks + * only when we read their lprops, and we do this only lazily, upon the + * need. So at any given point of time @c->freeable_cnt might be not + * exactly accurate. + * + * Just one example about the issue we hit when we did not zero + * @c->freeable_cnt. + * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the + * amount of free space in @d->saved_free + * 2. We re-mount R/W, which makes UBIFS to read the "lsave" + * information from flash, where we cache LEBs from various + * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()' + * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()' + * -> 'ubifs_get_pnode()' -> 'update_cats()' + * -> 'ubifs_add_to_cat()'). + * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt + * becomes %1. + * 4. We calculate the amount of free space when the re-mount is + * finished in 'dbg_check_space_info()' and it does not match + * @d->saved_free. 
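The long UBIFS comment above boils down to a small save/zero/restore idiom. A generic sketch with hypothetical names (struct space_state, lazy_cnt, compute_free()) to make the shape explicit: a counter that is only lazily kept accurate is masked out for the duration of the calculation, so every snapshot is computed on the same basis and later comparisons stay meaningful.

        static long long snapshot_free_space(struct space_state *s)
        {
                long long snapshot;
                int saved;

                spin_lock(&s->lock);
                saved = s->lazy_cnt;
                s->lazy_cnt = 0;             /* ignore the unreliable component */
                snapshot = compute_free(s);  /* every snapshot uses one basis   */
                s->lazy_cnt = saved;         /* restore before dropping the lock */
                spin_unlock(&s->lock);

                return snapshot;
        }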
+ */ + freeable_cnt = c->freeable_cnt; + c->freeable_cnt = 0; d->saved_free = ubifs_get_free_space_nolock(c); + c->freeable_cnt = freeable_cnt; spin_unlock(&c->space_lock); } @@ -982,12 +1010,15 @@ int dbg_check_space_info(struct ubifs_info *c) { struct ubifs_debug_info *d = c->dbg; struct ubifs_lp_stats lst; - long long avail, free; + long long free; + int freeable_cnt; spin_lock(&c->space_lock); - avail = ubifs_calc_available(c, c->min_idx_lebs); + freeable_cnt = c->freeable_cnt; + c->freeable_cnt = 0; + free = ubifs_get_free_space_nolock(c); + c->freeable_cnt = freeable_cnt; spin_unlock(&c->space_lock); - free = ubifs_get_free_space(c); if (free != d->saved_free) { ubifs_err("free space changed from %lld to %lld", @@ -2656,19 +2687,19 @@ int dbg_debugfs_init_fs(struct ubifs_info *c) } fname = "dump_lprops"; - dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); + dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); if (IS_ERR(dent)) goto out_remove; d->dfs_dump_lprops = dent; fname = "dump_budg"; - dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); + dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); if (IS_ERR(dent)) goto out_remove; d->dfs_dump_budg = dent; fname = "dump_tnc"; - dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops); + dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops); if (IS_ERR(dent)) goto out_remove; d->dfs_dump_tnc = dent; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 12f445c..47f55ea 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1315,6 +1315,9 @@ int ubifs_fsync(struct file *file, int datasync) dbg_gen("syncing inode %lu", inode->i_ino); + if (inode->i_sb->s_flags & MS_RDONLY) + return 0; + /* * VFS has already synchronized dirty pages for this inode. Synchronize * the inode unless this is a 'datasync()' call. diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index ad7f67b..ead230e 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -1270,10 +1270,9 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip) lnum = branch->lnum; offs = branch->offs; pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_NOFS); - if (!pnode) { - err = -ENOMEM; - goto out; - } + if (!pnode) + return -ENOMEM; + if (lnum == 0) { /* * This pnode was not written which just means that the LEB diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c index 109c6ea..b712ed6 100644 --- a/fs/ubifs/recovery.c +++ b/fs/ubifs/recovery.c @@ -300,6 +300,32 @@ int ubifs_recover_master_node(struct ubifs_info *c) goto out_free; } memcpy(c->rcvrd_mst_node, c->mst_node, UBIFS_MST_NODE_SZ); + + /* + * We had to recover the master node, which means there was an + * unclean reboot. However, it is possible that the master node + * is clean at this point, i.e., %UBIFS_MST_DIRTY is not set. + * E.g., consider the following chain of events: + * + * 1. UBIFS was cleanly unmounted, so the master node is clean + * 2. UBIFS is being mounted R/W and starts changing the master + * node in the first (%UBIFS_MST_LNUM). A power cut happens, + * so this LEB ends up with some amount of garbage at the + * end. + * 3. UBIFS is being mounted R/O. We reach this place and + * recover the master node from the second LEB + * (%UBIFS_MST_LNUM + 1). But we cannot update the media + * because we are being mounted R/O. We have to defer the + * operation. + * 4. However, this master node (@c->mst_node) is marked as + * clean (since the step 1). 
And if we just return, the + * mount code will be confused and won't recover the master + * node when it is re-mounter R/W later. + * + * Thus, to force the recovery by marking the master node as + * dirty. + */ + c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY); } else { /* Write the recovered master node */ c->max_sqnum = le64_to_cpu(mst->ch.sqnum) - 1; diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 34640d6..f9f5567 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -275,8 +275,11 @@ xfs_end_io( xfs_finish_ioend(ioend, 0); /* ensure we don't spin on blocked ioends */ delay(1); - } else + } else { + if (ioend->io_iocb) + aio_complete(ioend->io_iocb, ioend->io_result, 0); xfs_destroy_ioend(ioend); + } } /* @@ -309,6 +312,8 @@ xfs_alloc_ioend( atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); ioend->io_offset = 0; ioend->io_size = 0; + ioend->io_iocb = NULL; + ioend->io_result = 0; INIT_WORK(&ioend->io_work, xfs_end_io); return ioend; @@ -1599,9 +1604,12 @@ xfs_end_io_direct( struct kiocb *iocb, loff_t offset, ssize_t size, - void *private) + void *private, + int ret, + bool is_async) { xfs_ioend_t *ioend = iocb->private; + bool complete_aio = is_async; /* * Non-NULL private data means we need to issue a transaction to @@ -1627,7 +1635,14 @@ xfs_end_io_direct( if (ioend->io_type == IO_READ) { xfs_finish_ioend(ioend, 0); } else if (private && size > 0) { - xfs_finish_ioend(ioend, is_sync_kiocb(iocb)); + if (is_async) { + ioend->io_iocb = iocb; + ioend->io_result = ret; + complete_aio = false; + xfs_finish_ioend(ioend, 0); + } else { + xfs_finish_ioend(ioend, 1); + } } else { /* * A direct I/O write ioend starts it's life in unwritten @@ -1645,6 +1660,9 @@ xfs_end_io_direct( * against double-freeing. */ iocb->private = NULL; + + if (complete_aio) + aio_complete(iocb, ret, 0); } STATIC ssize_t diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index 4cfc6ea..9f566d9 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -37,6 +37,8 @@ typedef struct xfs_ioend { size_t io_size; /* size of the extent */ xfs_off_t io_offset; /* offset in the file */ struct work_struct io_work; /* xfsdatad work queue */ + struct kiocb *io_iocb; + int io_result; } xfs_ioend_t; extern const struct address_space_operations xfs_address_space_operations; diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index e59a810..0a3eb18 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -703,14 +703,19 @@ xfs_ioc_fsgeometry_v1( xfs_mount_t *mp, void __user *arg) { - xfs_fsop_geom_v1_t fsgeo; + xfs_fsop_geom_t fsgeo; int error; - error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); + error = xfs_fs_geometry(mp, &fsgeo, 3); if (error) return -error; - if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) + /* + * Caller should have passed an argument of type + * xfs_fsop_geom_v1_t. This is a proper subset of the + * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 
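Two defensive idioms meet in the xfs_ioc_fsgeometry_v1() hunk below and are worth spelling out as a generic sketch (struct and helper names are hypothetical): zero the whole output structure before filling it, so padding and fields the older ABI never defined cannot leak kernel stack contents to user space, then copy only the prefix the caller's older structure actually has room for.

        static int fill_v1_geometry(void __user *uarg)
        {
                struct new_geom g;                /* hypothetical latest layout */

                memset(&g, 0, sizeof(g));         /* no uninitialised bytes escape */
                fill_geometry(&g);                /* hypothetical: fills every field */

                /* The v1 layout is a strict prefix of the new one, so copying
                 * only sizeof(struct old_geom_v1) bytes keeps the old ABI intact. */
                if (copy_to_user(uarg, &g, sizeof(struct old_geom_v1)))
                        return -EFAULT;
                return 0;
        }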
+ */ + if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) return -XFS_ERROR(EFAULT); return 0; } @@ -794,6 +799,8 @@ xfs_ioc_fsgetxattr( { struct fsxattr fa; + memset(&fa, 0, sizeof(struct fsxattr)); + xfs_ilock(ip, XFS_ILOCK_SHARED); fa.fsx_xflags = xfs_ip2xflags(ip); fa.fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog; diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index a51a07c..f38037d 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -711,13 +711,10 @@ xfs_inode_set_reclaim_tag( } void -__xfs_inode_clear_reclaim_tag( - xfs_mount_t *mp, +__xfs_inode_clear_reclaim( xfs_perag_t *pag, xfs_inode_t *ip) { - radix_tree_tag_clear(&pag->pag_ici_root, - XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); pag->pag_ici_reclaimable--; if (!pag->pag_ici_reclaimable) { /* clear the reclaim tag from the perag radix tree */ @@ -731,6 +728,17 @@ __xfs_inode_clear_reclaim_tag( } } +void +__xfs_inode_clear_reclaim_tag( + xfs_mount_t *mp, + xfs_perag_t *pag, + xfs_inode_t *ip) +{ + radix_tree_tag_clear(&pag->pag_ici_root, + XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG); + __xfs_inode_clear_reclaim(pag, ip); +} + /* * Inodes in different states need to be treated differently, and the return * value of xfs_iflush is not sufficient to get this right. The following table diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index e28139a..e4f7e1f 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -47,6 +47,7 @@ int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); void __xfs_inode_set_reclaim_tag(struct xfs_perag *pag, struct xfs_inode *ip); +void __xfs_inode_clear_reclaim(struct xfs_perag *pag, struct xfs_inode *ip); void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag, struct xfs_inode *ip); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 37a6f62..4e7f02b 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -57,6 +57,9 @@ xfs_fs_geometry( xfs_fsop_geom_t *geo, int new_version) { + + memset(geo, 0, sizeof(*geo)); + geo->blocksize = mp->m_sb.sb_blocksize; geo->rtextsize = mp->m_sb.sb_rextsize; geo->agblocks = mp->m_sb.sb_agblocks; diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index c7142a0..eb779af 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -1217,7 +1217,6 @@ xfs_imap_lookup( struct xfs_inobt_rec_incore rec; struct xfs_btree_cur *cur; struct xfs_buf *agbp; - xfs_agino_t startino; int error; int i; @@ -1231,13 +1230,13 @@ xfs_imap_lookup( } /* - * derive and lookup the exact inode record for the given agino. If the - * record cannot be found, then it's an invalid inode number and we - * should abort. + * Lookup the inode record for the given agino. If the record cannot be + * found, then it's an invalid inode number and we should abort. Once + * we have a record, we need to ensure it contains the inode number + * we are looking up. 
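The comment above states the general rule for less-than-or-equal ("LE") b-tree lookups: the lookup may legitimately return the record that precedes the key, so the caller must verify the key really falls inside the returned record before trusting it. A generic sketch with a hypothetical record type:

        #include <linux/types.h>

        struct irec {
                u64 start;      /* first inode covered by the record  */
                u64 count;      /* number of inodes the record covers */
        };

        /* Accept an LE-lookup result only if key lies in [start, start + count). */
        static bool rec_covers(const struct irec *rec, u64 key)
        {
                return key >= rec->start && key < rec->start + rec->count;
        }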
*/ cur = xfs_inobt_init_cursor(mp, tp, agbp, agno); - startino = agino & ~(XFS_IALLOC_INODES(mp) - 1); - error = xfs_inobt_lookup(cur, startino, XFS_LOOKUP_EQ, &i); + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i); if (!error) { if (i) error = xfs_inobt_get_rec(cur, &rec, &i); @@ -1250,6 +1249,11 @@ xfs_imap_lookup( if (error) return error; + /* check that the returned record contains the required inode */ + if (rec.ir_startino > agino || + rec.ir_startino + XFS_IALLOC_INODES(mp) <= agino) + return EINVAL; + /* for untrusted inodes check it is allocated first */ if ((flags & XFS_IGET_UNTRUSTED) && (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 8f8b91b..a8bb8c2 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -492,6 +492,7 @@ xfs_ireclaim( write_lock(&pag->pag_ici_lock); if (!radix_tree_delete(&pag->pag_ici_root, agino)) ASSERT(0); + __xfs_inode_clear_reclaim(pag, ip); write_unlock(&pag->pag_ici_lock); xfs_perag_put(pag); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index b76a829..f702218 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1927,6 +1927,11 @@ xfs_iunlink_remove( return 0; } +/* + * A big issue when freeing the inode cluster is is that we _cannot_ skip any + * inodes that are in memory - they all must be marked stale and attached to + * the cluster buffer. + */ STATIC void xfs_ifree_cluster( xfs_inode_t *free_ip, @@ -1958,8 +1963,6 @@ xfs_ifree_cluster( } for (j = 0; j < nbufs; j++, inum += ninodes) { - int found = 0; - blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), XFS_INO_TO_AGBNO(mp, inum)); @@ -1978,7 +1981,9 @@ xfs_ifree_cluster( /* * Walk the inodes already attached to the buffer and mark them * stale. These will all have the flush locks held, so an - * in-memory inode walk can't lock them. + * in-memory inode walk can't lock them. By marking them all + * stale first, we will not attempt to lock them in the loop + * below as the XFS_ISTALE flag will be set. */ lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); while (lip) { @@ -1990,11 +1995,11 @@ xfs_ifree_cluster( &iip->ili_flush_lsn, &iip->ili_item.li_lsn); xfs_iflags_set(iip->ili_inode, XFS_ISTALE); - found++; } lip = lip->li_bio_list; } + /* * For each inode in memory attempt to add it to the inode * buffer and set it up for being staled on buffer IO @@ -2006,6 +2011,7 @@ xfs_ifree_cluster( * even trying to lock them. */ for (i = 0; i < ninodes; i++) { +retry: read_lock(&pag->pag_ici_lock); ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, (inum + i))); @@ -2016,38 +2022,36 @@ xfs_ifree_cluster( continue; } - /* don't try to lock/unlock the current inode */ + /* + * Don't try to lock/unlock the current inode, but we + * _cannot_ skip the other inodes that we did not find + * in the list attached to the buffer and are not + * already marked stale. If we can't lock it, back off + * and retry. + */ if (ip != free_ip && !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { read_unlock(&pag->pag_ici_lock); - continue; + delay(1); + goto retry; } read_unlock(&pag->pag_ici_lock); - if (!xfs_iflock_nowait(ip)) { - if (ip != free_ip) - xfs_iunlock(ip, XFS_ILOCK_EXCL); - continue; - } - + xfs_iflock(ip); xfs_iflags_set(ip, XFS_ISTALE); - if (xfs_inode_clean(ip)) { - ASSERT(ip != free_ip); - xfs_ifunlock(ip); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - continue; - } + /* + * we don't need to attach clean inodes or those only + * with unlogged changes (which we throw away, anyway). 
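The rewritten xfs_ifree_cluster() loop above replaces "trylock or skip" with "trylock or back off and retry", because when a cluster is being freed a busy in-memory inode cannot simply be skipped. The idiom, condensed from the hunk (delay(1) is the millisecond sleep helper XFS already uses; the early return stands in for the loop's continue):

        retry:
                read_lock(&pag->pag_ici_lock);
                ip = radix_tree_lookup(&pag->pag_ici_root,
                                       XFS_INO_TO_AGINO(mp, inum + i));
                if (!ip) {
                        read_unlock(&pag->pag_ici_lock);
                        return;                 /* not in memory, nothing to mark */
                }
                if (ip != free_ip && !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
                        read_unlock(&pag->pag_ici_lock);
                        delay(1);               /* let the current holder finish  */
                        goto retry;             /* re-lookup: ip may be gone now  */
                }
                read_unlock(&pag->pag_ici_lock);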
+ */ iip = ip->i_itemp; - if (!iip) { - /* inode with unlogged changes only */ + if (!iip || xfs_inode_clean(ip)) { ASSERT(ip != free_ip); ip->i_update_core = 0; xfs_ifunlock(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); continue; } - found++; iip->ili_last_fields = iip->ili_format.ilf_fields; iip->ili_format.ilf_fields = 0; @@ -2063,8 +2067,7 @@ xfs_ifree_cluster( xfs_iunlock(ip, XFS_ILOCK_EXCL); } - if (found) - xfs_trans_stale_inode_buf(tp, bp); + xfs_trans_stale_inode_buf(tp, bp); xfs_trans_binval(tp, bp); } diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index e5039a2..103f08a 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -148,13 +148,17 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) #define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a) #define ACPI_FREE(a) kfree(a) -/* Used within ACPICA to show where it is safe to preempt execution */ -#include +#ifndef CONFIG_PREEMPT +/* + * Used within ACPICA to show where it is safe to preempt execution + * when CONFIG_PREEMPT=n + */ #define ACPI_PREEMPTION_POINT() \ do { \ - if (!in_atomic_preempt_off() && !irqs_disabled()) \ + if (!irqs_disabled()) \ cond_resched(); \ } while (0) +#endif #endif /* __KERNEL__ */ diff --git a/include/drm/drmP.h b/include/drm/drmP.h index c1b9871..3667812 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -802,7 +802,6 @@ struct drm_driver { */ int (*gem_init_object) (struct drm_gem_object *obj); void (*gem_free_object) (struct drm_gem_object *obj); - void (*gem_free_object_unlocked) (struct drm_gem_object *obj); /* vga arb irq handler */ void (*vgaarb_irq)(struct drm_device *dev, bool state); @@ -1022,7 +1021,7 @@ struct drm_device { struct pci_controller *hose; #endif struct drm_sg_mem *sg; /**< Scatter gather memory */ - int num_crtcs; /**< Number of CRTCs on this device */ + unsigned int num_crtcs; /**< Number of CRTCs on this device */ void *dev_private; /**< device private data */ void *mm_private; struct address_space *dev_mapping; @@ -1149,6 +1148,7 @@ extern int drm_release(struct inode *inode, struct file *filp); extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); extern void drm_vm_open_locked(struct vm_area_struct *vma); +extern void drm_vm_close_locked(struct vm_area_struct *vma); extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); @@ -1430,7 +1430,6 @@ int drm_gem_init(struct drm_device *dev); void drm_gem_destroy(struct drm_device *dev); void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct kref *kref); -void drm_gem_object_free_unlocked(struct kref *kref); struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, size_t size); int drm_gem_object_init(struct drm_device *dev, @@ -1456,8 +1455,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj) static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) { - if (obj != NULL) - kref_put(&obj->refcount, drm_gem_object_free_unlocked); + if (obj != NULL) { + struct drm_device *dev = obj->dev; + mutex_lock(&dev->struct_mutex); + kref_put(&obj->refcount, drm_gem_object_free); + mutex_unlock(&dev->struct_mutex); + } } int drm_gem_handle_create(struct drm_file *file_priv, diff --git a/include/drm/drm_pciids.h 
b/include/drm/drm_pciids.h index 2d428b0..40b1f0e 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -28,7 +28,6 @@ {0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \ {0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \ {0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ - {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \ {0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ {0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ {0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ @@ -85,7 +84,6 @@ {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ - {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ @@ -103,6 +101,7 @@ {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ @@ -146,6 +145,8 @@ {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \ {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \ @@ -161,6 +162,7 @@ {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \ @@ -174,6 +176,7 @@ {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ + {0x1002, 
0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \ @@ -314,6 +317,7 @@ {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x945E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \ {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -324,6 +328,7 @@ {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x948A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ @@ -366,6 +371,7 @@ {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x955f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index 5cb86c3..fc48754 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h @@ -99,7 +99,6 @@ struct rxrpc_key_token { * structure of raw payloads passed to add_key() or instantiate key */ struct rxrpc_key_data_v1 { - u32 kif_version; /* 1 */ u16 security_index; u16 ticket_length; u32 expiry; /* time_t */ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 2fc8e14..9aa9bca 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -276,6 +276,7 @@ ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm_para.h \ $(srctree)/include/asm-$(SRCARCH)/kvm_para.h),) unifdef-y += kvm_para.h endif +unifdef-y += l2tp.h unifdef-y += llc.h unifdef-y += loop.h unifdef-y += lp.h diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index c809e28..074b620 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -29,6 +29,7 @@ struct linux_binprm{ char buf[BINPRM_BUF_SIZE]; #ifdef CONFIG_MMU struct vm_area_struct *vma; + unsigned long vma_pages; #else # define MAX_ARG_PAGES 32 struct page *page[MAX_ARG_PAGES]; @@ -59,6 +60,10 @@ struct linux_binprm{ unsigned long loader, exec; }; +extern void 
acct_arg_size(struct linux_binprm *bprm, unsigned long pages); +extern struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, + int write); + #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 09a8402..c35de76 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -325,7 +325,7 @@ struct queue_limits { unsigned char misaligned; unsigned char discard_misaligned; - unsigned char no_cluster; + unsigned char cluster; signed char discard_zeroes_data; }; @@ -448,7 +448,6 @@ struct request_queue #endif }; -#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ @@ -469,7 +468,6 @@ struct request_queue #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ - (1 << QUEUE_FLAG_CLUSTER) | \ (1 << QUEUE_FLAG_STACKABLE) | \ (1 << QUEUE_FLAG_SAME_COMP)) @@ -636,6 +634,11 @@ enum { #define rq_data_dir(rq) ((rq)->cmd_flags & 1) +static inline unsigned int blk_queue_cluster(struct request_queue *q) +{ + return q->limits.cluster; +} + /* * We regard a request as sync, if either a read or a sync write */ @@ -936,7 +939,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); -extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); +extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); @@ -1086,7 +1089,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q) return q->limits.physical_block_size; } -static inline int bdev_physical_block_size(struct block_device *bdev) +static inline unsigned int bdev_physical_block_size(struct block_device *bdev) { return queue_physical_block_size(bdev_get_queue(bdev)); } diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 266ab92..499dfe9 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h @@ -105,6 +105,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, #define alloc_bootmem(x) \ __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) +#define alloc_bootmem_align(x, align) \ + __alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_nopanic(x) \ __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) #define alloc_bootmem_pages(x) \ diff --git a/include/linux/compat.h b/include/linux/compat.h index 168f7da..c766441 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type, const struct compat_iovec __user *uvector, unsigned long nr_segs, unsigned long fast_segs, struct iovec *fast_pointer, struct iovec **ret_pointer); + +extern void __user *compat_alloc_user_space(unsigned long len); + #endif /* CONFIG_COMPAT */ #endif /* _LINUX_COMPAT_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 151f5d7..69b43db 100644 --- a/include/linux/filter.h +++ 
b/include/linux/filter.h @@ -91,6 +91,54 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */ #define BPF_TAX 0x00 #define BPF_TXA 0x80 +enum { + BPF_S_RET_K = 0, + BPF_S_RET_A, + BPF_S_ALU_ADD_K, + BPF_S_ALU_ADD_X, + BPF_S_ALU_SUB_K, + BPF_S_ALU_SUB_X, + BPF_S_ALU_MUL_K, + BPF_S_ALU_MUL_X, + BPF_S_ALU_DIV_X, + BPF_S_ALU_AND_K, + BPF_S_ALU_AND_X, + BPF_S_ALU_OR_K, + BPF_S_ALU_OR_X, + BPF_S_ALU_LSH_K, + BPF_S_ALU_LSH_X, + BPF_S_ALU_RSH_K, + BPF_S_ALU_RSH_X, + BPF_S_ALU_NEG, + BPF_S_LD_W_ABS, + BPF_S_LD_H_ABS, + BPF_S_LD_B_ABS, + BPF_S_LD_W_LEN, + BPF_S_LD_W_IND, + BPF_S_LD_H_IND, + BPF_S_LD_B_IND, + BPF_S_LD_IMM, + BPF_S_LDX_W_LEN, + BPF_S_LDX_B_MSH, + BPF_S_LDX_IMM, + BPF_S_MISC_TAX, + BPF_S_MISC_TXA, + BPF_S_ALU_DIV_K, + BPF_S_LD_MEM, + BPF_S_LDX_MEM, + BPF_S_ST, + BPF_S_STX, + BPF_S_JMP_JA, + BPF_S_JMP_JEQ_K, + BPF_S_JMP_JEQ_X, + BPF_S_JMP_JGE_K, + BPF_S_JMP_JGE_X, + BPF_S_JMP_JGT_K, + BPF_S_JMP_JGT_X, + BPF_S_JMP_JSET_K, + BPF_S_JMP_JSET_X, +}; + #ifndef BPF_MAXINSNS #define BPF_MAXINSNS 4096 #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index 68ca1b0..509ca14 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -145,11 +145,11 @@ struct inodes_stat_t { * */ #define RW_MASK 1 -#define RWA_MASK 2 +#define RWA_MASK 16 #define READ 0 #define WRITE 1 -#define READA 2 /* read-ahead - don't block if no resources */ -#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ +#define READA 16 /* readahead - don't block if no resources */ +#define SWRITE 17 /* for ll_rw_block(), wait for buffer lock */ #define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) #define READ_META (READ | (1 << BIO_RW_META)) #define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) @@ -209,6 +209,7 @@ struct inodes_stat_t { #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define MS_I_VERSION (1<<23) /* Update inode I_version field */ #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ +#define MS_BORN (1<<29) #define MS_ACTIVE (1<<30) #define MS_NOUSER (1<<31) @@ -415,7 +416,8 @@ struct buffer_head; typedef int (get_block_t)(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create); typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, - ssize_t bytes, void *private); + ssize_t bytes, void *private, int ret, + bool is_async); /* * Attribute flags. 
These should be or-ed together to figure out what diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 41e4633..59c3939 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -423,6 +423,7 @@ extern void unregister_ftrace_graph(void); extern void ftrace_graph_init_task(struct task_struct *t); extern void ftrace_graph_exit_task(struct task_struct *t); +extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu); static inline int task_curr_ret_stack(struct task_struct *t) { @@ -446,6 +447,7 @@ static inline void unpause_graph_tracing(void) static inline void ftrace_graph_init_task(struct task_struct *t) { } static inline void ftrace_graph_exit_task(struct task_struct *t) { } +static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 975609c..81483c2 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -339,7 +339,7 @@ void drain_local_pages(void *dummy); extern gfp_t gfp_allowed_mask; -extern void set_gfp_allowed_mask(gfp_t mask); -extern gfp_t clear_gfp_allowed_mask(gfp_t mask); +extern void pm_restrict_gfp_mask(void); +extern void pm_restore_gfp_mask(void); #endif /* __LINUX_GFP_H */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d5b3876..ff43e92 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -64,6 +64,8 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) +#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) + #ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) @@ -82,10 +84,13 @@ /* * Are we doing bottom half or hardware interrupt processing? * Are we in a softirq context? Interrupt context? + * in_softirq - Are we currently processing softirq or have bh disabled? + * in_serving_softirq - Are we currently processing softirq? */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) /* * Are we in NMI context? @@ -132,10 +137,12 @@ extern void synchronize_irq(unsigned int irq); struct task_struct; -#ifndef CONFIG_VIRT_CPU_ACCOUNTING +#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) static inline void account_system_vtime(struct task_struct *tsk) { } +#else +extern void account_system_vtime(struct task_struct *tsk); #endif #if defined(CONFIG_NO_HZ) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 97b2eae..731854c 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -959,7 +959,7 @@ struct ieee80211_ht_info { /* block-ack parameters */ #define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 #define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C -#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFA0 +#define IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0 #define IEEE80211_DELBA_PARAM_TID_MASK 0xF000 #define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800 diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h new file mode 100644 index 0000000..1d19ab2 --- /dev/null +++ b/include/linux/intel-gtt.h @@ -0,0 +1,20 @@ +/* + * Common Intel AGPGART and GTT definitions. + */ +#ifndef _INTEL_GTT_H +#define _INTEL_GTT_H + +#include + +/* This is for Intel only GTT controls. 
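The new in_serving_softirq() predicate introduced above is easy to confuse with in_softirq(). A small sketch of the distinction, assuming the <linux/hardirq.h> semantics after this change: in_softirq() is also true inside a local_bh_disable()/spin_lock_bh() section, while in_serving_softirq() is true only while a softirq handler is actually executing.

        #include <linux/hardirq.h>
        #include <linux/spinlock.h>

        static void context_check(spinlock_t *lock)
        {
                spin_lock_bh(lock);
                WARN_ON(!in_softirq());         /* true: bottom halves disabled  */
                WARN_ON(in_serving_softirq());  /* false: no softirq being served */
                spin_unlock_bh(lock);
        }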
+ * + * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only + */ + +#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) +#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) + +/* flag for GFDT type */ +#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) + +#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c233113..a0384a4 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -53,16 +53,21 @@ * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. * Used by threaded interrupts which need to keep the * irq line disabled until the threaded handler has been run. + * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend + * */ #define IRQF_DISABLED 0x00000020 #define IRQF_SAMPLE_RANDOM 0x00000040 #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 -#define IRQF_TIMER 0x00000200 +#define __IRQF_TIMER 0x00000200 #define IRQF_PERCPU 0x00000400 #define IRQF_NOBALANCING 0x00000800 #define IRQF_IRQPOLL 0x00001000 #define IRQF_ONESHOT 0x00002000 +#define IRQF_NO_SUSPEND 0x00004000 + +#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) /* * Bits used by threaded handlers: diff --git a/include/linux/klist.h b/include/linux/klist.h index e91a4e5..a370ce5 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -22,7 +22,7 @@ struct klist { struct list_head k_list; void (*get)(struct klist_node *); void (*put)(struct klist_node *); -} __attribute__ ((aligned (4))); +} __attribute__ ((aligned (sizeof(void *)))); #define KLIST_INIT(_name, _get, _put) \ { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \ diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 43bdab7..e809eb1 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -16,6 +16,9 @@ struct stable_node; struct mem_cgroup; +struct page *ksm_does_need_to_copy(struct page *page, + struct vm_area_struct *vma, unsigned long address); + #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags); @@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page, * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, * but what if the vma was unmerged while the page was swapped out? 
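Usage sketch for the IRQF_NO_SUSPEND flag added above (handler and device names are hypothetical): timer interrupts pick it up implicitly via the rebuilt IRQF_TIMER, and any other interrupt that must keep firing while the system suspends can now request it explicitly.

        #include <linux/interrupt.h>

        static irqreturn_t my_always_on_handler(int irq, void *dev_id)
        {
                /* ... acknowledge the hardware ... */
                return IRQ_HANDLED;
        }

        static int my_setup_irq(unsigned int irq, void *dev)
        {
                /* keep this interrupt enabled across suspend_device_irqs() */
                return request_irq(irq, my_always_on_handler, IRQF_NO_SUSPEND,
                                   "my-always-on-device", dev);
        }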
*/ -struct page *ksm_does_need_to_copy(struct page *page, - struct vm_area_struct *vma, unsigned long address); -static inline struct page *ksm_might_need_to_copy(struct page *page, +static inline int ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct anon_vma *anon_vma = page_anon_vma(page); - if (!anon_vma || - (anon_vma == vma->anon_vma && - page->index == linear_page_index(vma, address))) - return page; - - return ksm_does_need_to_copy(page, vma, address); + return anon_vma && + (anon_vma != vma->anon_vma || + page->index != linear_page_index(vma, address)); } int page_referenced_ksm(struct page *page, @@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; } -static inline struct page *ksm_might_need_to_copy(struct page *page, +static inline int ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) { - return page; + return 0; } static inline int page_referenced_ksm(struct page *page, diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 7cb116a..f24d8fc 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -123,6 +123,7 @@ struct kvm_memory_slot { } *lpage_info[KVM_NR_PAGE_SIZES - 1]; unsigned long userspace_addr; int user_alloc; + int id; }; static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) diff --git a/include/linux/libata.h b/include/linux/libata.h index b85f3ff..8a9b4cf 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -335,6 +335,7 @@ enum { ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ ATA_EHI_QUIET = (1 << 3), /* be quiet */ + ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ ATA_EHI_DID_HARDRESET = (1 << 17), /* already soft-reset this port */ diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index 5c51f36..add8a1b 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -29,7 +29,7 @@ struct wm8994_ldo_pdata { #define WM8994_CONFIGURE_GPIO 0x8000 #define WM8994_DRC_REGS 5 -#define WM8994_EQ_REGS 19 +#define WM8994_EQ_REGS 20 /** * DRC configurations are specified with a label and a set of register diff --git a/include/linux/mm.h b/include/linux/mm.h index a2b4804..a724d89 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void *objp); #define VM_MAYSHARE 0x00000080 #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) #define VM_GROWSUP 0x00000200 +#else +#define VM_GROWSUP 0x00000000 +#endif #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ @@ -859,6 +863,12 @@ int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); +/* Is the vma a continuation of the stack vma above it? 
*/ +static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) +{ + return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +} + extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len); @@ -1329,8 +1339,10 @@ unsigned long ra_submit(struct file_ra_state *ra, /* Do stack extension */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); -#ifdef CONFIG_IA64 +#if VM_GROWSUP extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); +#else + #define expand_upwards(vma, address) do { } while (0) #endif extern int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b8bb9a6..ee7e258 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -134,7 +134,7 @@ struct vm_area_struct { within vm_mm. */ /* linked list of VM areas per task, sorted by address */ - struct vm_area_struct *vm_next; + struct vm_area_struct *vm_next, *vm_prev; pgprot_t vm_page_prot; /* Access permissions of this VMA. */ unsigned long vm_flags; /* Flags, see mm.h. */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f65913c..513ff03 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -124,6 +124,7 @@ struct mmc_host { unsigned int f_min; unsigned int f_max; u32 ocr_avail; + struct notifier_block pm_notify; #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ @@ -183,6 +184,7 @@ struct mmc_host { /* Only used with MMC_CAP_DISABLE */ int enabled; /* host is enabled */ + int rescan_disable; /* disable card detection */ int nesting_cnt; /* "enable" nesting count */ int en_dis_recurs; /* detect recursion */ unsigned int disable_delay; /* disable delay in msecs */ @@ -257,6 +259,7 @@ int mmc_card_can_sleep(struct mmc_host *host); int mmc_host_enable(struct mmc_host *host); int mmc_host_disable(struct mmc_host *host); int mmc_host_lazy_disable(struct mmc_host *host); +int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); static inline void mmc_set_disable_delay(struct mmc_host *host, unsigned int disable_delay) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b4d109e..1e3d0b4 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -284,6 +284,13 @@ struct zone { unsigned long watermark[NR_WMARK]; /* + * When free pages are below this point, additional steps are taken + * when reading the number of free pages to avoid per-cpu counter + * drift allowing watermarks to be breached + */ + unsigned long percpu_drift_mark; + + /* * We don't know if the memory that we're going to allocate will be freeable * or/and it will be released eventually, so to avoid totally wasting several * GB of ram we must reserve some of the lower zone memory (otherwise we risk @@ -655,7 +662,9 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive, unsigned long *free); void build_all_zonelists(void *data); void wakeup_kswapd(struct zone *zone, int order); -int zone_watermark_ok(struct zone *z, int order, unsigned long mark, +bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags); +bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, int classzone_idx, int alloc_flags); enum memmap_context { MEMMAP_EARLY, diff 
--git a/include/linux/msi.h b/include/linux/msi.h index 6991ab5..91b05c1 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -14,8 +14,10 @@ struct irq_desc; extern void mask_msi_irq(unsigned int irq); extern void unmask_msi_irq(unsigned int irq); extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); +extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); +extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); struct msi_desc { diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index f43e9b4..23cc10f 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h @@ -92,7 +92,7 @@ struct flchip { /* This is used to handle contention on write/erase operations between partitions of the same physical chip. */ struct flchip_shared { - spinlock_t lock; + struct mutex lock; struct flchip *writing; struct flchip *erasing; }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b21e405..e857c94 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1775,6 +1775,8 @@ extern void netif_carrier_on(struct net_device *dev); extern void netif_carrier_off(struct net_device *dev); +extern void netif_notify_peers(struct net_device *dev); + /** * netif_dormant_on - mark device as dormant. * @dev: network device @@ -2342,6 +2344,10 @@ do { \ }) #endif +#define MODULE_ALIAS_NETDEV(device) \ + MODULE_ALIAS("netdev-" device) + + #endif /* __KERNEL__ */ #endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 89341c3..03317c8 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -215,7 +215,7 @@ NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sk_buff *skb, int ret; if (!cond || - (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1)) + ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1)) ret = okfn(skb); return ret; } diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 540703b..b2f1a4d 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -210,6 +210,7 @@ static inline int notifier_to_errno(int ret) #define NETDEV_POST_INIT 0x0010 #define NETDEV_UNREGISTER_BATCH 0x0011 #define NETDEV_BONDING_DESLAVE 0x0012 +#define NETDEV_NOTIFY_PEERS 0x0013 #define SYS_DOWN 0x0001 /* Notify of system down */ #define SYS_RESTART SYS_DOWN diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 3bedcc1..f1cbd85 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -393,6 +393,9 @@ #define PCI_DEVICE_ID_VLSI_82C147 0x0105 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 +/* AMD RD890 Chipset */ +#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 + #define PCI_VENDOR_ID_ADL 0x1005 #define PCI_DEVICE_ID_ADL_2301 0x2301 @@ -2035,6 +2038,7 @@ #define PCI_DEVICE_ID_AFAVLAB_P030 0x2182 #define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150 +#define PCI_VENDOR_ID_BCM_GVC 0x14a4 #define PCI_VENDOR_ID_BROADCOM 0x14e4 #define PCI_DEVICE_ID_TIGON3_5752 0x1600 #define PCI_DEVICE_ID_TIGON3_5752M 0x1601 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 5d0266d..bfacc8e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -764,6 +764,7 @@ struct perf_event_context { int nr_active; int is_active; int nr_stat; 
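The NF_HOOK_COND change above is a pure operator-precedence fix: '==' binds more tightly than '=', so without the inner parentheses the variable receives the 0/1 result of the comparison rather than the hook's verdict. A minimal illustration with a hypothetical helper:

        static int precedence_demo(int (*hook)(void))
        {
                int ret;

                /* Buggy shape:  ret = hook() == 1;   -- ret can only be 0 or 1.  */
                /* Fixed shape below: ret keeps hook()'s actual return value.     */
                if ((ret = hook()) == 1)
                        ret = 0;                /* e.g. translate "accept" to 0   */
                return ret;
        }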
+ int rotate_disable; atomic_t refcount; struct task_struct *task; diff --git a/include/linux/pid.h b/include/linux/pid.h index 49f1c2f..ec9f2df 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -117,7 +117,7 @@ extern struct pid *find_vpid(int nr); */ extern struct pid *find_get_pid(int nr); extern struct pid *find_ge_pid(int nr, struct pid_namespace *); -int next_pidmap(struct pid_namespace *pid_ns, int last); +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); extern struct pid *alloc_pid(struct pid_namespace *ns); extern void free_pid(struct pid *pid); diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 4457969..bb27d7e 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -160,5 +160,6 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ long pipe_fcntl(struct file *, unsigned int, unsigned long arg); +struct pipe_inode_info *get_pipe_info(struct file *file); #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 6e81888..5ea4b15 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -67,7 +67,8 @@ static inline void device_set_run_wake(struct device *dev, bool enable) static inline bool pm_runtime_suspended(struct device *dev) { - return dev->power.runtime_status == RPM_SUSPENDED; + return dev->power.runtime_status == RPM_SUSPENDED + && !dev->power.disable_depth; } #else /* !CONFIG_PM_RUNTIME */ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 55ca73c..8a8de14 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -36,17 +36,6 @@ * RCU. */ #define RADIX_TREE_INDIRECT_PTR 1 -#define RADIX_TREE_RETRY ((void *)-1UL) - -static inline void *radix_tree_ptr_to_indirect(void *ptr) -{ - return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); -} - -static inline void *radix_tree_indirect_to_ptr(void *ptr) -{ - return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); -} static inline int radix_tree_is_indirect_ptr(void *ptr) { @@ -138,16 +127,29 @@ do { \ * removed. * * For use with radix_tree_lookup_slot(). Caller must hold tree at least read - * locked across slot lookup and dereference. More likely, will be used with - * radix_tree_replace_slot(), as well, so caller will hold tree write locked. + * locked across slot lookup and dereference. Not required if write lock is + * held (ie. items cannot be concurrently inserted). + * + * radix_tree_deref_retry must be used to confirm validity of the pointer if + * only the read lock is held. */ static inline void *radix_tree_deref_slot(void **pslot) { - void *ret = rcu_dereference(*pslot); - if (unlikely(radix_tree_is_indirect_ptr(ret))) - ret = RADIX_TREE_RETRY; - return ret; + return rcu_dereference(*pslot); } + +/** + * radix_tree_deref_retry - check radix_tree_deref_slot + * @arg: pointer returned by radix_tree_deref_slot + * Returns: 0 if retry is not required, otherwise retry is required + * + * radix_tree_deref_retry must be used with radix_tree_deref_slot. 
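The radix-tree documentation added below implies a specific caller pattern; here is a sketch of it under rcu_read_lock() only, with no tree lock held (root and index are assumed to come from the caller): while the tree is being grown concurrently a slot can briefly hold an indirect pointer, which radix_tree_deref_retry() detects so the lookup can simply be repeated.

        #include <linux/radix-tree.h>
        #include <linux/rcupdate.h>

        static void *lockless_lookup(struct radix_tree_root *root,
                                     unsigned long index)
        {
                void **slot;
                void *item = NULL;

                rcu_read_lock();
        repeat:
                slot = radix_tree_lookup_slot(root, index);
                if (slot) {
                        item = radix_tree_deref_slot(slot);
                        if (radix_tree_deref_retry(item)) {
                                item = NULL;
                                goto repeat;    /* raced with tree expansion */
                        }
                }
                rcu_read_unlock();
                return item;
        }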
+ */ +static inline int radix_tree_deref_retry(void *arg) +{ + return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR); +} + /** * radix_tree_replace_slot - replace item in a slot * @pslot: pointer to slot, returned by radix_tree_lookup_slot diff --git a/include/linux/sched.h b/include/linux/sched.h index 0478888..fc531a7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -143,7 +143,7 @@ extern unsigned long nr_iowait_cpu(int cpu); extern unsigned long this_cpu_load(void); -extern void calc_global_load(void); +extern void calc_global_load(unsigned long ticks); extern unsigned long get_parent_ip(unsigned long addr); @@ -274,17 +274,11 @@ extern cpumask_var_t nohz_cpu_mask; #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) extern int select_nohz_load_balancer(int cpu); extern int get_nohz_load_balancer(void); -extern int nohz_ratelimit(int cpu); #else static inline int select_nohz_load_balancer(int cpu) { return 0; } - -static inline int nohz_ratelimit(int cpu) -{ - return 0; -} #endif /* @@ -862,6 +856,7 @@ struct sched_group { * single CPU. */ unsigned int cpu_power; + unsigned int group_weight; /* * The CPUs this group covers. @@ -1082,7 +1077,7 @@ struct sched_class { struct task_struct *task); #ifdef CONFIG_FAIR_GROUP_SCHED - void (*moved_group) (struct task_struct *p, int on_rq); + void (*task_move_group) (struct task_struct *p, int on_rq); #endif }; @@ -1691,8 +1686,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * /* * Per process flags */ -#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ - /* Not implemented yet, only for 486*/ +#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */ #define PF_STARTING 0x00000002 /* being created */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ @@ -1830,6 +1824,19 @@ extern void sched_clock_idle_wakeup_event(u64 delta_ns); */ extern unsigned long long cpu_clock(int cpu); +#ifdef CONFIG_IRQ_TIME_ACCOUNTING +/* + * An i/f to runtime opt-in for irq time accounting based off of sched_clock. + * The reason for this explicit opt-in is not to have perf penalty with + * slow sched_clocks. 
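A sketch of how the IRQ-time accounting opt-in above is meant to be used (the predicate is hypothetical; the point is that the call happens at runtime, from architecture code, only once the platform knows its sched_clock() is cheap to read):

        static void __init maybe_enable_irqtime(void)
        {
                /* Only pay for IRQ time accounting where the clock is cheap. */
                if (arch_sched_clock_is_fast())   /* hypothetical predicate */
                        enable_sched_clock_irqtime();
        }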
+ */ +extern void enable_sched_clock_irqtime(void); +extern void disable_sched_clock_irqtime(void); +#else +static inline void enable_sched_clock_irqtime(void) {} +static inline void disable_sched_clock_irqtime(void) {} +#endif + extern unsigned long long task_sched_runtime(struct task_struct *task); extern unsigned long long thread_group_sched_runtime(struct task_struct *task); @@ -2369,9 +2376,9 @@ extern int __cond_resched_lock(spinlock_t *lock); extern int __cond_resched_softirq(void); -#define cond_resched_softirq() ({ \ - __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ - __cond_resched_softirq(); \ +#define cond_resched_softirq() ({ \ + __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ }) /* diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f89e7fd..eb674b7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -169,6 +169,7 @@ struct skb_shared_hwtstamps { * @software: generate software time stamp * @in_progress: device driver is going to provide * hardware time stamp + * @prevent_sk_orphan: make sk reference available on driver level * @flags: all shared_tx flags * * These flags are attached to packets as part of the @@ -178,7 +179,8 @@ union skb_shared_tx { struct { __u8 hardware:1, software:1, - in_progress:1; + in_progress:1, + prevent_sk_orphan:1; }; __u8 flags; }; diff --git a/include/linux/socket.h b/include/linux/socket.h index 032a19e..a2fada9 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -24,6 +24,9 @@ struct __kernel_sockaddr_storage { #include /* pid_t */ #include /* __user */ +struct pid; +struct cred; + #define __sockaddr_check_size(size) \ BUILD_BUG_ON(((size) > sizeof(struct __kernel_sockaddr_storage))) @@ -309,6 +312,8 @@ struct ucred { #define IPX_TYPE 1 #ifdef __KERNEL__ +extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred); + extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, int offset, int len); diff --git a/include/linux/swap.h b/include/linux/swap.h index ff4acea..11c1e47 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -19,6 +19,7 @@ struct bio; #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff #define SWAP_FLAG_PRIO_SHIFT 0 +#define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ static inline int current_is_kswapd(void) { @@ -142,7 +143,7 @@ struct swap_extent { enum { SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ SWP_WRITEOK = (1 << 1), /* ok to write to this swap? 
*/ - SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ + SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */ SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ diff --git a/include/linux/tipc.h b/include/linux/tipc.h index 181c8d0..d10614b 100644 --- a/include/linux/tipc.h +++ b/include/linux/tipc.h @@ -127,17 +127,23 @@ static inline unsigned int tipc_node(__u32 addr) * TIPC topology subscription service definitions */ -#define TIPC_SUB_SERVICE 0x00 /* Filter for service availability */ -#define TIPC_SUB_PORTS 0x01 /* Filter for port availability */ -#define TIPC_SUB_CANCEL 0x04 /* Cancel a subscription */ +#define TIPC_SUB_PORTS 0x01 /* filter for port availability */ +#define TIPC_SUB_SERVICE 0x02 /* filter for service availability */ +#define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */ +#if 0 +/* The following filter options are not currently implemented */ +#define TIPC_SUB_NO_BIND_EVTS 0x04 /* filter out "publish" events */ +#define TIPC_SUB_NO_UNBIND_EVTS 0x08 /* filter out "withdraw" events */ +#define TIPC_SUB_SINGLE_EVT 0x10 /* expire after first event */ +#endif #define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */ struct tipc_subscr { - struct tipc_name_seq seq; /* NBO. Name sequence of interest */ - __u32 timeout; /* NBO. Subscription duration (in ms) */ - __u32 filter; /* NBO. Bitmask of filter options */ - char usr_handle[8]; /* Opaque. Available for subscriber use */ + struct tipc_name_seq seq; /* name sequence of interest */ + __u32 timeout; /* subscription duration (in ms) */ + __u32 filter; /* bitmask of filter options */ + char usr_handle[8]; /* available for subscriber use */ }; #define TIPC_PUBLISHED 1 /* publication event */ @@ -145,11 +151,11 @@ struct tipc_subscr { #define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */ struct tipc_event { - __u32 event; /* NBO. Event type, as defined above */ - __u32 found_lower; /* NBO. Matching name seq instances */ - __u32 found_upper; /* " " " " " */ - struct tipc_portid port; /* NBO. 
Associated port */ - struct tipc_subscr s; /* Original, associated subscription */ + __u32 event; /* event type */ + __u32 found_lower; /* matching name seq instances */ + __u32 found_upper; /* " " " " */ + struct tipc_portid port; /* associated port */ + struct tipc_subscr s; /* associated subscription */ }; /* diff --git a/include/linux/tty.h b/include/linux/tty.h index 931078b..7802a24 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -552,6 +552,9 @@ static inline void tty_audit_push_task(struct task_struct *tsk, } #endif +/* tty_io.c */ +extern int __init tty_init(void); + /* tty_ioctl.c */ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 16b7f33..3e93de7 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -26,4 +26,8 @@ and can't handle talking to these interfaces */ #define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020 +/* device needs a pause during initialization, after we read the device + descriptor */ +#define USB_QUIRK_DELAY_INIT 0x00000040 + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 84a4c44..8ab6142 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -346,6 +346,9 @@ extern int usb_serial_handle_sysrq_char(struct tty_struct *tty, struct usb_serial_port *port, unsigned int ch); extern int usb_serial_handle_break(struct usb_serial_port *port); +extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, + unsigned int status); extern int usb_serial_bus_register(struct usb_serial_driver *device); diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index cc4f453..8178156 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h @@ -36,6 +36,9 @@ static inline void put_user_ns(struct user_namespace *ns) kref_put(&ns->kref, free_user_ns); } +uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid); +gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid); + #else static inline struct user_namespace *get_user_ns(struct user_namespace *ns) @@ -52,6 +55,17 @@ static inline void put_user_ns(struct user_namespace *ns) { } +static inline uid_t user_ns_map_uid(struct user_namespace *to, + const struct cred *cred, uid_t uid) +{ + return uid; +} +static inline gid_t user_ns_map_gid(struct user_namespace *to, + const struct cred *cred, gid_t gid) +{ + return gid; +} + #endif #endif /* _LINUX_USER_H */ diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 7f43ccd..e4cc21c 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone, return x; } +/* + * More accurate version that also considers the currently pending + * deltas. For that we need to loop over all cpus to find the current + * deltas. There is no synchronization so the result cannot be + * exactly accurate either. 
+ */ +static inline unsigned long zone_page_state_snapshot(struct zone *zone, + enum zone_stat_item item) +{ + long x = atomic_long_read(&zone->vm_stat[item]); + +#ifdef CONFIG_SMP + int cpu; + for_each_online_cpu(cpu) + x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; + + if (x < 0) + x = 0; +#endif + return x; +} + extern unsigned long global_reclaimable_pages(void); extern unsigned long zone_reclaimable_pages(struct zone *zone); @@ -232,6 +254,8 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item); extern void __dec_zone_state(struct zone *, enum zone_stat_item); void refresh_cpu_vm_stats(int); +void reduce_pgdat_percpu_threshold(pg_data_t *pgdat); +void restore_pgdat_percpu_threshold(pg_data_t *pgdat); #else /* CONFIG_SMP */ /* @@ -276,6 +300,9 @@ static inline void __dec_zone_page_state(struct page *page, #define dec_zone_page_state __dec_zone_page_state #define mod_zone_page_state __mod_zone_page_state +static inline void reduce_pgdat_percpu_threshold(pg_data_t *pgdat) { } +static inline void restore_pgdat_percpu_threshold(pg_data_t *pgdat) { } + static inline void refresh_cpu_vm_stats(int cpu) { } #endif diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 20725e2..18e5c3f 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -10,6 +10,7 @@ extern void unix_inflight(struct file *fp); extern void unix_notinflight(struct file *fp); extern void unix_gc(void); extern void wait_for_unix_gc(void); +extern struct sock *unix_get_socket(struct file *filp); #define UNIX_HASH_SIZE 256 @@ -23,7 +24,8 @@ struct unix_address { }; struct unix_skb_parms { - struct ucred creds; /* Skb credentials */ + struct pid *pid; /* Skb credentials */ + const struct cred *cred; struct scm_fp_list *fp; /* Passed files */ #ifdef CONFIG_SECURITY_NETWORK u32 secid; /* Security ID */ @@ -31,7 +33,6 @@ struct unix_skb_parms { }; #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) -#define UNIXCREDS(skb) (&UNIXCB((skb)).creds) #define UNIXSID(skb) (&UNIXCB((skb)).secid) #define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) @@ -56,6 +57,7 @@ struct unix_sock { spinlock_t lock; unsigned int gc_candidate : 1; unsigned int gc_maybe_cycle : 1; + unsigned char recursion_level; struct socket_wq peer_wq; }; #define unix_sk(__sk) ((struct unix_sock *)__sk) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index de22cbf..323eca1 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -287,6 +287,9 @@ struct ieee80211_bss_conf { * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this * frame and selects the maximum number of streams that it can use. + * + * Note: If you have to add new flags to the enumeration, then don't + * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. */ enum mac80211_tx_control_flags { IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), @@ -315,6 +318,19 @@ enum mac80211_tx_control_flags { #define IEEE80211_TX_CTL_STBC_SHIFT 23 }; +/* + * This definition is used as a mask to clear all temporary flags, which are + * set by the tx handlers for each transmission attempt by the mac80211 stack. 
+ */ +#define IEEE80211_TX_TEMPORARY_FLAGS (IEEE80211_TX_CTL_NO_ACK | \ + IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT | \ + IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \ + IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \ + IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \ + IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_PSPOLL_RESPONSE | \ + IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \ + IEEE80211_TX_CTL_STBC) + /** * enum mac80211_rate_control_flags - per-rate flags set by the * Rate Control algorithm. diff --git a/include/net/netlink.h b/include/net/netlink.h index 4fc05b5..28c27d1 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -384,7 +384,7 @@ static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, * * Returns the first attribute which matches the specified type. */ -static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh, +static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh, int hdrlen, int attrtype) { return nla_find(nlmsg_attrdata(nlh, hdrlen), diff --git a/include/net/scm.h b/include/net/scm.h index 8360e47..096286d 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -19,6 +19,8 @@ struct scm_fp_list { }; struct scm_cookie { + struct pid *pid; /* Skb credentials */ + const struct cred *cred; struct ucred creds; /* Skb credentials */ struct scm_fp_list *fp; /* Passed files */ #ifdef CONFIG_SECURITY_NETWORK @@ -42,8 +44,27 @@ static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_co { } #endif /* CONFIG_SECURITY_NETWORK */ +static __inline__ void scm_set_cred(struct scm_cookie *scm, + struct pid *pid, const struct cred *cred) +{ + scm->pid = get_pid(pid); + scm->cred = get_cred(cred); + cred_to_ucred(pid, cred, &scm->creds); +} + +static __inline__ void scm_destroy_cred(struct scm_cookie *scm) +{ + put_pid(scm->pid); + scm->pid = NULL; + + if (scm->cred) + put_cred(scm->cred); + scm->cred = NULL; +} + static __inline__ void scm_destroy(struct scm_cookie *scm) { + scm_destroy_cred(scm); if (scm && scm->fp) __scm_destroy(scm); } @@ -51,10 +72,7 @@ static __inline__ void scm_destroy(struct scm_cookie *scm) static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) { - struct task_struct *p = current; - scm->creds.uid = current_uid(); - scm->creds.gid = current_gid(); - scm->creds.pid = task_tgid_vnr(p); + scm_set_cred(scm, task_tgid(current), current_cred()); scm->fp = NULL; unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) @@ -96,6 +114,8 @@ static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, if (test_bit(SOCK_PASSCRED, &sock->flags)) put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds); + scm_destroy_cred(scm); + scm_passec(sock, msg, scm); if (!scm->fp) diff --git a/include/net/sock.h b/include/net/sock.h index 0a691ea..a4089b3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -749,6 +749,7 @@ struct proto { /* Keeping track of sk's, looking them up, and port selection methods. 
*/ void (*hash)(struct sock *sk); void (*unhash)(struct sock *sk); + void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); /* Keeping track of sockets in use */ @@ -1150,6 +1151,8 @@ extern void sk_common_release(struct sock *sk); /* Initialise core socket variables */ extern void sock_init_data(struct socket *sock, struct sock *sk); +extern void sk_filter_release_rcu(struct rcu_head *rcu); + /** * sk_filter_release - release a socket filter * @fp: filter to remove @@ -1160,7 +1163,7 @@ extern void sock_init_data(struct socket *sock, struct sock *sk); static inline void sk_filter_release(struct sk_filter *fp) { if (atomic_dec_and_test(&fp->refcnt)) - kfree(fp); + call_rcu_bh(&fp->rcu, sk_filter_release_rcu); } static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp) diff --git a/include/net/tcp.h b/include/net/tcp.h index a144914..e824cf0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -268,11 +268,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3) return seq3 - seq2 >= seq1 - seq2; } -static inline int tcp_too_many_orphans(struct sock *sk, int num) +static inline bool tcp_too_many_orphans(struct sock *sk, int shift) { - return (num > sysctl_tcp_max_orphans) || - (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && - atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]); + struct percpu_counter *ocp = sk->sk_prot->orphan_count; + int orphans = percpu_counter_read_positive(ocp); + + if (orphans << shift > sysctl_tcp_max_orphans) { + orphans = percpu_counter_sum_positive(ocp); + if (orphans << shift > sysctl_tcp_max_orphans) + return true; + } + + if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && + atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) + return true; + return false; } /* syncookies: remember time of last synqueue overflow */ @@ -509,8 +519,22 @@ extern unsigned int tcp_current_mss(struct sock *sk); /* Bound MSS / TSO packet size with the half of the window */ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) { - if (tp->max_window && pktsize > (tp->max_window >> 1)) - return max(tp->max_window >> 1, 68U - tp->tcp_header_len); + int cutoff; + + /* When peer uses tiny windows, there is no use in packetizing + * to sub-MSS pieces for the sake of SWS or making sure there + * are enough packets in the pipe for fast recovery. + * + * On the other hand, for extremely large MSS devices, handling + * smaller than MSS windows in this way does make sense. + */ + if (tp->max_window >= 512) + cutoff = (tp->max_window >> 1); + else + cutoff = tp->max_window; + + if (cutoff && pktsize > cutoff) + return max_t(int, cutoff, 68U - tp->tcp_header_len); else return pktsize; } diff --git a/include/net/udp.h b/include/net/udp.h index 5348d80..4201dc8 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk) } extern void udp_lib_unhash(struct sock *sk); +extern void udp_lib_rehash(struct sock *sk, u16 new_hash); static inline void udp_lib_close(struct sock *sk, long timeout) { diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index d80b6db..558fa2f 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -451,7 +451,7 @@ static inline int scsi_device_qas(struct scsi_device *sdev) } static inline int scsi_device_enclosure(struct scsi_device *sdev) { - return sdev->inquiry[6] & (1<<6); + return sdev->inquiry ? 
(sdev->inquiry[6] & (1<<6)) : 1; } static inline int scsi_device_protection(struct scsi_device *sdev) diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 6a664c3..7dc97d1 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -1707,6 +1707,7 @@ struct snd_emu10k1 { unsigned int card_type; /* EMU10K1_CARD_* */ unsigned int ecard_ctrl; /* ecard control bits */ unsigned long dma_mask; /* PCI DMA mask */ + unsigned int delay_pcm_irq; /* in samples */ int max_cache_pages; /* max memory size / PAGE_SIZE */ struct snd_dma_buffer silent_page; /* silent page */ struct snd_dma_buffer ptb_pages; /* page table pages */ diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 66ff4c1..f902141 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -46,25 +46,25 @@ /* platform domain */ #define SND_SOC_DAPM_INPUT(wname) \ { .id = snd_soc_dapm_input, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0} + .num_kcontrols = 0, .reg = SND_SOC_NOPM } #define SND_SOC_DAPM_OUTPUT(wname) \ { .id = snd_soc_dapm_output, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0} + .num_kcontrols = 0, .reg = SND_SOC_NOPM } #define SND_SOC_DAPM_MIC(wname, wevent) \ { .id = snd_soc_dapm_mic, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} #define SND_SOC_DAPM_HP(wname, wevent) \ { .id = snd_soc_dapm_hp, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_SPK(wname, wevent) \ { .id = snd_soc_dapm_spk, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_LINE(wname, wevent) \ { .id = snd_soc_dapm_line, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD} /* path domain */ @@ -161,11 +161,11 @@ /* events that are pre and post DAPM */ #define SND_SOC_DAPM_PRE(wname, wevent) \ { .id = snd_soc_dapm_pre, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD} #define SND_SOC_DAPM_POST(wname, wevent) \ { .id = snd_soc_dapm_post, .name = wname, .kcontrols = NULL, \ - .num_kcontrols = 0, .event = wevent, \ + .num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \ .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD} /* stream domain */ diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index 9496b96..fa8223a 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -74,14 +74,16 @@ TRACE_EVENT(timer_expire_entry, TP_STRUCT__entry( __field( void *, timer ) __field( unsigned long, now ) + __field( void *, function) ), TP_fast_assign( __entry->timer = timer; __entry->now = jiffies; + __entry->function = timer->function; ), - TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) + TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now) ); /** @@ -213,14 +215,16 @@ TRACE_EVENT(hrtimer_expire_entry, 
TP_STRUCT__entry( __field( void *, hrtimer ) __field( s64, now ) + __field( void *, function) ), TP_fast_assign( __entry->hrtimer = hrtimer; __entry->now = now->tv64; + __entry->function = hrtimer->function; ), - TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, + TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) ); diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index e8cbf43..75271b9 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -24,8 +24,15 @@ typedef unsigned int RING_IDX; * A ring contains as many entries as will fit, rounded down to the nearest * power of two (so we can mask with (size-1) to loop around). */ -#define __RING_SIZE(_s, _sz) \ - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) +#define __CONST_RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \ + sizeof(((struct _s##_sring *)0)->ring[0]))) + +/* + * The same for passing in an actual pointer instead of a name tag. + */ +#define __RING_SIZE(_s, _sz) \ + (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0]))) /* * Macros to make the correct C datatypes for a new kind of ring. diff --git a/init/calibrate.c b/init/calibrate.c index 6eb48e5..24fe022 100644 --- a/init/calibrate.c +++ b/init/calibrate.c @@ -66,7 +66,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void) pre_start = 0; read_current_timer(&start); start_jiffies = jiffies; - while (jiffies <= (start_jiffies + 1)) { + while (time_before_eq(jiffies, start_jiffies + 1)) { pre_start = start; read_current_timer(&start); } @@ -74,8 +74,8 @@ static unsigned long __cpuinit calibrate_delay_direct(void) pre_end = 0; end = post_start; - while (jiffies <= - (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) { + while (time_before_eq(jiffies, start_jiffies + 1 + + DELAY_CALIBRATION_TICKS)) { pre_end = end; read_current_timer(&end); } diff --git a/ipc/compat.c b/ipc/compat.c index 9dc2c7d..845a287 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -241,6 +241,8 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr) struct semid64_ds __user *up64; int version = compat_ipc_parse_version(&third); + memset(&s64, 0, sizeof(s64)); + if (!uptr) return -EINVAL; if (get_user(pad, (u32 __user *) uptr)) @@ -421,6 +423,8 @@ long compat_sys_msgctl(int first, int second, void __user *uptr) int version = compat_ipc_parse_version(&second); void __user *p; + memset(&m64, 0, sizeof(m64)); + switch (second & (~IPC_64)) { case IPC_INFO: case IPC_RMID: @@ -594,6 +598,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr) int err, err2; int version = compat_ipc_parse_version(&second); + memset(&s64, 0, sizeof(s64)); + switch (second & (~IPC_64)) { case IPC_RMID: case SHM_LOCK: diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c index d8d1e9f..380ea4f 100644 --- a/ipc/compat_mq.c +++ b/ipc/compat_mq.c @@ -53,6 +53,9 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name, void __user *p = NULL; if (u_attr && oflag & O_CREAT) { struct mq_attr attr; + + memset(&attr, 0, sizeof(attr)); + p = compat_alloc_user_space(sizeof(attr)); if (get_compat_mq_attr(&attr, u_attr) || copy_to_user(p, &attr, sizeof(attr))) @@ -127,6 +130,8 @@ asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p)); long ret; + memset(&mqstat, 0, sizeof(mqstat)); + if (u_mqstat) { if 
(get_compat_mq_attr(&mqstat, u_mqstat) || copy_to_user(p, &mqstat, sizeof(mqstat))) diff --git a/ipc/shm.c b/ipc/shm.c index 52ed77e..b427380 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -473,6 +473,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ { struct shmid_ds out; + memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; diff --git a/kernel/compat.c b/kernel/compat.c index 5adab05..91b33fb 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -1137,3 +1137,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info) return 0; } + +/* + * Allocate user-space memory for the duration of a single system call, + * in order to marshall parameters inside a compat thunk. + */ +void __user *compat_alloc_user_space(unsigned long len) +{ + void __user *ptr; + + /* If len would occupy more than half of the entire compat space... */ + if (unlikely(len > (((compat_uptr_t)~0) >> 1))) + return NULL; + + ptr = arch_compat_alloc_user_space(len); + + if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) + return NULL; + + return ptr; +} +EXPORT_SYMBOL_GPL(compat_alloc_user_space); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 02b9611..8c781d5 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1576,8 +1576,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, return -ENODEV; trialcs = alloc_trial_cpuset(cs); - if (!trialcs) - return -ENOMEM; + if (!trialcs) { + retval = -ENOMEM; + goto out; + } switch (cft->private) { case FILE_CPULIST: @@ -1592,6 +1594,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, } free_trial_cpuset(trialcs); +out: cgroup_unlock(); return retval; } diff --git a/kernel/cred.c b/kernel/cred.c index 60bc8b1..a8d12fe 100644 --- a/kernel/cred.c +++ b/kernel/cred.c @@ -256,13 +256,13 @@ struct cred *cred_alloc_blank(void) #endif atomic_set(&new->usage, 1); +#ifdef CONFIG_DEBUG_CREDENTIALS + new->magic = CRED_MAGIC; +#endif if (security_cred_alloc_blank(new, GFP_KERNEL) < 0) goto error; -#ifdef CONFIG_DEBUG_CREDENTIALS - new->magic = CRED_MAGIC; -#endif return new; error: @@ -663,6 +663,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) validate_creds(old); *new = *old; + atomic_set(&new->usage, 1); + set_cred_subscribers(new, 0); get_uid(new->user); get_group_info(new->group_info); @@ -680,8 +682,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon) if (security_prepare_creds(new, old, GFP_KERNEL) < 0) goto error; - atomic_set(&new->usage, 1); - set_cred_subscribers(new, 0); put_cred(old); validate_creds(new); return new; @@ -754,7 +754,11 @@ bool creds_are_invalid(const struct cred *cred) if (cred->magic != CRED_MAGIC) return true; #ifdef CONFIG_SECURITY_SELINUX - if (selinux_is_enabled()) { + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. + */ + if (selinux_is_enabled() && cred->security) { if ((unsigned long) cred->security < PAGE_SIZE) return true; if ((*(u32 *)cred->security & 0xffffff00) == diff --git a/kernel/exit.c b/kernel/exit.c index ceffc67..41d44c3 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -95,6 +95,14 @@ static void __exit_signal(struct task_struct *tsk) sig->tty = NULL; } else { /* + * This can only happen if the caller is de_thread(). + * FIXME: this is the temporary hack, we should teach + * posix-cpu-timers to handle this case correctly. 
+ */ + if (unlikely(has_group_leader_pid(tsk))) + posix_cpu_timers_exit_group(tsk); + + /* * If there is any task waiting for the group exit * then notify it: */ @@ -900,6 +908,15 @@ NORET_TYPE void do_exit(long code) if (unlikely(!tsk->pid)) panic("Attempted to kill the idle task!"); + /* + * If do_exit is called because this processes oopsed, it's possible + * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before + * continuing. Amongst other possible reasons, this is to prevent + * mm_release()->clear_child_tid() from writing to a user-controlled + * kernel address. + */ + set_fs(USER_DS); + tracehook_report_exit(&code); validate_creds_for_do_exit(tsk); @@ -1383,8 +1400,7 @@ static int wait_task_stopped(struct wait_opts *wo, if (!unlikely(wo->wo_flags & WNOWAIT)) *p_code = 0; - /* don't need the RCU readlock here as we're holding a spinlock */ - uid = __task_cred(p)->uid; + uid = task_uid(p); unlock_sig: spin_unlock_irq(&p->sighand->siglock); if (!exit_code) @@ -1457,7 +1473,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) } if (!unlikely(wo->wo_flags & WNOWAIT)) p->signal->flags &= ~SIGNAL_STOP_CONTINUED; - uid = __task_cred(p)->uid; + uid = task_uid(p); spin_unlock_irq(&p->sighand->siglock); pid = task_pid_vnr(p); diff --git a/kernel/fork.c b/kernel/fork.c index b6cce14..bc0af81 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -272,6 +272,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) setup_thread_stack(tsk, orig); clear_user_return_notifier(tsk); + clear_tsk_need_resched(tsk); stackend = end_of_stack(tsk); *stackend = STACK_END_MAGIC; /* for overflow detection */ @@ -300,7 +301,7 @@ out: #ifdef CONFIG_MMU static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { - struct vm_area_struct *mpnt, *tmp, **pprev; + struct vm_area_struct *mpnt, *tmp, *prev, **pprev; struct rb_node **rb_link, *rb_parent; int retval; unsigned long charge; @@ -328,6 +329,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) if (retval) goto out; + prev = NULL; for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { struct file *file; @@ -359,7 +361,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) goto fail_nomem_anon_vma_fork; tmp->vm_flags &= ~VM_LOCKED; tmp->vm_mm = mm; - tmp->vm_next = NULL; + tmp->vm_next = tmp->vm_prev = NULL; file = tmp->vm_file; if (file) { struct inode *inode = file->f_path.dentry->d_inode; @@ -392,6 +394,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) */ *pprev = tmp; pprev = &tmp->vm_next; + tmp->vm_prev = prev; + prev = tmp; __vma_link_rb(mm, tmp, rb_link, rb_parent); rb_link = &tmp->vm_rb.rb_right; diff --git a/kernel/futex.c b/kernel/futex.c index 6a3a5fa..e328f57 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -1363,7 +1363,6 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) { struct futex_hash_bucket *hb; - get_futex_key_refs(&q->key); hb = hash_futex(&q->key); q->lock_ptr = &hb->lock; @@ -1375,7 +1374,6 @@ static inline void queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb) { spin_unlock(&hb->lock); - drop_futex_key_refs(&q->key); } /** @@ -1480,8 +1478,6 @@ static void unqueue_me_pi(struct futex_q *q) q->pi_state = NULL; spin_unlock(q->lock_ptr); - - drop_futex_key_refs(&q->key); } /* @@ -1812,7 +1808,10 @@ static int futex_wait(u32 __user *uaddr, int fshared, } retry: - /* Prepare to wait on uaddr. */ + /* + * Prepare to wait on uaddr. On success, holds hb lock and increments + * q.key refs. 
+ */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out; @@ -1822,24 +1821,23 @@ retry: /* If we were woken (and unqueued), we succeeded, whatever. */ ret = 0; + /* unqueue_me() drops q.key ref */ if (!unqueue_me(&q)) - goto out_put_key; + goto out; ret = -ETIMEDOUT; if (to && !to->task) - goto out_put_key; + goto out; /* * We expect signal_pending(current), but we might be the * victim of a spurious wakeup as well. */ - if (!signal_pending(current)) { - put_futex_key(fshared, &q.key); + if (!signal_pending(current)) goto retry; - } ret = -ERESTARTSYS; if (!abs_time) - goto out_put_key; + goto out; restart = &current_thread_info()->restart_block; restart->fn = futex_wait_restart; @@ -1856,8 +1854,6 @@ retry: ret = -ERESTART_RESTARTBLOCK; -out_put_key: - put_futex_key(fshared, &q.key); out: if (to) { hrtimer_cancel(&to->timer); @@ -2236,7 +2232,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, q.rt_waiter = &rt_waiter; q.requeue_pi_key = &key2; - /* Prepare to wait on uaddr. */ + /* + * Prepare to wait on uaddr. On success, increments q.key (key1) ref + * count. + */ ret = futex_wait_setup(uaddr, val, fshared, &q, &hb); if (ret) goto out_key2; @@ -2254,7 +2253,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared, * In order for us to be here, we know our q.key == key2, and since * we took the hb->lock above, we also know that futex_requeue() has * completed and we no longer have to concern ourselves with a wakeup - * race with the atomic proxy lock acquition by the requeue code. + * race with the atomic proxy lock acquisition by the requeue code. The + * futex_requeue dropped our key1 reference and incremented our key2 + * reference count. */ /* Check if the requeue code acquired the second futex for us. */ diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c index ef3c3f8..f83972b 100644 --- a/kernel/gcov/fs.c +++ b/kernel/gcov/fs.c @@ -33,10 +33,11 @@ * @children: child nodes * @all: list head for list of all nodes * @parent: parent node - * @info: associated profiling data structure if not a directory - * @ghost: when an object file containing profiling data is unloaded we keep a - * copy of the profiling data here to allow collecting coverage data - * for cleanup code. Such a node is called a "ghost". + * @loaded_info: array of pointers to profiling data sets for loaded object + * files. + * @num_loaded: number of profiling data sets for loaded object files. + * @unloaded_info: accumulated copy of profiling data sets for unloaded + * object files. Used only when gcov_persist=1. * @dentry: main debugfs entry, either a directory or data file * @links: associated symbolic links * @name: data file basename @@ -51,10 +52,11 @@ struct gcov_node { struct list_head children; struct list_head all; struct gcov_node *parent; - struct gcov_info *info; - struct gcov_info *ghost; + struct gcov_info **loaded_info; + struct gcov_info *unloaded_info; struct dentry *dentry; struct dentry **links; + int num_loaded; char name[0]; }; @@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = { }; /* - * Return the profiling data set for a given node. This can either be the - * original profiling data structure or a duplicate (also called "ghost") - * in case the associated object file has been unloaded. + * Return a profiling data set associated with the given node. This is + * either a data set for a loaded object file or a data set copy in case + * all associated object files have been unloaded.
*/ static struct gcov_info *get_node_info(struct gcov_node *node) { - if (node->info) - return node->info; + if (node->num_loaded > 0) + return node->loaded_info[0]; - return node->ghost; + return node->unloaded_info; +} + +/* + * Return a newly allocated profiling data set which contains the sum of + * all profiling data associated with the given node. + */ +static struct gcov_info *get_accumulated_info(struct gcov_node *node) +{ + struct gcov_info *info; + int i = 0; + + if (node->unloaded_info) + info = gcov_info_dup(node->unloaded_info); + else + info = gcov_info_dup(node->loaded_info[i++]); + if (!info) + return NULL; + for (; i < node->num_loaded; i++) + gcov_info_add(info, node->loaded_info[i]); + + return info; } /* @@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file) mutex_lock(&node_lock); /* * Read from a profiling data copy to minimize reference tracking - * complexity and concurrent access. + * complexity and concurrent access and to keep accumulating multiple + * profiling data sets associated with one node simple. */ - info = gcov_info_dup(get_node_info(node)); + info = get_accumulated_info(node); if (!info) goto out_unlock; iter = gcov_iter_new(info); @@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name) return NULL; } +/* + * Reset all profiling data associated with the specified node. + */ +static void reset_node(struct gcov_node *node) +{ + int i; + + if (node->unloaded_info) + gcov_info_reset(node->unloaded_info); + for (i = 0; i < node->num_loaded; i++) + gcov_info_reset(node->loaded_info[i]); +} + static void remove_node(struct gcov_node *node); /* * write() implementation for gcov data files. Reset profiling data for the - * associated file. If the object file has been unloaded (i.e. this is - * a "ghost" node), remove the debug fs node as well. + * corresponding file. If all associated object files have been unloaded, + * remove the debug fs node as well. */ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, size_t len, loff_t *pos) @@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr, node = get_node_by_name(info->filename); if (node) { /* Reset counts or remove node for unloaded modules. */ - if (node->ghost) + if (node->num_loaded == 0) remove_node(node); else - gcov_info_reset(node->info); + reset_node(node); } /* Reset counts for open file. */ gcov_info_reset(info); @@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info, INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); INIT_LIST_HEAD(&node->all); - node->info = info; + if (node->loaded_info) { + node->loaded_info[0] = info; + node->num_loaded = 1; + } node->parent = parent; if (name) strcpy(node->name, name); @@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent, struct gcov_node *node; node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL); - if (!node) { - pr_warning("out of memory\n"); - return NULL; + if (!node) + goto err_nomem; + if (info) { + node->loaded_info = kcalloc(1, sizeof(struct gcov_info *), + GFP_KERNEL); + if (!node->loaded_info) + goto err_nomem; } init_node(node, info, name, parent); /* Differentiate between gcov data file nodes and directory nodes. 
*/ @@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent, list_add(&node->all, &all_head); return node; + +err_nomem: + kfree(node); + pr_warning("out of memory\n"); + return NULL; } /* Remove symbolic links associated with node. */ @@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node) list_del(&node->all); debugfs_remove(node->dentry); remove_links(node); - if (node->ghost) - gcov_info_free(node->ghost); + kfree(node->loaded_info); + if (node->unloaded_info) + gcov_info_free(node->unloaded_info); kfree(node); } @@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent, /* * write() implementation for reset file. Reset all profiling data to zero - * and remove ghost nodes. + * and remove nodes for which all associated object files are unloaded. */ static ssize_t reset_write(struct file *file, const char __user *addr, size_t len, loff_t *pos) @@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr, mutex_lock(&node_lock); restart: list_for_each_entry(node, &all_head, all) { - if (node->info) - gcov_info_reset(node->info); + if (node->num_loaded > 0) + reset_node(node); else if (list_empty(&node->children)) { remove_node(node); /* Several nodes may have gone - restart loop. */ @@ -564,37 +614,115 @@ err_remove: } /* - * The profiling data set associated with this node is being unloaded. Store a - * copy of the profiling data and turn this node into a "ghost". + * Associate a profiling data set with an existing node. Needs to be called + * with node_lock held. */ -static int ghost_node(struct gcov_node *node) +static void add_info(struct gcov_node *node, struct gcov_info *info) { - node->ghost = gcov_info_dup(node->info); - if (!node->ghost) { - pr_warning("could not save data for '%s' (out of memory)\n", - node->info->filename); - return -ENOMEM; + struct gcov_info **loaded_info; + int num = node->num_loaded; + + /* + * Prepare new array. This is done first to simplify cleanup in + * case the new data set is incompatible, the node only contains + * unloaded data sets and there's not enough memory for the array. + */ + loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL); + if (!loaded_info) { + pr_warning("could not add '%s' (out of memory)\n", + info->filename); + return; + } + memcpy(loaded_info, node->loaded_info, + num * sizeof(struct gcov_info *)); + loaded_info[num] = info; + /* Check if the new data set is compatible. */ + if (num == 0) { + /* + * A module was unloaded, modified and reloaded. The new + * data set replaces the copy of the last one. + */ + if (!gcov_info_is_compatible(node->unloaded_info, info)) { + pr_warning("discarding saved data for %s " + "(incompatible version)\n", info->filename); + gcov_info_free(node->unloaded_info); + node->unloaded_info = NULL; + } + } else { + /* + * Two different versions of the same object file are loaded. + * The initial one takes precedence. + */ + if (!gcov_info_is_compatible(node->loaded_info[0], info)) { + pr_warning("could not add '%s' (incompatible " + "version)\n", info->filename); + kfree(loaded_info); + return; + } } - node->info = NULL; + /* Overwrite previous array. */ + kfree(node->loaded_info); + node->loaded_info = loaded_info; + node->num_loaded = num + 1; +} - return 0; +/* + * Return the index of a profiling data set associated with a node. 
+ */ +static int get_info_index(struct gcov_node *node, struct gcov_info *info) +{ + int i; + + for (i = 0; i < node->num_loaded; i++) { + if (node->loaded_info[i] == info) + return i; + } + return -ENOENT; } /* - * Profiling data for this node has been loaded again. Add profiling data - * from previous instantiation and turn this node into a regular node. + * Save the data of a profiling data set which is being unloaded. */ -static void revive_node(struct gcov_node *node, struct gcov_info *info) +static void save_info(struct gcov_node *node, struct gcov_info *info) { - if (gcov_info_is_compatible(node->ghost, info)) - gcov_info_add(info, node->ghost); + if (node->unloaded_info) + gcov_info_add(node->unloaded_info, info); else { - pr_warning("discarding saved data for '%s' (version changed)\n", + node->unloaded_info = gcov_info_dup(info); + if (!node->unloaded_info) { + pr_warning("could not save data for '%s' " + "(out of memory)\n", info->filename); + } + } +} + +/* + * Disassociate a profiling data set from a node. Needs to be called with + * node_lock held. + */ +static void remove_info(struct gcov_node *node, struct gcov_info *info) +{ + int i; + + i = get_info_index(node, info); + if (i < 0) { + pr_warning("could not remove '%s' (not found)\n", info->filename); + return; } - gcov_info_free(node->ghost); - node->ghost = NULL; - node->info = info; + if (gcov_persist) + save_info(node, info); + /* Shrink array. */ + node->loaded_info[i] = node->loaded_info[node->num_loaded - 1]; + node->num_loaded--; + if (node->num_loaded > 0) + return; + /* Last loaded data set was removed. */ + kfree(node->loaded_info); + node->loaded_info = NULL; + node->num_loaded = 0; + if (!node->unloaded_info) + remove_node(node); } /* @@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info) node = get_node_by_name(info->filename); switch (action) { case GCOV_ADD: - /* Add new node or revive ghost. */ - if (!node) { + if (node) + add_info(node, info); + else add_node(info); - break; - } - if (gcov_persist) - revive_node(node, info); - else { - pr_warning("could not add '%s' (already exists)\n", - info->filename); - } break; case GCOV_REMOVE: - /* Remove node or turn into ghost. 
*/ - if (!node) { + if (node) + remove_info(node, info); + else { pr_warning("could not remove '%s' (not found)\n", info->filename); - break; } - if (gcov_persist) { - if (!ghost_node(node)) - break; - } - remove_node(node); break; } mutex_unlock(&node_lock); diff --git a/kernel/groups.c b/kernel/groups.c index 53b1916..253dc0f 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp) right = group_info->ngroups; while (left < right) { unsigned int mid = (left+right)/2; - int cmp = grp - GROUP_AT(group_info, mid); - if (cmp > 0) + if (grp > GROUP_AT(group_info, mid)) left = mid + 1; - else if (cmp < 0) + else if (grp < GROUP_AT(group_info, mid)) right = mid; else return 1; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 5c69e99..21e0c5e 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -936,6 +936,7 @@ static inline int remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) { if (hrtimer_is_queued(timer)) { + unsigned long state; int reprogram; /* @@ -949,8 +950,13 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base) debug_deactivate(timer); timer_stats_hrtimer_clear_start_info(timer); reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases); - __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, - reprogram); + /* + * We must preserve the CALLBACK state flag here, + * otherwise we could move the timer base in + * switch_hrtimer_base. + */ + state = timer->state & HRTIMER_STATE_CALLBACK; + __remove_hrtimer(timer, base, state, reprogram); return 1; } return 0; @@ -1237,6 +1243,9 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now) BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); enqueue_hrtimer(timer, base); } + + WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK)); + timer->state &= ~HRTIMER_STATE_CALLBACK; } diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c index 7a56b22..6eb6422 100644 --- a/kernel/hw_breakpoint.c +++ b/kernel/hw_breakpoint.c @@ -417,7 +417,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr, perf_overflow_handler_t triggered, struct task_struct *tsk) { - return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); + return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk), + triggered); } EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index e149748..6a2281f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -216,7 +216,7 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) { if (suspend) { - if (!desc->action || (desc->action->flags & IRQF_TIMER)) + if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) return; desc->status |= IRQ_SUSPENDED; } @@ -1093,7 +1093,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, if (retval) kfree(action); -#ifdef CONFIG_DEBUG_SHIRQ +#ifdef CONFIG_DEBUG_SHIRQ_FIXME if (!retval && (irqflags & IRQF_SHARED)) { /* * It's a shared IRQ -- the driver ought to be prepared for it diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 09a2ee5..345e0b7 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -214,7 +214,7 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v) static int irq_spurious_proc_open(struct inode *inode, struct file *file) { - return single_open(file, irq_spurious_proc_show, NULL); + return single_open(file, irq_spurious_proc_show, 
PDE(inode)->data); } static const struct file_operations irq_spurious_proc_fops = { diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 877fb30..17110a4 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) account_global_scheduler_latency(tsk, &lat); - /* - * short term hack; if we're > 32 we stop; future we recycle: - */ - tsk->latency_record_count++; - if (tsk->latency_record_count >= LT_SAVECOUNT) - goto out_unlock; - - for (i = 0; i < LT_SAVECOUNT; i++) { + for (i = 0; i < tsk->latency_record_count; i++) { struct latency_record *mylat; int same = 1; @@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) } } + /* + * short term hack; if we're > 32 we stop; future we recycle: + */ + if (tsk->latency_record_count >= LT_SAVECOUNT) + goto out_unlock; + /* Allocated a new one: */ - i = tsk->latency_record_count; + i = tsk->latency_record_count++; memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); out_unlock: diff --git a/kernel/perf_event.c b/kernel/perf_event.c index ff86c55..fa8123e 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -58,7 +58,8 @@ static atomic_t nr_task_events __read_mostly; */ int sysctl_perf_event_paranoid __read_mostly = 1; -int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */ +/* Minimum for 128 pages + 1 for the user control page */ +int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */ /* * max perf event sample rate @@ -1609,8 +1610,12 @@ static void rotate_ctx(struct perf_event_context *ctx) { raw_spin_lock(&ctx->lock); - /* Rotate the first entry last of non-pinned groups */ - list_rotate_left(&ctx->flexible_groups); + /* + * Rotate the first entry last of non-pinned groups. Rotation might be + * disabled by the inheritance code. + */ + if (!ctx->rotate_disable) + list_rotate_left(&ctx->flexible_groups); raw_spin_unlock(&ctx->lock); } @@ -1757,7 +1762,13 @@ static u64 perf_event_read(struct perf_event *event) unsigned long flags; raw_spin_lock_irqsave(&ctx->lock, flags); - update_context_time(ctx); + /* + * may read while context is not active + * (e.g., thread is blocked), in that case + * we cannot update context time + */ + if (ctx->is_active) + update_context_time(ctx); update_event_times(event); raw_spin_unlock_irqrestore(&ctx->lock, flags); } @@ -5390,17 +5401,20 @@ __perf_event_exit_task(struct perf_event *child_event, struct perf_event_context *child_ctx, struct task_struct *child) { - struct perf_event *parent_event; + if (child_event->parent) { + raw_spin_lock_irq(&child_ctx->lock); + perf_group_detach(child_event); + raw_spin_unlock_irq(&child_ctx->lock); + } perf_event_remove_from_context(child_event); - parent_event = child_event->parent; /* - * It can happen that parent exits first, and has events + * It can happen that the parent exits first, and has events * that are still around due to the child reference. These - * events need to be zapped - but otherwise linger. + * events need to be zapped. 
*/ - if (parent_event) { + if (child_event->parent) { sync_child_event(child_event, child); free_event(child_event); } @@ -5590,6 +5604,7 @@ int perf_event_init_task(struct task_struct *child) struct perf_event *event; struct task_struct *parent = current; int inherited_all = 1; + unsigned long flags; int ret = 0; child->perf_event_ctxp = NULL; @@ -5630,6 +5645,15 @@ int perf_event_init_task(struct task_struct *child) break; } + /* + * We can't hold ctx->lock when iterating the ->flexible_group list due + * to allocations, but we need to prevent rotation because + * rotate_ctx() will change the list from interrupt context. + */ + raw_spin_lock_irqsave(&parent_ctx->lock, flags); + parent_ctx->rotate_disable = 1; + raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); + list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { ret = inherit_task_group(event, parent, parent_ctx, child, &inherited_all); @@ -5637,6 +5661,10 @@ int perf_event_init_task(struct task_struct *child) break; } + raw_spin_lock_irqsave(&parent_ctx->lock, flags); + parent_ctx->rotate_disable = 0; + raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); + child_ctx = child->perf_event_ctxp; if (child_ctx && inherited_all) { diff --git a/kernel/pid.c b/kernel/pid.c index e9fd8c1..4d0a9fc 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -183,11 +183,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns) return -1; } -int next_pidmap(struct pid_namespace *pid_ns, int last) +int next_pidmap(struct pid_namespace *pid_ns, unsigned int last) { int offset; struct pidmap *map, *end; + if (last >= PID_MAX_LIMIT) + return -1; + offset = (last + 1) & BITS_PER_PAGE_MASK; map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE]; end = &pid_ns->pidmap[PIDMAP_ENTRIES]; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index aa9e916..684463e 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -324,7 +324,6 @@ static int create_image(int platform_mode) int hibernation_snapshot(int platform_mode) { int error; - gfp_t saved_mask; error = platform_begin(platform_mode); if (error) @@ -336,7 +335,7 @@ int hibernation_snapshot(int platform_mode) goto Close; suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); + pm_restrict_gfp_mask(); error = dpm_suspend_start(PMSG_FREEZE); if (error) goto Recover_platform; @@ -345,7 +344,10 @@ int hibernation_snapshot(int platform_mode) goto Recover_platform; error = create_image(platform_mode); - /* Control returns here after successful restore */ + /* + * Control returns here (1) after the image has been created or the + * image creation has failed and (2) after a successful restore. + */ Resume_devices: /* We may need to release the preallocated image pages here. */ @@ -354,7 +356,10 @@ int hibernation_snapshot(int platform_mode) dpm_resume_end(in_suspend ? (error ? 
PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); - set_gfp_allowed_mask(saved_mask); + + if (error || !in_suspend) + pm_restore_gfp_mask(); + resume_console(); Close: platform_end(platform_mode); @@ -449,17 +454,16 @@ static int resume_target_kernel(bool platform_mode) int hibernation_restore(int platform_mode) { int error; - gfp_t saved_mask; pm_prepare_console(); suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); + pm_restrict_gfp_mask(); error = dpm_suspend_start(PMSG_QUIESCE); if (!error) { error = resume_target_kernel(platform_mode); dpm_resume_end(PMSG_RECOVER); } - set_gfp_allowed_mask(saved_mask); + pm_restore_gfp_mask(); resume_console(); pm_restore_console(); return error; @@ -473,7 +477,6 @@ int hibernation_restore(int platform_mode) int hibernation_platform_enter(void) { int error; - gfp_t saved_mask; if (!hibernation_ops) return -ENOSYS; @@ -489,7 +492,6 @@ int hibernation_platform_enter(void) entering_platform_hibernation = true; suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); error = dpm_suspend_start(PMSG_HIBERNATE); if (error) { if (hibernation_ops->recover) @@ -527,7 +529,6 @@ int hibernation_platform_enter(void) Resume_devices: entering_platform_hibernation = false; dpm_resume_end(PMSG_RESTORE); - set_gfp_allowed_mask(saved_mask); resume_console(); Close: @@ -635,6 +636,7 @@ int hibernate(void) swsusp_free(); if (!error) power_down(); + pm_restore_gfp_mask(); } else { pr_debug("PM: Image restored successfully.\n"); } diff --git a/kernel/power/main.c b/kernel/power/main.c index b58800b..51e2643 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -266,6 +266,7 @@ static int __init pm_init(void) int error = pm_start_workqueue(); if (error) return error; + hibernate_image_size_init(); power_kobj = kobject_create_and_add("power", NULL); if (!power_kobj) return -ENOMEM; diff --git a/kernel/power/power.h b/kernel/power/power.h index 006270f..54580cb 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -14,6 +14,9 @@ struct swsusp_info { } __attribute__((aligned(PAGE_SIZE))); #ifdef CONFIG_HIBERNATION +/* kernel/power/snapshot.c */ +extern void __init hibernate_image_size_init(void); + #ifdef CONFIG_ARCH_HIBERNATION_HEADER /* Maximum size of architecture specific data in a hibernation header */ #define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4) @@ -49,7 +52,11 @@ static inline char *check_image_kernel(struct swsusp_info *info) extern int hibernation_snapshot(int platform_mode); extern int hibernation_restore(int platform_mode); extern int hibernation_platform_enter(void); -#endif + +#else /* !CONFIG_HIBERNATION */ + +static inline void hibernate_image_size_init(void) {} +#endif /* !CONFIG_HIBERNATION */ extern int pfn_is_nosave(unsigned long); diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 25ce010..4d8548d 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -46,7 +46,12 @@ static void swsusp_unset_page_forbidden(struct page *); * size will not exceed N bytes, but if that is impossible, it will * try to create the smallest image possible. 
*/ -unsigned long image_size = 500 * 1024 * 1024; +unsigned long image_size; + +void __init hibernate_image_size_init(void) +{ + image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE; +} /* List of PBEs needed for restoring the pages that were allocated before * the suspend and included in the suspend image, but have also been @@ -1121,9 +1126,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) return nr_alloc; } -static unsigned long preallocate_image_memory(unsigned long nr_pages) +static unsigned long preallocate_image_memory(unsigned long nr_pages, + unsigned long avail_normal) { - return preallocate_image_pages(nr_pages, GFP_IMAGE); + unsigned long alloc; + + if (avail_normal <= alloc_normal) + return 0; + + alloc = avail_normal - alloc_normal; + if (nr_pages < alloc) + alloc = nr_pages; + + return preallocate_image_pages(alloc, GFP_IMAGE); } #ifdef CONFIG_HIGHMEM @@ -1169,15 +1184,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages, */ static void free_unnecessary_pages(void) { - unsigned long save_highmem, to_free_normal, to_free_highmem; + unsigned long save, to_free_normal, to_free_highmem; - to_free_normal = alloc_normal - count_data_pages(); - save_highmem = count_highmem_pages(); - if (alloc_highmem > save_highmem) { - to_free_highmem = alloc_highmem - save_highmem; + save = count_data_pages(); + if (alloc_normal >= save) { + to_free_normal = alloc_normal - save; + save = 0; + } else { + to_free_normal = 0; + save -= alloc_normal; + } + save += count_highmem_pages(); + if (alloc_highmem >= save) { + to_free_highmem = alloc_highmem - save; } else { to_free_highmem = 0; - to_free_normal -= save_highmem - alloc_highmem; + to_free_normal -= save - alloc_highmem; } memory_bm_position_reset(&copy_bm); @@ -1258,7 +1280,7 @@ int hibernate_preallocate_memory(void) { struct zone *zone; unsigned long saveable, size, max_size, count, highmem, pages = 0; - unsigned long alloc, save_highmem, pages_highmem; + unsigned long alloc, save_highmem, pages_highmem, avail_normal; struct timeval start, stop; int error; @@ -1295,26 +1317,38 @@ int hibernate_preallocate_memory(void) else count += zone_page_state(zone, NR_FREE_PAGES); } + avail_normal = count; count += highmem; count -= totalreserve_pages; /* Compute the maximum number of saveable pages to leave in memory. */ max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES; + /* Compute the desired number of image pages specified by image_size. */ size = DIV_ROUND_UP(image_size, PAGE_SIZE); if (size > max_size) size = max_size; /* - * If the maximum is not less than the current number of saveable pages - * in memory, allocate page frames for the image and we're done. + * If the desired number of image pages is at least as large as the + * current number of saveable pages in memory, allocate page frames for + * the image and we're done. */ if (size >= saveable) { pages = preallocate_image_highmem(save_highmem); - pages += preallocate_image_memory(saveable - pages); + pages += preallocate_image_memory(saveable - pages, avail_normal); goto out; } /* Estimate the minimum size of the image. */ pages = minimum_image_size(saveable); + /* + * To avoid excessive pressure on the normal zone, leave room in it to + * accommodate an image of the minimum size (unless it's already too + * small, in which case don't preallocate pages from it at all).
+ */ + if (avail_normal > pages) + avail_normal -= pages; + else + avail_normal = 0; if (size < pages) size = min_t(unsigned long, pages, max_size); @@ -1335,16 +1369,34 @@ int hibernate_preallocate_memory(void) */ pages_highmem = preallocate_image_highmem(highmem / 2); alloc = (count - max_size) - pages_highmem; - pages = preallocate_image_memory(alloc); - if (pages < alloc) - goto err_out; - size = max_size - size; - alloc = size; - size = preallocate_highmem_fraction(size, highmem, count); - pages_highmem += size; - alloc -= size; - pages += preallocate_image_memory(alloc); - pages += pages_highmem; + pages = preallocate_image_memory(alloc, avail_normal); + if (pages < alloc) { + /* We have exhausted non-highmem pages, try highmem. */ + alloc -= pages; + pages += pages_highmem; + pages_highmem = preallocate_image_highmem(alloc); + if (pages_highmem < alloc) + goto err_out; + pages += pages_highmem; + /* + * size is the desired number of saveable pages to leave in + * memory, so try to preallocate (all memory - size) pages. + */ + alloc = (count - pages) - size; + pages += preallocate_image_highmem(alloc); + } else { + /* + * There are approximately max_size saveable pages at this point + * and we want to reduce this number down to size. + */ + alloc = max_size - size; + size = preallocate_highmem_fraction(alloc, highmem, count); + pages_highmem += size; + alloc -= size; + size = preallocate_image_memory(alloc, avail_normal); + pages_highmem += preallocate_image_highmem(alloc - size); + pages += pages_highmem + size; + } /* * We only need as many page frames for the image as there are saveable @@ -1467,11 +1519,8 @@ static int swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, unsigned int nr_pages, unsigned int nr_highmem) { - int error = 0; - if (nr_highmem > 0) { - error = get_highmem_buffer(PG_ANY); - if (error) + if (get_highmem_buffer(PG_ANY)) goto err_out; if (nr_highmem > alloc_highmem) { nr_highmem -= alloc_highmem; @@ -1494,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, err_out: swsusp_free(); - return error; + return -ENOMEM; } asmlinkage int swsusp_save(void) diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index f37cb7d..b0f28dd 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -196,7 +196,6 @@ static int suspend_enter(suspend_state_t state) int suspend_devices_and_enter(suspend_state_t state) { int error; - gfp_t saved_mask; if (!suspend_ops) return -ENOSYS; @@ -207,7 +206,7 @@ int suspend_devices_and_enter(suspend_state_t state) goto Close; } suspend_console(); - saved_mask = clear_gfp_allowed_mask(GFP_IOFS); + pm_restrict_gfp_mask(); suspend_test_start(); error = dpm_suspend_start(PMSG_SUSPEND); if (error) { @@ -224,7 +223,7 @@ int suspend_devices_and_enter(suspend_state_t state) suspend_test_start(); dpm_resume_end(PMSG_RESUME); suspend_test_finish("resume devices"); - set_gfp_allowed_mask(saved_mask); + pm_restore_gfp_mask(); resume_console(); Close: if (suspend_ops->end) diff --git a/kernel/power/user.c b/kernel/power/user.c index e819e17..c36c3b9 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -137,7 +137,7 @@ static int snapshot_release(struct inode *inode, struct file *filp) free_all_swap_pages(data->swap); if (data->frozen) thaw_processes(); - pm_notifier_call_chain(data->mode == O_WRONLY ? + pm_notifier_call_chain(data->mode == O_RDONLY ? 
PM_POST_HIBERNATION : PM_POST_RESTORE); atomic_inc(&snapshot_device_available); @@ -263,6 +263,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, case SNAPSHOT_UNFREEZE: if (!data->frozen || data->ready) break; + pm_restore_gfp_mask(); thaw_processes(); usermodehelper_enable(); data->frozen = 0; @@ -275,6 +276,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, error = -EPERM; break; } + pm_restore_gfp_mask(); error = hibernation_snapshot(data->platform_support); if (!error) error = put_user(in_suspend, (int __user *)arg); diff --git a/kernel/printk.c b/kernel/printk.c index 444b770..b7b7f90 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1034,13 +1034,15 @@ void printk_tick(void) int printk_needs_cpu(int cpu) { + if (unlikely(cpu_is_offline(cpu))) + printk_tick(); return per_cpu(printk_pending, cpu); } void wake_up_klogd(void) { if (waitqueue_active(&log_wait)) - __raw_get_cpu_var(printk_pending) = 1; + this_cpu_write(printk_pending, 1); } /** diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 74a3d69..752321e 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -313,7 +313,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data) child->exit_code = data; dead = __ptrace_detach(current, child); if (!child->exit_state) - wake_up_process(child); + wake_up_state(child, TASK_TRACED | TASK_STOPPED); } write_unlock_irq(&tasklist_lock); diff --git a/kernel/sched.c b/kernel/sched.c index f52a880..d40d662 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -491,6 +491,7 @@ struct rq { struct mm_struct *prev_mm; u64 clock; + u64 clock_task; atomic_t nr_iowait; @@ -518,6 +519,10 @@ struct rq { u64 avg_idle; #endif +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + u64 prev_irq_time; +#endif + /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; @@ -564,7 +569,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. 
*/ - if (test_tsk_need_resched(p)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -641,10 +646,21 @@ static inline struct task_group *task_group(struct task_struct *p) #endif /* CONFIG_CGROUP_SCHED */ +static u64 irq_time_cpu(int cpu); +static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); + inline void update_rq_clock(struct rq *rq) { + int cpu = cpu_of(rq); + u64 irq_time; + if (!rq->skip_clock_update) rq->clock = sched_clock_cpu(cpu_of(rq)); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -721,7 +737,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; - char *cmp = buf; + char *cmp; int neg = 0; int i; @@ -732,6 +748,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, return -EFAULT; buf[cnt] = 0; + cmp = strstrip(buf); if (strncmp(buf, "NO_", 3) == 0) { neg = 1; @@ -739,9 +756,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf, } for (i = 0; sched_feat_names[i]; i++) { - int len = strlen(sched_feat_names[i]); - - if (strncmp(cmp, sched_feat_names[i], len) == 0) { + if (strcmp(cmp, sched_feat_names[i]) == 0) { if (neg) sysctl_sched_features &= ~(1UL << i); else @@ -1232,16 +1247,6 @@ void wake_up_idle_cpu(int cpu) smp_send_reschedule(cpu); } -int nohz_ratelimit(int cpu) -{ - struct rq *rq = cpu_rq(cpu); - u64 diff = rq->clock - rq->nohz_stamp; - - rq->nohz_stamp = rq->clock; - - return diff < (NSEC_PER_SEC / HZ) >> 1; -} - #endif /* CONFIG_NO_HZ */ static u64 sched_avg_period(void) @@ -1281,6 +1286,10 @@ static void resched_task(struct task_struct *p) static void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { } + +static void sched_avg_update(struct rq *rq) +{ +} #endif /* CONFIG_SMP */ #if BITS_PER_LONG == 32 @@ -1826,6 +1835,94 @@ static const struct sched_class rt_sched_class; #define for_each_class(class) \ for (class = sched_class_highest; class; class = class->next) +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + +/* + * There are no locks covering percpu hardirq/softirq time. + * They are only modified in account_system_vtime, on corresponding CPU + * with interrupts disabled. So, writes are safe. + * They are read and saved off onto struct rq in update_rq_clock(). + * This may result in other CPU reading this CPU's irq time and can + * race with irq/account_system_vtime on this CPU. We would either get old + * or new value (or semi updated value on 32 bit) with a side effect of + * accounting a slice of irq time to wrong task when irq is in progress + * while we read rq->clock. That is a worthy compromise in place of having + * locks on each irq in account_system_time. 
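The update_rq_clock() hunk above subtracts the accumulated per-cpu irq time from rq->clock so that rq->clock_task only ever advances by time a task could actually have used. A minimal user-space sketch of that clamp, with invented nanosecond values:

#include <stdio.h>

static unsigned long long clock_task;

static void update_clock_task(unsigned long long clock, unsigned long long irq_time)
{
	/* never move clock_task backwards; charge tasks only for clock - irq_time */
	if (clock - irq_time > clock_task)
		clock_task = clock - irq_time;
}

int main(void)
{
	update_clock_task(1000000, 0);       /* 1 ms in, no irq time accumulated yet  */
	update_clock_task(2000000, 300000);  /* 1 ms later, 0.3 ms was spent in irqs  */
	printf("clock_task = %llu ns\n", clock_task);  /* 1700000: irq time excluded */
	return 0;
}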
+ */ +static DEFINE_PER_CPU(u64, cpu_hardirq_time); +static DEFINE_PER_CPU(u64, cpu_softirq_time); + +static DEFINE_PER_CPU(u64, irq_start_time); +static int sched_clock_irqtime; + +void enable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 1; +} + +void disable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 0; +} + +static u64 irq_time_cpu(int cpu) +{ + if (!sched_clock_irqtime) + return 0; + + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); +} + +void account_system_vtime(struct task_struct *curr) +{ + unsigned long flags; + int cpu; + u64 now, delta; + + if (!sched_clock_irqtime) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + now = sched_clock_cpu(cpu); + delta = now - per_cpu(irq_start_time, cpu); + per_cpu(irq_start_time, cpu) = now; + /* + * We do not account for softirq time from ksoftirqd here. + * We want to continue accounting softirq time to ksoftirqd thread + * in that case, so as not to confuse scheduler with a special task + * that do not consume any time, but still wants to run. + */ + if (hardirq_count()) + per_cpu(cpu_hardirq_time, cpu) += delta; + else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD)) + per_cpu(cpu_softirq_time, cpu) += delta; + + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(account_system_vtime); + +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) +{ + if (sched_clock_irqtime && sched_feat(NONIRQ_POWER)) { + u64 delta_irq = curr_irq_time - rq->prev_irq_time; + rq->prev_irq_time = curr_irq_time; + sched_rt_avg_update(rq, delta_irq); + } +} + +#else + +static u64 irq_time_cpu(int cpu) +{ + return 0; +} + +static void sched_irq_time_avg_update(struct rq *rq, u64 curr_irq_time) { } + +#endif + #include "sched_stats.h" static void inc_nr_running(struct rq *rq) @@ -1840,12 +1937,6 @@ static void dec_nr_running(struct rq *rq) static void set_load_weight(struct task_struct *p) { - if (task_has_rt_policy(p)) { - p->se.load.weight = 0; - p->se.load.inv_weight = WMULT_CONST; - return; - } - /* * SCHED_IDLE tasks get minimal weight: */ @@ -1985,6 +2076,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) if (p->sched_class != &fair_sched_class) return 0; + if (unlikely(p->policy == SCHED_IDLE)) + return 0; + /* * Buddy candidates are cache hot: */ @@ -2907,6 +3001,15 @@ static long calc_load_fold_active(struct rq *this_rq) return delta; } +static unsigned long +calc_load(unsigned long load, unsigned long exp, unsigned long active) +{ + load *= exp; + load += active * (FIXED_1 - exp); + load += 1UL << (FSHIFT - 1); + return load >> FSHIFT; +} + #ifdef CONFIG_NO_HZ /* * For NO_HZ we delay the active fold to the next LOAD_FREQ update. @@ -2936,6 +3039,128 @@ static long calc_load_fold_idle(void) return delta; } + +/** + * fixed_power_int - compute: x^n, in O(log n) time + * + * @x: base of the power + * @frac_bits: fractional bits of @x + * @n: power to raise @x to. + * + * By exploiting the relation between the definition of the natural power + * function: x^n := x*x*...*x (x multiplied by itself for n times), and + * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i, + * (where: n_i \elem {0, 1}, the binary vector representing n), + * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is + * of course trivially computable in O(log_2 n), the length of our binary + * vector. 
+ */ +static unsigned long +fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n) +{ + unsigned long result = 1UL << frac_bits; + + if (n) for (;;) { + if (n & 1) { + result *= x; + result += 1UL << (frac_bits - 1); + result >>= frac_bits; + } + n >>= 1; + if (!n) + break; + x *= x; + x += 1UL << (frac_bits - 1); + x >>= frac_bits; + } + + return result; +} + +/* + * a1 = a0 * e + a * (1 - e) + * + * a2 = a1 * e + a * (1 - e) + * = (a0 * e + a * (1 - e)) * e + a * (1 - e) + * = a0 * e^2 + a * (1 - e) * (1 + e) + * + * a3 = a2 * e + a * (1 - e) + * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e) + * = a0 * e^3 + a * (1 - e) * (1 + e + e^2) + * + * ... + * + * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1] + * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e) + * = a0 * e^n + a * (1 - e^n) + * + * [1] application of the geometric series: + * + * n 1 - x^(n+1) + * S_n := \Sum x^i = ------------- + * i=0 1 - x + */ +static unsigned long +calc_load_n(unsigned long load, unsigned long exp, + unsigned long active, unsigned int n) +{ + + return calc_load(load, fixed_power_int(exp, FSHIFT, n), active); +} + +/* + * NO_HZ can leave us missing all per-cpu ticks calling + * calc_load_account_active(), but since an idle CPU folds its delta into + * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold + * in the pending idle delta if our idle period crossed a load cycle boundary. + * + * Once we've updated the global active value, we need to apply the exponential + * weights adjusted to the number of cycles missed. + */ +static void calc_global_nohz(unsigned long ticks) +{ + long delta, active, n; + + if (time_before(jiffies, calc_load_update)) + return; + + /* + * If we crossed a calc_load_update boundary, make sure to fold + * any pending idle changes, the respective CPUs might have + * missed the tick driven calc_load_account_active() update + * due to NO_HZ. + */ + delta = calc_load_fold_idle(); + if (delta) + atomic_long_add(delta, &calc_load_tasks); + + /* + * If we were idle for multiple load cycles, apply them. + */ + if (ticks >= LOAD_FREQ) { + n = ticks / LOAD_FREQ; + + active = atomic_long_read(&calc_load_tasks); + active = active > 0 ? active * FIXED_1 : 0; + + avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n); + avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); + avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); + + calc_load_update += n * LOAD_FREQ; + } + + /* + * Its possible the remainder of the above division also crosses + * a LOAD_FREQ period, the regular check in calc_global_load() + * which comes after this will take care of that. + * + * Consider us being 11 ticks before a cycle completion, and us + * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will + * age us 4 cycles, and the test in calc_global_load() will + * pick up the final one. + */ +} #else static void calc_load_account_idle(struct rq *this_rq) { @@ -2945,6 +3170,10 @@ static inline long calc_load_fold_idle(void) { return 0; } + +static void calc_global_nohz(unsigned long ticks) +{ +} #endif /** @@ -2962,24 +3191,17 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift) loads[2] = (avenrun[2] + offset) << shift; } -static unsigned long -calc_load(unsigned long load, unsigned long exp, unsigned long active) -{ - load *= exp; - load += active * (FIXED_1 - exp); - return load >> FSHIFT; -} - /* * calc_load - update the avenrun load estimates 10 ticks after the * CPUs have updated calc_load_tasks. 
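Since calc_load_n() above folds n missed LOAD_FREQ periods into a single update via fixed_power_int(), one batched step should match n individual calc_load() steps up to fixed-point rounding. A self-contained sketch that reuses the helpers from this hunk, with FSHIFT, FIXED_1 and EXP_1 taken from include/linux/sched.h and an invented starting load:

#include <stdio.h>

#define FSHIFT  11
#define FIXED_1 (1UL << FSHIFT)
#define EXP_1   1884   /* 1/exp(5sec/1min), the 1-minute decay factor */

static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;

	if (n) for (;;) {
		if (n & 1) {
			result *= x;
			result += 1UL << (frac_bits - 1);
			result >>= frac_bits;
		}
		n >>= 1;
		if (!n)
			break;
		x *= x;
		x += 1UL << (frac_bits - 1);
		x >>= frac_bits;
	}
	return result;
}

static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	load += 1UL << (FSHIFT - 1);
	return load >> FSHIFT;
}

static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
	    unsigned long active, unsigned int n)
{
	return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}

int main(void)
{
	unsigned long avenrun = 3 * FIXED_1;  /* load average 3.00 when the CPU went idle */
	unsigned long active = 0;             /* nothing runnable while idle              */
	unsigned int n = 12;                  /* 12 missed LOAD_FREQ periods, ~1 minute   */
	unsigned long step = avenrun, batched;
	unsigned int i;

	for (i = 0; i < n; i++)               /* what n individual updates would have done */
		step = calc_load(step, EXP_1, active);
	batched = calc_load_n(avenrun, EXP_1, active, n);

	printf("step-by-step: %lu.%02lu\n", step / FIXED_1, (step % FIXED_1) * 100 / FIXED_1);
	printf("calc_load_n : %lu.%02lu\n", batched / FIXED_1, (batched % FIXED_1) * 100 / FIXED_1);
	return 0;
}

Both prints come out around 1.10 here; the two paths can differ by a count or two in the last fixed-point digit because each calc_load() step rounds.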
*/ -void calc_global_load(void) +void calc_global_load(unsigned long ticks) { - unsigned long upd = calc_load_update + 10; long active; - if (time_before(jiffies, upd)) + calc_global_nohz(ticks); + + if (time_before(jiffies, calc_load_update + 10)) return; active = atomic_long_read(&calc_load_tasks); @@ -3041,6 +3263,8 @@ static void update_cpu_load(struct rq *this_rq) } calc_load_account_active(this_rq); + + sched_avg_update(this_rq); } #ifdef CONFIG_SMP @@ -3094,7 +3318,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) if (task_current(rq, p)) { update_rq_clock(rq); - ns = rq->clock - p->se.exec_start; + ns = rq->clock_task - p->se.exec_start; if ((s64)ns < 0) ns = 0; } @@ -3243,7 +3467,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, tmp = cputime_to_cputime64(cputime); if (hardirq_count() - hardirq_offset) cpustat->irq = cputime64_add(cpustat->irq, tmp); - else if (softirq_count()) + else if (in_serving_softirq()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); else cpustat->system = cputime64_add(cpustat->system, tmp); @@ -3359,9 +3583,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) rtime = nsecs_to_cputime(p->se.sum_exec_runtime); if (total) { - u64 temp; + u64 temp = rtime; - temp = (u64)(rtime * utime); + temp *= utime; do_div(temp, total); utime = (cputime_t)temp; } else @@ -3392,9 +3616,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) rtime = nsecs_to_cputime(cputime.sum_exec_runtime); if (total) { - u64 temp; + u64 temp = rtime; - temp = (u64)(rtime * cputime.utime); + temp *= cputime.utime; do_div(temp, total); utime = (cputime_t)temp; } else @@ -3546,7 +3770,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -3609,7 +3832,6 @@ need_resched_nonpreemptible: hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { if (unlikely(signal_pending_state(prev->state, prev))) @@ -3626,6 +3848,8 @@ need_resched_nonpreemptible: put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -3704,8 +3928,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) /* * Owner changed, break to re-assess state. */ - if (lock->owner != owner) + if (lock->owner != owner) { + /* + * If the lock has switched to a different owner, + * we likely have heavy contention. Return 0 to quit + * optimistic spinning and not contend further: + */ + if (lock->owner) + return 0; break; + } /* * Is that owner really running on that cpu? @@ -5166,7 +5398,19 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) idle->se.exec_start = sched_clock(); cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); + /* + * We're having a chicken and egg problem, even though we are + * holding rq->lock, the cpu isn't yet set to this cpu so the + * lockdep check in task_group() will fail. + * + * Similar case to sched_fork(). / Alternatively we could + * use task_rq_lock() here and obtain the other rq->lock. 
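The task_times()/thread_group_times() hunks above load rtime into a u64 before multiplying because, with a 32-bit cputime_t, the product rtime * utime is evaluated in 32 bits and wraps before the old cast could help. A user-space sketch of the difference, using invented tick counts and assuming the usual 32-bit int:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rtime = 200000, utime = 150000;     /* invented cputime values */

	uint64_t wrong = (uint64_t)(rtime * utime);  /* 32-bit multiply, widened too late */
	uint64_t right = (uint64_t)rtime * utime;    /* widen first, 64-bit multiply      */

	printf("wrong = %llu\n", (unsigned long long)wrong);  /* 4230196224 (wrapped)  */
	printf("right = %llu\n", (unsigned long long)right);  /* 30000000000           */
	return 0;
}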
+ * + * Silence PROVE_RCU + */ + rcu_read_lock(); __set_task_cpu(idle, cpu); + rcu_read_unlock(); rq->curr = rq->idle = idle; #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) @@ -5184,7 +5428,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) * The idle tasks have their own, simple scheduling class: */ idle->sched_class = &idle_sched_class; - ftrace_graph_init_task(idle); + ftrace_graph_init_idle_task(idle, cpu); } /* @@ -6595,6 +6839,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) if (cpu != group_first_cpu(sd->groups)) return; + sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups)); + child = sd->child; sd->groups->cpu_power = 0; @@ -8088,12 +8334,12 @@ void sched_move_task(struct task_struct *tsk) if (unlikely(running)) tsk->sched_class->put_prev_task(rq, tsk); - set_task_rq(tsk, task_cpu(tsk)); - #ifdef CONFIG_FAIR_GROUP_SCHED - if (tsk->sched_class->moved_group) - tsk->sched_class->moved_group(tsk, on_rq); + if (tsk->sched_class->task_move_group) + tsk->sched_class->task_move_group(tsk, on_rq); + else #endif + set_task_rq(tsk, task_cpu(tsk)); if (unlikely(running)) tsk->sched_class->set_curr_task(rq); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index a878b53..09a9811 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -519,7 +519,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; - u64 now = rq_of(cfs_rq)->clock; + u64 now = rq_of(cfs_rq)->clock_task; unsigned long delta_exec; if (unlikely(!curr)) @@ -602,7 +602,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) /* * We are starting a new run period: */ - se->exec_start = rq_of(cfs_rq)->clock; + se->exec_start = rq_of(cfs_rq)->clock_task; } /************************************************** @@ -1765,6 +1765,10 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, set_task_cpu(p, this_cpu); activate_task(this_rq, p, 0); check_preempt_curr(this_rq, p, 0); + + /* re-arm NEWIDLE balancing when moving tasks */ + src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost; + this_rq->idle_stamp = 0; } /* @@ -1799,7 +1803,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, * 2) too many balance attempts have failed. */ - tsk_cache_hot = task_hot(p, rq->clock, sd); + tsk_cache_hot = task_hot(p, rq->clock_task, sd); if (!tsk_cache_hot || sd->nr_balance_failed > sd->cache_nice_tries) { #ifdef CONFIG_SCHEDSTATS @@ -2031,12 +2035,17 @@ struct sd_lb_stats { unsigned long this_load; unsigned long this_load_per_task; unsigned long this_nr_running; + unsigned long this_has_capacity; + unsigned int this_idle_cpus; /* Statistics of the busiest group */ + unsigned int busiest_idle_cpus; unsigned long max_load; unsigned long busiest_load_per_task; unsigned long busiest_nr_running; unsigned long busiest_group_capacity; + unsigned long busiest_has_capacity; + unsigned int busiest_group_weight; int group_imb; /* Is there imbalance in this sd */ #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -2058,7 +2067,10 @@ struct sg_lb_stats { unsigned long sum_nr_running; /* Nr tasks running in the group */ unsigned long sum_weighted_load; /* Weighted load of group's tasks */ unsigned long group_capacity; + unsigned long idle_cpus; + unsigned long group_weight; int group_imb; /* Is there an imbalance in the group ? 
*/ + int group_has_capacity; /* Is there extra capacity in the group? */ }; /** @@ -2268,11 +2280,14 @@ unsigned long scale_rt_power(int cpu) struct rq *rq = cpu_rq(cpu); u64 total, available; - sched_avg_update(rq); - total = sched_avg_period() + (rq->clock - rq->age_stamp); - available = total - rq->rt_avg; + if (unlikely(total < rq->rt_avg)) { + /* Ensures that power won't end up being negative */ + available = 0; + } else { + available = total - rq->rt_avg; + } if (unlikely((s64)total < SCHED_LOAD_SCALE)) total = SCHED_LOAD_SCALE; @@ -2354,7 +2369,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, int local_group, const struct cpumask *cpus, int *balance, struct sg_lb_stats *sgs) { - unsigned long load, max_cpu_load, min_cpu_load; + unsigned long load, max_cpu_load, min_cpu_load, max_nr_running; int i; unsigned int balance_cpu = -1, first_idle_cpu = 0; unsigned long avg_load_per_task = 0; @@ -2365,6 +2380,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, /* Tally up the load of all CPUs in the group */ max_cpu_load = 0; min_cpu_load = ~0UL; + max_nr_running = 0; for_each_cpu_and(i, sched_group_cpus(group), cpus) { struct rq *rq = cpu_rq(i); @@ -2382,8 +2398,10 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, load = target_load(i, load_idx); } else { load = source_load(i, load_idx); - if (load > max_cpu_load) + if (load > max_cpu_load) { max_cpu_load = load; + max_nr_running = rq->nr_running; + } if (min_cpu_load > load) min_cpu_load = load; } @@ -2391,7 +2409,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, sgs->group_load += load; sgs->sum_nr_running += rq->nr_running; sgs->sum_weighted_load += weighted_cpuload(i); - + if (idle_cpu(i)) + sgs->idle_cpus++; } /* @@ -2423,11 +2442,14 @@ static inline void update_sg_lb_stats(struct sched_domain *sd, if (sgs->sum_nr_running) avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; - if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) + if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1) sgs->group_imb = 1; - sgs->group_capacity = - DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); + sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE); + sgs->group_weight = group->group_weight; + + if (sgs->group_capacity > sgs->sum_nr_running) + sgs->group_has_capacity = 1; } /** @@ -2474,9 +2496,14 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, /* * In case the child domain prefers tasks go to siblings * first, lower the group capacity to one so that we'll try - * and move all the excess tasks away. + * and move all the excess tasks away. We lower the capacity + * of a group only if the local group has the capacity to fit + * these excess tasks, i.e. nr_running < group_capacity. The + * extra check prevents the case where you always pull from the + * heaviest group when it is already under-utilized (possible + * with a large weight task outweighs the tasks on the system). 
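update_sg_lb_stats() above derives group_capacity by rounding the group's cpu_power to the nearest multiple of SCHED_LOAD_SCALE (1024) and records group_has_capacity only when that capacity exceeds the number of running tasks. A sketch of the arithmetic, with a simplified DIV_ROUND_CLOSEST and assumed cpu_power values (1178 is meant to stand for an SMT pair sharing one core):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))   /* simplified for the sketch */

static void show(unsigned long cpu_power, unsigned long nr_running)
{
	unsigned long capacity = DIV_ROUND_CLOSEST(cpu_power, SCHED_LOAD_SCALE);

	printf("cpu_power=%4lu nr_running=%lu -> capacity=%lu has_capacity=%d\n",
	       cpu_power, nr_running, capacity, capacity > nr_running);
}

int main(void)
{
	show(1178, 1);   /* assumed SMT pair: capacity 1, already full with one task */
	show(2048, 1);   /* two independent CPUs: capacity 2, room for one more      */
	return 0;
}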
*/ - if (prefer_sibling) + if (prefer_sibling && !local_group && sds->this_has_capacity) sgs.group_capacity = min(sgs.group_capacity, 1UL); if (local_group) { @@ -2484,14 +2511,19 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu, sds->this = group; sds->this_nr_running = sgs.sum_nr_running; sds->this_load_per_task = sgs.sum_weighted_load; + sds->this_has_capacity = sgs.group_has_capacity; + sds->this_idle_cpus = sgs.idle_cpus; } else if (sgs.avg_load > sds->max_load && (sgs.sum_nr_running > sgs.group_capacity || sgs.group_imb)) { sds->max_load = sgs.avg_load; sds->busiest = group; sds->busiest_nr_running = sgs.sum_nr_running; + sds->busiest_idle_cpus = sgs.idle_cpus; sds->busiest_group_capacity = sgs.group_capacity; + sds->busiest_group_weight = sgs.group_weight; sds->busiest_load_per_task = sgs.sum_weighted_load; + sds->busiest_has_capacity = sgs.group_has_capacity; sds->group_imb = sgs.group_imb; } @@ -2637,6 +2669,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, return fix_small_imbalance(sds, this_cpu, imbalance); } + /******* find_busiest_group() helpers end here *********************/ /** @@ -2688,6 +2721,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, * 4) This group is more busy than the avg busieness at this * sched_domain. * 5) The imbalance is within the specified limit. + * + * Note: when doing newidle balance, if the local group has excess + * capacity (i.e. nr_running < group_capacity) and the busiest group + * does not have any capacity, we force a load balance to pull tasks + * to the local group. In this case, we skip past checks 3, 4 and 5. */ if (!(*balance)) goto ret; @@ -2695,6 +2733,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (!sds.busiest || sds.busiest_nr_running == 0) goto out_balanced; + /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */ + if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity && + !sds.busiest_has_capacity) + goto force_balance; + if (sds.this_load >= sds.max_load) goto out_balanced; @@ -2703,9 +2746,28 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, if (sds.this_load >= sds.avg_load) goto out_balanced; - if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) - goto out_balanced; + /* + * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative. + * And to check for busy balance use !idle_cpu instead of + * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE + * even when they are idle. + */ + if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) { + if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load) + goto out_balanced; + } else { + /* + * This cpu is idle. If the busiest group load doesn't + * have more tasks than the number of available cpu's and + * there is no imbalance between this and busiest group + * wrt to idle cpu's, it is balanced. + */ + if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) && + sds.busiest_nr_running <= sds.busiest_group_weight) + goto out_balanced; + } +force_balance: /* Looks like there is an imbalance. Compute it */ calculate_imbalance(&sds, this_cpu, imbalance); return sds.busiest; @@ -2896,7 +2958,14 @@ redo: if (!ld_moved) { schedstat_inc(sd, lb_failed[idle]); - sd->nr_balance_failed++; + /* + * Increment the failure counter only on periodic balance. + * We do not want newidle balance, which can be very + * frequent, pollute the failure counter causing + * excessive cache_hot migrations and active balances. 
+ */ + if (idle != CPU_NEWLY_IDLE) + sd->nr_balance_failed++; if (need_active_balance(sd, sd_idle, idle)) { raw_spin_lock_irqsave(&busiest->lock, flags); @@ -3017,10 +3086,8 @@ static void idle_balance(int this_cpu, struct rq *this_rq) interval = msecs_to_jiffies(sd->balance_interval); if (time_after(next_balance, sd->last_balance + interval)) next_balance = sd->last_balance + interval; - if (pulled_task) { - this_rq->idle_stamp = 0; + if (pulled_task) break; - } } raw_spin_lock(&this_rq->lock); @@ -3542,8 +3609,11 @@ static void task_fork_fair(struct task_struct *p) raw_spin_lock_irqsave(&rq->lock, flags); - if (unlikely(task_cpu(p) != this_cpu)) + if (unlikely(task_cpu(p) != this_cpu)) { + rcu_read_lock(); __set_task_cpu(p, this_cpu); + rcu_read_unlock(); + } update_curr(cfs_rq); @@ -3615,13 +3685,26 @@ static void set_curr_task_fair(struct rq *rq) } #ifdef CONFIG_FAIR_GROUP_SCHED -static void moved_group_fair(struct task_struct *p, int on_rq) +static void task_move_group_fair(struct task_struct *p, int on_rq) { - struct cfs_rq *cfs_rq = task_cfs_rq(p); - - update_curr(cfs_rq); + /* + * If the task was not on the rq at the time of this cgroup movement + * it must have been asleep, sleeping tasks keep their ->vruntime + * absolute on their old rq until wakeup (needed for the fair sleeper + * bonus in place_entity()). + * + * If it was on the rq, we've just 'preempted' it, which does convert + * ->vruntime to a relative base. + * + * Make sure both cases convert their relative position when migrating + * to another cgroup's rq. This does somewhat interfere with the + * fair sleeper stuff for the first placement, but who cares. + */ + if (!on_rq) + p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime; + set_task_rq(p, task_cpu(p)); if (!on_rq) - place_entity(cfs_rq, &p->se, 1); + p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime; } #endif @@ -3673,7 +3756,7 @@ static const struct sched_class fair_sched_class = { .get_rr_interval = get_rr_interval_fair, #ifdef CONFIG_FAIR_GROUP_SCHED - .moved_group = moved_group_fair, + .task_move_group = task_move_group_fair, #endif }; diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 83c66e8..185f920 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -61,3 +61,8 @@ SCHED_FEAT(ASYM_EFF_LOAD, 1) * release the lock. Decreases scheduling overhead. */ SCHED_FEAT(OWNER_SPIN, 1) + +/* + * Decrement CPU power based on irq activity + */ +SCHED_FEAT(NONIRQ_POWER, 1) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 8afb953..a851cc0 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -609,7 +609,7 @@ static void update_curr_rt(struct rq *rq) if (!task_has_rt_policy(curr)) return; - delta_exec = rq->clock - curr->se.exec_start; + delta_exec = rq->clock_task - curr->se.exec_start; if (unlikely((s64)delta_exec < 0)) delta_exec = 0; @@ -618,7 +618,7 @@ static void update_curr_rt(struct rq *rq) curr->se.sum_exec_runtime += delta_exec; account_group_exec_runtime(curr, delta_exec); - curr->se.exec_start = rq->clock; + curr->se.exec_start = rq->clock_task; cpuacct_charge(curr, delta_exec); sched_rt_avg_update(rq, delta_exec); @@ -960,18 +960,19 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags) * runqueue. Otherwise simply start this RT task * on its current runqueue. * - * We want to avoid overloading runqueues. Even if - * the RT task is of higher priority than the current RT task. - * RT tasks behave differently than other tasks. 
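task_move_group_fair() above keeps a sleeping task's vruntime meaningful across a cgroup move by first making it relative to the old cfs_rq's min_vruntime and then re-basing it on the new queue's. A sketch of that conversion with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long se_vruntime = 1005000;  /* absolute, on the old cfs_rq     */
	unsigned long long old_min = 1000000;      /* old cfs_rq->min_vruntime        */
	unsigned long long new_min = 500000;       /* new cgroup's cfs_rq min_vruntime */

	se_vruntime -= old_min;   /* relative: 5000 ahead of the old queue's minimum */
	/* ... set_task_rq() moves the entity to the new cgroup's cfs_rq here ...   */
	se_vruntime += new_min;   /* absolute again, now against the new queue       */

	printf("vruntime on new cfs_rq = %llu\n", se_vruntime);  /* 505000 */
	return 0;
}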
If - * one gets preempted, we try to push it off to another queue. - * So trying to keep a preempting RT task on the same - * cache hot CPU will force the running RT task to - * a cold CPU. So we waste all the cache for the lower - * RT task in hopes of saving some of a RT task - * that is just being woken and probably will have - * cold cache anyway. + * We want to avoid overloading runqueues. If the woken + * task is a higher priority, then it will stay on this CPU + * and the lower prio task should be moved to another CPU. + * Even though this will probably make the lower prio task + * lose its cache, we do not want to bounce a higher task + * around just because it gave up its CPU, perhaps for a + * lock? + * + * For equal prio tasks, we just let the scheduler sort it out. */ if (unlikely(rt_task(rq->curr)) && + (rq->curr->rt.nr_cpus_allowed < 2 || + rq->curr->prio < p->prio) && (p->rt.nr_cpus_allowed > 1)) { int cpu = find_lowest_rq(p); @@ -1074,7 +1075,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq) } while (rt_rq); p = rt_task_of(rt_se); - p->se.exec_start = rq->clock; + p->se.exec_start = rq->clock_task; return p; } @@ -1491,7 +1492,10 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) if (!task_running(rq, p) && !test_tsk_need_resched(rq->curr) && has_pushable_tasks(rq) && - p->rt.nr_cpus_allowed > 1) + p->rt.nr_cpus_allowed > 1 && + rt_task(rq->curr) && + (rq->curr->rt.nr_cpus_allowed < 2 || + rq->curr->prio < p->prio)) push_rt_tasks(rq); } @@ -1712,7 +1716,7 @@ static void set_curr_task_rt(struct rq *rq) { struct task_struct *p = rq->curr; - p->se.exec_start = rq->clock; + p->se.exec_start = rq->clock_task; /* The running task is never eligible for pushing */ dequeue_pushable_task(rq, p); diff --git a/kernel/signal.c b/kernel/signal.c index 906ae5a..fdecae0 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -637,7 +637,7 @@ static inline bool si_fromuser(const struct siginfo *info) /* * Bad permissions for sending the signal - * - the caller must hold at least the RCU read lock + * - the caller must hold the RCU read lock */ static int check_kill_permission(int sig, struct siginfo *info, struct task_struct *t) @@ -1127,11 +1127,14 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long /* * send signal info to all the members of a group - * - the caller must hold the RCU read lock at least */ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { - int ret = check_kill_permission(sig, info, p); + int ret; + + rcu_read_lock(); + ret = check_kill_permission(sig, info, p); + rcu_read_unlock(); if (!ret && sig) ret = do_send_sig_info(sig, info, p, true); @@ -2407,9 +2410,13 @@ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, return -EFAULT; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. */ - if (info.si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ + if (info.si_code >= 0 || info.si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info.si_code < 0); return -EPERM; + } info.si_signo = sig; /* POSIX.1b doesn't mention process groups. */ @@ -2423,9 +2430,13 @@ long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) return -EINVAL; /* Not even root can pretend to send signals from the kernel. - Nor can they impersonate a kill(), which adds source info. 
*/ - if (info->si_code >= 0) + * Nor can they impersonate a kill()/tgkill(), which adds source info. + */ + if (info->si_code >= 0 || info->si_code == SI_TKILL) { + /* We used to allow any < 0 si_code */ + WARN_ON_ONCE(info->si_code < 0); return -EPERM; + } info->si_signo = sig; return do_send_specific(tgid, pid, sig, info); diff --git a/kernel/smp.c b/kernel/smp.c index 75c970c..b89ef31 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -194,6 +194,24 @@ void generic_smp_call_function_interrupt(void) list_for_each_entry_rcu(data, &call_function.queue, csd.list) { int refs; + /* + * Since we walk the list without any locks, we might + * see an entry that was completed, removed from the + * list and is in the process of being reused. + * + * We must check that the cpu is in the cpumask before + * checking the refs, and both must be set before + * executing the callback on this cpu. + */ + + if (!cpumask_test_cpu(cpu, data->cpumask)) + continue; + + smp_rmb(); + + if (atomic_read(&data->refs) == 0) + continue; + if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) continue; @@ -409,7 +427,7 @@ void smp_call_function_many(const struct cpumask *mask, { struct call_function_data *data; unsigned long flags; - int cpu, next_cpu, this_cpu = smp_processor_id(); + int refs, cpu, next_cpu, this_cpu = smp_processor_id(); /* * Can deadlock when called with interrupts disabled. @@ -420,7 +438,7 @@ void smp_call_function_many(const struct cpumask *mask, WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled() && !oops_in_progress); - /* So, what's a CPU they want? Ignoring this one. */ + /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */ cpu = cpumask_first_and(mask, cpu_online_mask); if (cpu == this_cpu) cpu = cpumask_next_and(cpu, mask, cpu_online_mask); @@ -443,11 +461,48 @@ void smp_call_function_many(const struct cpumask *mask, data = &__get_cpu_var(cfd_data); csd_lock(&data->csd); + /* This BUG_ON verifies our reuse assertions and can be removed */ + BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask)); + + /* + * The global call function queue list add and delete are protected + * by a lock, but the list is traversed without any lock, relying + * on the rcu list add and delete to allow safe concurrent traversal. + * We reuse the call function data without waiting for any grace + * period after some other cpu removes it from the global queue. + * This means a cpu might find our data block as it is being + * filled out. + * + * We hold off the interrupt handler on the other cpu by + * ordering our writes to the cpu mask vs our setting of the + * refs counter. We assert only the cpu owning the data block + * will set a bit in cpumask, and each bit will only be cleared + * by the subject cpu. Each cpu must first find its bit is + * set and then check that refs is set indicating the element is + * ready to be processed, otherwise it must skip the entry. + * + * On the previous iteration refs was set to 0 by another cpu. + * To avoid the use of transitivity, set the counter to 0 here + * so the wmb will pair with the rmb in the interrupt handler. + */ + atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */ + data->csd.func = func; data->csd.info = info; + + /* Ensure 0 refs is visible before mask. 
Also orders func and info */ + smp_wmb(); + + /* We rely on the "and" being processed before the store */ cpumask_and(data->cpumask, mask, cpu_online_mask); cpumask_clear_cpu(this_cpu, data->cpumask); - atomic_set(&data->refs, cpumask_weight(data->cpumask)); + refs = cpumask_weight(data->cpumask); + + /* Some callers race with other cpus changing the passed mask */ + if (unlikely(!refs)) { + csd_unlock(&data->csd); + return; + } raw_spin_lock_irqsave(&call_function.lock, flags); /* @@ -456,6 +511,12 @@ void smp_call_function_many(const struct cpumask *mask, * will not miss any other list entries: */ list_add_rcu(&data->csd.list, &call_function.queue); + /* + * We rely on the wmb() in list_add_rcu to complete our writes + * to the cpumask before this write to refs, which indicates + * data is on the list and is ready to be processed. + */ + atomic_set(&data->refs, refs); raw_spin_unlock_irqrestore(&call_function.lock, flags); /* diff --git a/kernel/softirq.c b/kernel/softirq.c index 07b4f1b..79ee8f1 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -77,11 +77,21 @@ void wakeup_softirqd(void) } /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving + * softirq processing. + * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) + * on local_bh_disable or local_bh_enable. + * This lets us distinguish between whether we are currently processing + * softirq and whether we just have bh disabled. + */ + +/* * This one is for softirq.c-internal use, * where hardirqs are disabled legitimately: */ #ifdef CONFIG_TRACE_IRQFLAGS -static void __local_bh_disable(unsigned long ip) +static void __local_bh_disable(unsigned long ip, unsigned int cnt) { unsigned long flags; @@ -95,32 +105,43 @@ static void __local_bh_disable(unsigned long ip) * We must manually increment preempt_count here and manually * call the trace_preempt_off later. 
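The smp_call_function_many() changes above depend on a strict publication order: the element's payload and cpumask bits are written before refs becomes non-zero, and the interrupt handler tests the cpumask bit, then refs, across an smp_rmb(). A user-space analogue of that contract, using C11 release/acquire atomics rather than the kernel's barriers (the names below are stand-ins, not kernel symbols):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int csd_func_and_info;     /* plain payload, like data->csd.func/info */
static atomic_int cpumask_bit;    /* stands in for our bit in data->cpumask  */
static atomic_int refs;           /* stands in for data->refs                */

static void *ipi_handler(void *arg)
{
	(void)arg;
	/* like the interrupt handler: the cpumask bit must be seen first ... */
	while (!atomic_load_explicit(&cpumask_bit, memory_order_acquire))
		;
	/* ... and only a non-zero refs says the element is fully written */
	while (atomic_load_explicit(&refs, memory_order_acquire) == 0)
		;
	printf("handler sees payload %d\n", csd_func_and_info);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, ipi_handler, NULL);

	csd_func_and_info = 42;                                        /* fill the element   */
	atomic_store_explicit(&cpumask_bit, 1, memory_order_release); /* publish mask first */
	atomic_store_explicit(&refs, 1, memory_order_release);        /* then mark it ready */

	pthread_join(t, NULL);
	return 0;
}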
*/ - preempt_count() += SOFTIRQ_OFFSET; + preempt_count() += cnt; /* * Were softirqs turned off above: */ - if (softirq_count() == SOFTIRQ_OFFSET) + if (softirq_count() == cnt) trace_softirqs_off(ip); raw_local_irq_restore(flags); - if (preempt_count() == SOFTIRQ_OFFSET) + if (preempt_count() == cnt) trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); } #else /* !CONFIG_TRACE_IRQFLAGS */ -static inline void __local_bh_disable(unsigned long ip) +static inline void __local_bh_disable(unsigned long ip, unsigned int cnt) { - add_preempt_count(SOFTIRQ_OFFSET); + add_preempt_count(cnt); barrier(); } #endif /* CONFIG_TRACE_IRQFLAGS */ void local_bh_disable(void) { - __local_bh_disable((unsigned long)__builtin_return_address(0)); + __local_bh_disable((unsigned long)__builtin_return_address(0), + SOFTIRQ_DISABLE_OFFSET); } EXPORT_SYMBOL(local_bh_disable); +static void __local_bh_enable(unsigned int cnt) +{ + WARN_ON_ONCE(in_irq()); + WARN_ON_ONCE(!irqs_disabled()); + + if (softirq_count() == cnt) + trace_softirqs_on((unsigned long)__builtin_return_address(0)); + sub_preempt_count(cnt); +} + /* * Special-case - softirqs can safely be enabled in * cond_resched_softirq(), or by __do_softirq(), @@ -128,12 +149,7 @@ EXPORT_SYMBOL(local_bh_disable); */ void _local_bh_enable(void) { - WARN_ON_ONCE(in_irq()); - WARN_ON_ONCE(!irqs_disabled()); - - if (softirq_count() == SOFTIRQ_OFFSET) - trace_softirqs_on((unsigned long)__builtin_return_address(0)); - sub_preempt_count(SOFTIRQ_OFFSET); + __local_bh_enable(SOFTIRQ_DISABLE_OFFSET); } EXPORT_SYMBOL(_local_bh_enable); @@ -147,13 +163,13 @@ static inline void _local_bh_enable_ip(unsigned long ip) /* * Are softirqs going to be turned on now: */ - if (softirq_count() == SOFTIRQ_OFFSET) + if (softirq_count() == SOFTIRQ_DISABLE_OFFSET) trace_softirqs_on(ip); /* * Keep preemption disabled until we are done with * softirq processing: */ - sub_preempt_count(SOFTIRQ_OFFSET - 1); + sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1); if (unlikely(!in_interrupt() && local_softirq_pending())) do_softirq(); @@ -198,7 +214,8 @@ asmlinkage void __do_softirq(void) pending = local_softirq_pending(); account_system_vtime(current); - __local_bh_disable((unsigned long)__builtin_return_address(0)); + __local_bh_disable((unsigned long)__builtin_return_address(0), + SOFTIRQ_OFFSET); lockdep_softirq_enter(); cpu = smp_processor_id(); @@ -245,7 +262,7 @@ restart: lockdep_softirq_exit(); account_system_vtime(current); - _local_bh_enable(); + __local_bh_enable(SOFTIRQ_OFFSET); } #ifndef __ARCH_HAS_DO_SOFTIRQ @@ -279,10 +296,16 @@ void irq_enter(void) rcu_irq_enter(); if (idle_cpu(cpu) && !in_interrupt()) { - __irq_enter(); + /* + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. + */ + local_bh_disable(); tick_check_idle(cpu); - } else - __irq_enter(); + _local_bh_enable(); + } + + __irq_enter(); } #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED @@ -696,6 +719,7 @@ static int run_ksoftirqd(void * __bind_cpu) { set_current_state(TASK_INTERRUPTIBLE); + current->flags |= PF_KSOFTIRQD; while (!kthread_should_stop()) { preempt_disable(); if (!local_softirq_pending()) { diff --git a/kernel/sys.c b/kernel/sys.c index e83ddbb..f4fcc6e 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) pgid = pid; if (pgid < 0) return -EINVAL; + rcu_read_lock(); /* From this point forward we keep holding onto the tasklist lock * so that our parent does not change from under us. 
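The softirq.c rework above reserves SOFTIRQ_OFFSET for "a softirq is being served" and uses SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET) for local_bh_disable(), so in_serving_softirq() can tell the two states apart. A sketch of the preempt_count bookkeeping, assuming the usual SOFTIRQ_SHIFT of 8:

#include <stdio.h>

#define SOFTIRQ_SHIFT           8
#define SOFTIRQ_OFFSET          (1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET  (2 * SOFTIRQ_OFFSET)
#define SOFTIRQ_MASK            (255UL << SOFTIRQ_SHIFT)

static unsigned long preempt_count;

#define softirq_count()      (preempt_count & SOFTIRQ_MASK)
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)

int main(void)
{
	preempt_count += SOFTIRQ_DISABLE_OFFSET;   /* local_bh_disable()   */
	printf("bh disabled: count=%#lx serving=%lu\n", softirq_count(), in_serving_softirq());

	preempt_count += SOFTIRQ_OFFSET;           /* __do_softirq() entry */
	printf("in softirq : count=%#lx serving=%lu\n", softirq_count(), in_serving_softirq());

	preempt_count -= SOFTIRQ_OFFSET;           /* __do_softirq() exit  */
	preempt_count -= SOFTIRQ_DISABLE_OFFSET;   /* local_bh_enable()    */
	printf("enabled    : count=%#lx serving=%lu\n", softirq_count(), in_serving_softirq());
	return 0;
}

With bottom halves merely disabled, serving stays 0; only the extra SOFTIRQ_OFFSET added around __do_softirq() makes it non-zero, which is what the PF_KSOFTIRQD accounting above relies on.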
-DaveM @@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) out: /* All paths lead to here, thus we are safe. -DaveM */ write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); return err; } diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index b3bafd5..2030f62 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void) return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; } +/* + * Check whether the broadcast device supports oneshot. + */ +bool tick_broadcast_oneshot_available(void) +{ + struct clock_event_device *bc = tick_broadcast_device.evtdev; + + return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; +} + #endif diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index b6b898d..61e296b 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -51,7 +51,11 @@ int tick_is_oneshot_available(void) { struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; - return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); + if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) + return 0; + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) + return 1; + return tick_broadcast_oneshot_available(); } /* diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 290eefb..f65d3a7 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); extern int tick_broadcast_oneshot_active(void); extern void tick_check_oneshot_broadcast(int cpu); +bool tick_broadcast_oneshot_available(void); # else /* BROADCAST */ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { @@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { } static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } static inline int tick_broadcast_oneshot_active(void) { return 0; } static inline void tick_check_oneshot_broadcast(int cpu) { } +static inline bool tick_broadcast_oneshot_available(void) { return true; } # endif /* !BROADCAST */ #else /* !ONESHOT */ @@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) return 0; } static inline int tick_broadcast_oneshot_active(void) { return 0; } +static inline bool tick_broadcast_oneshot_available(void) { return false; } #endif /* !TICK_ONESHOT */ /* diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 813993b..f898af6 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -325,7 +325,7 @@ void tick_nohz_stop_sched_tick(int inidle) } while (read_seqretry(&xtime_lock, seq)); if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || - arch_needs_cpu(cpu) || nohz_ratelimit(cpu)) { + arch_needs_cpu(cpu)) { next_jiffies = last_jiffies + 1; delta_jiffies = 1; } else { diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index caf8d4d..b87c22f 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -736,6 +736,7 @@ static void timekeeping_adjust(s64 offset) static cycle_t logarithmic_accumulation(cycle_t offset, int shift) { u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; + u64 raw_nsecs; /* If the offset is smaller then a shifted interval, do nothing */ if (offset < timekeeper.cycle_interval<= NSEC_PER_SEC) { - raw_time.tv_nsec -= NSEC_PER_SEC; - raw_time.tv_sec++; + /* Accumulate 
raw time */ + raw_nsecs = timekeeper.raw_interval << shift; + raw_nsecs += raw_time.tv_nsec; + if (raw_nsecs >= NSEC_PER_SEC) { + u64 raw_secs = raw_nsecs; + raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); + raw_time.tv_sec += raw_secs; } + raw_time.tv_nsec = raw_nsecs; /* Accumulate error between NTP and clock interval */ timekeeper.ntp_error += tick_length << shift; diff --git a/kernel/timer.c b/kernel/timer.c index ee305c8..f7dec45 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1237,6 +1237,12 @@ unsigned long get_next_timer_interrupt(unsigned long now) struct tvec_base *base = __get_cpu_var(tvec_bases); unsigned long expires; + /* + * Pretend that there is no timer pending if the cpu is offline. + * Possible pending timers will be migrated later to an active cpu. + */ + if (cpu_is_offline(smp_processor_id())) + return now + NEXT_TIMER_MAX_DELTA; spin_lock(&base->lock); if (time_before_eq(base->next_timer, base->timer_jiffies)) base->next_timer = __next_timer_interrupt(base); @@ -1302,7 +1308,7 @@ void do_timer(unsigned long ticks) { jiffies_64 += ticks; update_wall_time(); - calc_global_load(); + calc_global_load(ticks); } #ifdef __ARCH_WANT_SYS_ALARM diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6d2cb14..d9aaa9b 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v) { struct ftrace_profile *rec = v; char str[KSYM_SYMBOL_LEN]; + int ret = 0; #ifdef CONFIG_FUNCTION_GRAPH_TRACER - static DEFINE_MUTEX(mutex); static struct trace_seq s; unsigned long long avg; unsigned long long stddev; #endif + mutex_lock(&ftrace_profile_lock); + + /* we raced with function_profile_reset() */ + if (unlikely(rec->counter == 0)) { + ret = -EBUSY; + goto out; + } kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); seq_printf(m, " %-30.30s %10lu", str, rec->counter); @@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v) do_div(stddev, (rec->counter - 1) * 1000); } - mutex_lock(&mutex); trace_seq_init(&s); trace_print_graph_duration(rec->time, &s); trace_seq_puts(&s, " "); @@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v) trace_seq_puts(&s, " "); trace_print_graph_duration(stddev, &s); trace_print_seq(m, &s); - mutex_unlock(&mutex); #endif seq_putc(m, '\n'); +out: + mutex_unlock(&ftrace_profile_lock); - return 0; + return ret; } static void ftrace_profile_reset(struct ftrace_profile_stat *stat) @@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos) if (*pos > 0) return t_hash_start(m, pos); iter->flags |= FTRACE_ITER_PRINTALL; + /* reset in case of seek/pread */ + iter->flags &= ~FTRACE_ITER_HASH; return iter; } @@ -2410,7 +2419,7 @@ static const struct file_operations ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, .write = ftrace_filter_write, - .llseek = ftrace_regex_lseek, + .llseek = no_llseek, .release = ftrace_filter_release, }; @@ -3281,7 +3290,7 @@ static int start_graph_tracing(void) /* The cpu_boot init_task->ret_stack will never be freed */ for_each_online_cpu(cpu) { if (!idle_task(cpu)->ret_stack) - ftrace_graph_init_task(idle_task(cpu)); + ftrace_graph_init_idle_task(idle_task(cpu), cpu); } do { @@ -3371,6 +3380,49 @@ void unregister_ftrace_graph(void) mutex_unlock(&ftrace_lock); } +static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); + +static void +graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) +{ + atomic_set(&t->tracing_graph_pause, 0); + 
atomic_set(&t->trace_overrun, 0); + t->ftrace_timestamp = 0; + /* make curr_ret_stack visable before we add the ret_stack */ + smp_wmb(); + t->ret_stack = ret_stack; +} + +/* + * Allocate a return stack for the idle task. May be the first + * time through, or it may be done by CPU hotplug online. + */ +void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) +{ + t->curr_ret_stack = -1; + /* + * The idle task has no parent, it either has its own + * stack or no stack at all. + */ + if (t->ret_stack) + WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); + + if (ftrace_graph_active) { + struct ftrace_ret_stack *ret_stack; + + ret_stack = per_cpu(idle_ret_stack, cpu); + if (!ret_stack) { + ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH + * sizeof(struct ftrace_ret_stack), + GFP_KERNEL); + if (!ret_stack) + return; + per_cpu(idle_ret_stack, cpu) = ret_stack; + } + graph_init_task(t, ret_stack); + } +} + /* Allocate a return stack for newly created task */ void ftrace_graph_init_task(struct task_struct *t) { @@ -3386,12 +3438,7 @@ void ftrace_graph_init_task(struct task_struct *t) GFP_KERNEL); if (!ret_stack) return; - atomic_set(&t->tracing_graph_pause, 0); - atomic_set(&t->trace_overrun, 0); - t->ftrace_timestamp = 0; - /* make curr_ret_stack visable before we add the ret_stack */ - smp_wmb(); - t->ret_stack = ret_stack; + graph_init_task(t, ret_stack); } } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1da7b6e..6cd7334 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -405,7 +405,7 @@ static inline int test_time_stamp(u64 delta) #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2)) /* Max number of timestamps that can fit on a page */ -#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_STAMP) +#define RB_TIMESTAMPS_PER_PAGE (BUF_PAGE_SIZE / RB_LEN_TIME_EXTEND) int ring_buffer_print_page_header(struct trace_seq *s) { @@ -3868,6 +3868,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer, rpos = reader->read; pos += size; + if (rpos >= commit) + break; + event = rb_reader_event(cpu_buffer); size = rb_event_length(event); } while (len > size); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 086d363..755d3ee 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2382,11 +2382,19 @@ tracing_write_stub(struct file *filp, const char __user *ubuf, return count; } +static loff_t tracing_seek(struct file *file, loff_t offset, int origin) +{ + if (file->f_mode & FMODE_READ) + return seq_lseek(file, offset, origin); + else + return 0; +} + static const struct file_operations tracing_fops = { .open = tracing_open, .read = seq_read, .write = tracing_write_stub, - .llseek = seq_lseek, + .llseek = tracing_seek, .release = tracing_release, }; diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 79f4bac..b4c179a 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter, * if the output fails. */ data->ent = *curr; - data->ret = *next; + /* + * If the next event is not a return type, then + * we only care about what type it is. Otherwise we can + * safely copy the entire event. 
+ */ + if (next->ent.type == TRACE_GRAPH_RET) + data->ret = *next; + else + data->ret.ent.type = next->ent.type; } } diff --git a/kernel/user.c b/kernel/user.c index 7e72614..5c598ca 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -91,6 +91,7 @@ static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) * upon function exit. */ static void free_user(struct user_struct *up, unsigned long flags) + __releases(&uidhash_lock) { uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); @@ -157,6 +158,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) spin_lock_irq(&uidhash_lock); up = uid_hash_find(uid, hashent); if (up) { + put_user_ns(ns); key_put(new->uid_keyring); key_put(new->session_keyring); kmem_cache_free(uid_cachep, new); diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index b2d70d3..2591583 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -9,6 +9,7 @@ #include #include #include +#include #include /* @@ -82,3 +83,46 @@ void free_user_ns(struct kref *kref) schedule_work(&ns->destroyer); } EXPORT_SYMBOL(free_user_ns); + +uid_t user_ns_map_uid(struct user_namespace *to, const struct cred *cred, uid_t uid) +{ + struct user_namespace *tmp; + + if (likely(to == cred->user->user_ns)) + return uid; + + + /* Is cred->user the creator of the target user_ns + * or the creator of one of it's parents? + */ + for ( tmp = to; tmp != &init_user_ns; + tmp = tmp->creator->user_ns ) { + if (cred->user == tmp->creator) { + return (uid_t)0; + } + } + + /* No useful relationship so no mapping */ + return overflowuid; +} + +gid_t user_ns_map_gid(struct user_namespace *to, const struct cred *cred, gid_t gid) +{ + struct user_namespace *tmp; + + if (likely(to == cred->user->user_ns)) + return gid; + + /* Is cred->user the creator of the target user_ns + * or the creator of one of it's parents? + */ + for ( tmp = to; tmp != &init_user_ns; + tmp = tmp->creator->user_ns ) { + if (cred->user == tmp->creator) { + return (gid_t)0; + } + } + + /* No useful relationship so no mapping */ + return overflowgid; +} diff --git a/lib/list_sort.c b/lib/list_sort.c index 4b5cb79..a7616fa 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv, * element comparison is needed, so the client's cmp() * routine can invoke cond_resched() periodically. 
*/ - (*cmp)(priv, tail, tail); + (*cmp)(priv, tail->next, tail->next); tail->next->prev = tail; tail = tail->next; diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c index aeaa6d7..9d94212 100644 --- a/lib/percpu_counter.c +++ b/lib/percpu_counter.c @@ -76,6 +76,7 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, if (!fbc->counters) return -ENOMEM; #ifdef CONFIG_HOTPLUG_CPU + INIT_LIST_HEAD(&fbc->list); mutex_lock(&percpu_counters_lock); list_add(&fbc->list, &percpu_counters); mutex_unlock(&percpu_counters_lock); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 05da38b..5f0ed4b 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -82,6 +82,16 @@ struct radix_tree_preload { }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; +static inline void *ptr_to_indirect(void *ptr) +{ + return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR); +} + +static inline void *indirect_to_ptr(void *ptr) +{ + return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); +} + static inline gfp_t root_gfp_mask(struct radix_tree_root *root) { return root->gfp_mask & __GFP_BITS_MASK; @@ -263,7 +273,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) return -ENOMEM; /* Increase the height. */ - node->slots[0] = radix_tree_indirect_to_ptr(root->rnode); + node->slots[0] = indirect_to_ptr(root->rnode); /* Propagate the aggregated tag info into the new root */ for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { @@ -274,7 +284,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) newheight = root->height+1; node->height = newheight; node->count = 1; - node = radix_tree_ptr_to_indirect(node); + node = ptr_to_indirect(node); rcu_assign_pointer(root->rnode, node); root->height = newheight; } while (height > root->height); @@ -307,7 +317,7 @@ int radix_tree_insert(struct radix_tree_root *root, return error; } - slot = radix_tree_indirect_to_ptr(root->rnode); + slot = indirect_to_ptr(root->rnode); height = root->height; shift = (height-1) * RADIX_TREE_MAP_SHIFT; @@ -323,8 +333,7 @@ int radix_tree_insert(struct radix_tree_root *root, rcu_assign_pointer(node->slots[offset], slot); node->count++; } else - rcu_assign_pointer(root->rnode, - radix_tree_ptr_to_indirect(slot)); + rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); } /* Go a level down */ @@ -372,7 +381,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, return NULL; return is_slot ? (void *)&root->rnode : node; } - node = radix_tree_indirect_to_ptr(node); + node = indirect_to_ptr(node); height = node->height; if (index > radix_tree_maxindex(height)) @@ -391,7 +400,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, height--; } while (height > 0); - return is_slot ? (void *)slot:node; + return is_slot ? 
(void *)slot : indirect_to_ptr(node); } /** @@ -453,7 +462,7 @@ void *radix_tree_tag_set(struct radix_tree_root *root, height = root->height; BUG_ON(index > radix_tree_maxindex(height)); - slot = radix_tree_indirect_to_ptr(root->rnode); + slot = indirect_to_ptr(root->rnode); shift = (height - 1) * RADIX_TREE_MAP_SHIFT; while (height > 0) { @@ -507,7 +516,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, shift = (height - 1) * RADIX_TREE_MAP_SHIFT; pathp->node = NULL; - slot = radix_tree_indirect_to_ptr(root->rnode); + slot = indirect_to_ptr(root->rnode); while (height > 0) { int offset; @@ -577,7 +586,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, if (!radix_tree_is_indirect_ptr(node)) return (index == 0); - node = radix_tree_indirect_to_ptr(node); + node = indirect_to_ptr(node); height = node->height; if (index > radix_tree_maxindex(height)) @@ -767,7 +776,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, results[0] = node; return 1; } - node = radix_tree_indirect_to_ptr(node); + node = indirect_to_ptr(node); max_index = radix_tree_maxindex(node->height); @@ -835,7 +844,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, results[0] = (void **)&root->rnode; return 1; } - node = radix_tree_indirect_to_ptr(node); + node = indirect_to_ptr(node); max_index = radix_tree_maxindex(node->height); @@ -960,7 +969,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, results[0] = node; return 1; } - node = radix_tree_indirect_to_ptr(node); + node = indirect_to_ptr(node); max_index = radix_tree_maxindex(node->height); @@ -979,7 +988,8 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, slot = *(((void ***)results)[ret + i]); if (!slot) continue; - results[ret + nr_found] = rcu_dereference_raw(slot); + results[ret + nr_found] = + indirect_to_ptr(rcu_dereference_raw(slot)); nr_found++; } ret += nr_found; @@ -1029,7 +1039,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, results[0] = (void **)&root->rnode; return 1; } - node = radix_tree_indirect_to_ptr(node); + node = indirect_to_ptr(node); max_index = radix_tree_maxindex(node->height); @@ -1065,7 +1075,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) void *newptr; BUG_ON(!radix_tree_is_indirect_ptr(to_free)); - to_free = radix_tree_indirect_to_ptr(to_free); + to_free = indirect_to_ptr(to_free); /* * The candidate node has more than one child, or its child @@ -1078,16 +1088,39 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) /* * We don't need rcu_assign_pointer(), since we are simply - * moving the node from one part of the tree to another. If - * it was safe to dereference the old pointer to it + * moving the node from one part of the tree to another: if it + * was safe to dereference the old pointer to it * (to_free->slots[0]), it will be safe to dereference the new - * one (root->rnode). + * one (root->rnode) as far as dependent read barriers go. */ newptr = to_free->slots[0]; if (root->height > 1) - newptr = radix_tree_ptr_to_indirect(newptr); + newptr = ptr_to_indirect(newptr); root->rnode = newptr; root->height--; + + /* + * We have a dilemma here. The node's slot[0] must not be + * NULLed in case there are concurrent lookups expecting to + * find the item. However if this was a bottom-level node, + * then it may be subject to the slot pointer being visible + * to callers dereferencing it. 
If item corresponding to + * slot[0] is subsequently deleted, these callers would expect + * their slot to become empty sooner or later. + * + * For example, lockless pagecache will look up a slot, deref + * the page pointer, and if the page is 0 refcount it means it + * was concurrently deleted from pagecache so try the deref + * again. Fortunately there is already a requirement for logic + * to retry the entire slot lookup -- the indirect pointer + * problem (replacing direct root node with an indirect pointer + * also results in a stale slot). So tag the slot as indirect + * to force callers to retry. + */ + if (root->height == 0) + *((unsigned long *)&to_free->slots[0]) |= + RADIX_TREE_INDIRECT_PTR; + radix_tree_node_free(to_free); } } @@ -1124,7 +1157,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) root->rnode = NULL; goto out; } - slot = radix_tree_indirect_to_ptr(slot); + slot = indirect_to_ptr(slot); shift = (height - 1) * RADIX_TREE_MAP_SHIFT; pathp->node = NULL; @@ -1166,8 +1199,7 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) radix_tree_node_free(to_free); if (pathp->node->count) { - if (pathp->node == - radix_tree_indirect_to_ptr(root->rnode)) + if (pathp->node == indirect_to_ptr(root->rnode)) radix_tree_shrink(root); goto out; } diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 123bcef..c0f7157 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -29,6 +29,7 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info); struct backing_dev_info noop_backing_dev_info = { .name = "noop", + .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, }; EXPORT_SYMBOL_GPL(noop_backing_dev_info); @@ -254,6 +255,7 @@ static int __init default_bdi_init(void) err = bdi_init(&default_backing_dev_info); if (!err) bdi_register(&default_backing_dev_info, NULL, "default"); + err = bdi_init(&noop_backing_dev_info); return err; } diff --git a/mm/bounce.c b/mm/bounce.c index 13b6dad..1481de6 100644 --- a/mm/bounce.c +++ b/mm/bounce.c @@ -116,8 +116,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from) */ vfrom = page_address(fromvec->bv_page) + tovec->bv_offset; - flush_dcache_page(tovec->bv_page); bounce_copy_vec(tovec, vfrom); + flush_dcache_page(tovec->bv_page); } } diff --git a/mm/compaction.c b/mm/compaction.c index 94cce51..4d709ee 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -214,15 +214,16 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) /* Similar to reclaim, but different enough that they don't share logic */ static bool too_many_isolated(struct zone *zone) { - - unsigned long inactive, isolated; + unsigned long active, inactive, isolated; inactive = zone_page_state(zone, NR_INACTIVE_FILE) + zone_page_state(zone, NR_INACTIVE_ANON); + active = zone_page_state(zone, NR_ACTIVE_FILE) + + zone_page_state(zone, NR_ACTIVE_ANON); isolated = zone_page_state(zone, NR_ISOLATED_FILE) + zone_page_state(zone, NR_ISOLATED_ANON); - return isolated > inactive; + return isolated > (inactive + active) / 2; } /* diff --git a/mm/filemap.c b/mm/filemap.c index 20e5642..183d2d4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -631,7 +631,9 @@ repeat: pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); if (pagep) { page = radix_tree_deref_slot(pagep); - if (unlikely(!page || page == RADIX_TREE_RETRY)) + if (unlikely(!page)) + goto out; + if (radix_tree_deref_retry(page)) goto repeat; if (!page_cache_get_speculative(page)) @@ -647,6 +649,7 @@ repeat: goto repeat; } } +out: rcu_read_unlock(); return page; 
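The mm/filemap.c hunks above and below all replace open-coded RADIX_TREE_RETRY checks with radix_tree_deref_retry(). The following is a minimal sketch, not part of the patch, of the caller-side lockless-lookup retry pattern those hunks implement, modelled on the find_get_page() change; it assumes the in-tree APIs used in this series (radix_tree_lookup_slot(), radix_tree_deref_slot(), radix_tree_deref_retry() from the matching radix-tree header update, page_cache_get_speculative(), page_cache_release()), and the function name lockless_lookup_sketch() is illustrative only.

#include <linux/pagemap.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

/*
 * Sketch of the RCU pagecache lookup retry loop: a slot may briefly
 * hold an indirect-tagged value while the tree is extended or shrunk,
 * so callers must detect that with radix_tree_deref_retry() and
 * restart the lookup instead of treating the value as a page pointer.
 */
static struct page *lockless_lookup_sketch(struct address_space *mapping,
					   pgoff_t offset)
{
	struct page *page;
	void **slot;

	rcu_read_lock();
repeat:
	page = NULL;
	slot = radix_tree_lookup_slot(&mapping->page_tree, offset);
	if (slot) {
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto out;
		if (radix_tree_deref_retry(page))
			goto repeat;	/* tree changed under us, restart */
		if (!page_cache_get_speculative(page))
			goto repeat;	/* page was being freed, restart */
		/* Has the page moved? Recheck the slot after taking a ref. */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
	}
out:
	rcu_read_unlock();
	return page;
}

radix_tree_deref_retry() catches the indirect-tagged value that radix_tree_shrink() now leaves in the old root's slot[0] (see the comment added in the lib/radix-tree.c hunk above), while the final compare against *slot catches the slot being reused for a different page between the speculative get and the recheck.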
@@ -764,12 +767,11 @@ repeat: page = radix_tree_deref_slot((void **)pages[i]); if (unlikely(!page)) continue; - /* - * this can only trigger if nr_found == 1, making livelock - * a non issue. - */ - if (unlikely(page == RADIX_TREE_RETRY)) + if (radix_tree_deref_retry(page)) { + if (ret) + start = pages[ret-1]->index; goto restart; + } if (!page_cache_get_speculative(page)) goto repeat; @@ -817,11 +819,7 @@ repeat: page = radix_tree_deref_slot((void **)pages[i]); if (unlikely(!page)) continue; - /* - * this can only trigger if nr_found == 1, making livelock - * a non issue. - */ - if (unlikely(page == RADIX_TREE_RETRY)) + if (radix_tree_deref_retry(page)) goto restart; if (page->mapping == NULL || page->index != index) @@ -874,11 +872,7 @@ repeat: page = radix_tree_deref_slot((void **)pages[i]); if (unlikely(!page)) continue; - /* - * this can only trigger if nr_found == 1, making livelock - * a non issue. - */ - if (unlikely(page == RADIX_TREE_RETRY)) + if (radix_tree_deref_retry(page)) goto restart; if (!page_cache_get_speculative(page)) @@ -1016,6 +1010,9 @@ find_page: goto page_not_up_to_date; if (!trylock_page(page)) goto page_not_up_to_date; + /* Did it get truncated before we got the lock? */ + if (!page->mapping) + goto page_not_up_to_date_locked; if (!mapping->a_ops->is_partially_uptodate(page, desc, offset)) goto page_not_up_to_date_locked; diff --git a/mm/internal.h b/mm/internal.h index 6a697bb..dedb0af 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -62,7 +62,7 @@ extern bool is_free_buddy_page(struct page *page); */ static inline unsigned long page_order(struct page *page) { - VM_BUG_ON(!PageBuddy(page)); + /* PageBuddy() must be checked by the caller */ return page_private(page); } diff --git a/mm/ksm.c b/mm/ksm.c index 6c3e99b..79cd773 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -731,7 +731,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, if (!ptep) goto out; - if (pte_write(*ptep)) { + if (pte_write(*ptep) || pte_dirty(*ptep)) { pte_t entry; swapped = PageSwapCache(page); @@ -754,7 +754,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, set_pte_at(mm, addr, ptep, entry); goto out_unlock; } - entry = pte_wrprotect(entry); + if (pte_dirty(entry)) + set_page_dirty(page); + entry = pte_mkclean(pte_wrprotect(entry)); set_pte_at_notify(mm, addr, ptep, entry); } *orig_pte = *ptep; @@ -1523,8 +1525,6 @@ struct page *ksm_does_need_to_copy(struct page *page, { struct page *new_page; - unlock_page(page); /* any racers will COW it, not modify it */ - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); if (new_page) { copy_user_highpage(new_page, page, address, vma); @@ -1540,7 +1540,6 @@ struct page *ksm_does_need_to_copy(struct page *page, add_page_to_unevictable_list(new_page); } - page_cache_release(page); return new_page; } diff --git a/mm/memory.c b/mm/memory.c index bde42c6..7550758 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2626,7 +2626,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned int flags, pte_t orig_pte) { spinlock_t *ptl; - struct page *page; + struct page *page, *swapcache = NULL; swp_entry_t entry; pte_t pte; struct mem_cgroup *ptr = NULL; @@ -2681,10 +2681,25 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, lock_page(page); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); - page = ksm_might_need_to_copy(page, vma, address); - if (!page) { - ret = VM_FAULT_OOM; - goto out; + /* + * Make sure try_to_free_swap or reuse_swap_page or 
swapoff did not + * release the swapcache from under us. The page pin, and pte_same + * test below, are not enough to exclude that. Even if it is still + * swapcache, we need to check that the page's swap has not changed. + */ + if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val)) + goto out_page; + + if (ksm_might_need_to_copy(page, vma, address)) { + swapcache = page; + page = ksm_does_need_to_copy(page, vma, address); + + if (unlikely(!page)) { + ret = VM_FAULT_OOM; + page = swapcache; + swapcache = NULL; + goto out_page; + } } if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) { @@ -2735,6 +2750,18 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); unlock_page(page); + if (swapcache) { + /* + * Hold the lock to avoid the swap entry to be reused + * until we take the PT lock for the pte_same() check + * (to avoid false positives from pte_same). For + * further safety release the lock after the swap_free + * so that the swap count won't change under a + * parallel locked swapcache. + */ + unlock_page(swapcache); + page_cache_release(swapcache); + } if (flags & FAULT_FLAG_WRITE) { ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); @@ -2756,10 +2783,48 @@ out_page: unlock_page(page); out_release: page_cache_release(page); + if (swapcache) { + unlock_page(swapcache); + page_cache_release(swapcache); + } return ret; } /* + * This is like a special single-page "expand_{down|up}wards()", + * except we must first make sure that 'address{-|+}PAGE_SIZE' + * doesn't hit another vma. + */ +static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +{ + address &= PAGE_MASK; + if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { + struct vm_area_struct *prev = vma->vm_prev; + + /* + * Is there a mapping abutting this one below? + * + * That's only ok if it's the same stack mapping + * that has gotten split.. + */ + if (prev && prev->vm_end == address) + return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; + + expand_stack(vma, address - PAGE_SIZE); + } + if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { + struct vm_area_struct *next = vma->vm_next; + + /* As VM_GROWSDOWN but s/below/above/ */ + if (next && next->vm_start == address + PAGE_SIZE) + return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; + + expand_upwards(vma, address + PAGE_SIZE); + } + return 0; +} + +/* * We enter with non-exclusive mmap_sem (to exclude vma changes, * but allow concurrent faults), and pte mapped but not yet locked. * We return with mmap_sem still held, but pte unmapped and unlocked. @@ -2772,19 +2837,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, spinlock_t *ptl; pte_t entry; + pte_unmap(page_table); + + /* Check if we need to add a guard page to the stack */ + if (check_stack_guard_page(vma, address) < 0) + return VM_FAULT_SIGBUS; + + /* Use the zero-page for reads */ if (!(flags & FAULT_FLAG_WRITE)) { entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), vma->vm_page_prot)); - ptl = pte_lockptr(mm, pmd); - spin_lock(ptl); + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (!pte_none(*page_table)) goto unlock; goto setpte; } /* Allocate our own private page. 
*/ - pte_unmap(page_table); - if (unlikely(anon_vma_prepare(vma))) goto oom; page = alloc_zeroed_user_highpage_movable(vma, address); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index a4cfcdc..6345dfe 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -584,19 +584,19 @@ static inline int pageblock_free(struct page *page) /* Return the start of the next active pageblock after a given page */ static struct page *next_active_pageblock(struct page *page) { - int pageblocks_stride; - /* Ensure the starting page is pageblock-aligned */ BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); - /* Move forward by at least 1 * pageblock_nr_pages */ - pageblocks_stride = 1; - /* If the entire pageblock is free, move to the end of free page */ - if (pageblock_free(page)) - pageblocks_stride += page_order(page) - pageblock_order; + if (pageblock_free(page)) { + int order; + /* be careful. we don't have locks, page_order can be changed.*/ + order = page_order(page); + if ((order < MAX_ORDER) && (order >= pageblock_order)) + return page + (1 << order); + } - return page + (pageblocks_stride * pageblock_nr_pages); + return page + pageblock_nr_pages; } /* Checks if this range of memory is likely to be hot-removable. */ @@ -659,7 +659,7 @@ static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) * Scanning pfn is much easier than scanning lru list. * Scan pfn from start to end and Find LRU page. */ -int scan_lru_pages(unsigned long start, unsigned long end) +unsigned long scan_lru_pages(unsigned long start, unsigned long end) { unsigned long pfn; struct page *page; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5bc0a96..407cda2 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1576,7 +1576,7 @@ unsigned slab_node(struct mempolicy *policy) (void)first_zones_zonelist(zonelist, highest_zoneidx, &policy->v.nodes, &zone); - return zone->node; + return zone ? zone->node : numa_node_id(); } default: diff --git a/mm/mlock.c b/mm/mlock.c index 3f82720..b70919c 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -135,6 +135,13 @@ void munlock_vma_page(struct page *page) } } +static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +{ + return (vma->vm_flags & VM_GROWSDOWN) && + (vma->vm_start == addr) && + !vma_stack_continue(vma->vm_prev, addr); +} + /** * __mlock_vma_pages_range() - mlock a range of pages in the vma. 
* @vma: target vma @@ -167,6 +174,12 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, if (vma->vm_flags & VM_WRITE) gup_flags |= FOLL_WRITE; + /* We don't try to access the guard page of a stack vma */ + if (stack_guard_page(vma, start)) { + addr += PAGE_SIZE; + nr_pages--; + } + while (nr_pages > 0) { int i; diff --git a/mm/mmap.c b/mm/mmap.c index 456ec6f..073b39d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -388,17 +388,23 @@ static inline void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node *rb_parent) { + struct vm_area_struct *next; + + vma->vm_prev = prev; if (prev) { - vma->vm_next = prev->vm_next; + next = prev->vm_next; prev->vm_next = vma; } else { mm->mmap = vma; if (rb_parent) - vma->vm_next = rb_entry(rb_parent, + next = rb_entry(rb_parent, struct vm_area_struct, vm_rb); else - vma->vm_next = NULL; + next = NULL; } + vma->vm_next = next; + if (next) + next->vm_prev = vma; } void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, @@ -485,7 +491,11 @@ static inline void __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev) { - prev->vm_next = vma->vm_next; + struct vm_area_struct *next = vma->vm_next; + + prev->vm_next = next; + if (next) + next->vm_prev = prev; rb_erase(&vma->vm_rb, &mm->mm_rb); if (mm->mmap_cache == vma) mm->mmap_cache = prev; @@ -1694,9 +1704,6 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ -#ifndef CONFIG_IA64 -static -#endif int expand_upwards(struct vm_area_struct *vma, unsigned long address) { int error; @@ -1900,6 +1907,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr; insertion_point = (prev ? &prev->vm_next : &mm->mmap); + vma->vm_prev = NULL; do { rb_erase(&vma->vm_rb, &mm->mm_rb); mm->map_count--; @@ -1907,6 +1915,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, vma = vma->vm_next; } while (vma && vma->vm_start < end); *insertion_point = vma; + if (vma) + vma->vm_prev = prev; tail_vma->vm_next = NULL; if (mm->unmap_area == arch_unmap_area) addr = prev ? 
prev->vm_end : mm->mmap_base; @@ -1984,6 +1994,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma, removed_exe_file_vma(mm); fput(new->vm_file); } + unlink_anon_vmas(new); out_free_mpol: mpol_put(pol); out_free_vma: @@ -2433,6 +2444,7 @@ int install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) { + int ret; struct vm_area_struct *vma; vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); @@ -2450,16 +2462,23 @@ int install_special_mapping(struct mm_struct *mm, vma->vm_ops = &special_mapping_vmops; vma->vm_private_data = pages; - if (unlikely(insert_vm_struct(mm, vma))) { - kmem_cache_free(vm_area_cachep, vma); - return -ENOMEM; - } + ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1); + if (ret) + goto out; + + ret = insert_vm_struct(mm, vma); + if (ret) + goto out; mm->total_vm += len >> PAGE_SHIFT; perf_event_mmap(vma); return 0; + +out: + kmem_cache_free(vm_area_cachep, vma); + return ret; } static DEFINE_MUTEX(mm_all_locks_mutex); diff --git a/mm/mprotect.c b/mm/mprotect.c index 2d1bf7c..4c51338 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -211,6 +211,7 @@ success: mmu_notifier_invalidate_range_end(mm, start, end); vm_stat_account(mm, oldflags, vma->vm_file, -nrpages); vm_stat_account(mm, newflags, vma->vm_file, nrpages); + perf_event_mmap(vma); return 0; fail: @@ -299,7 +300,6 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); if (error) goto out; - perf_event_mmap(vma); nstart = tmp; if (nstart < prev->vm_end) diff --git a/mm/mremap.c b/mm/mremap.c index cde56ee..97de5ae 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -91,9 +91,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, */ mapping = vma->vm_file->f_mapping; spin_lock(&mapping->i_mmap_lock); - if (new_vma->vm_truncate_count && - new_vma->vm_truncate_count != vma->vm_truncate_count) - new_vma->vm_truncate_count = 0; + new_vma->vm_truncate_count = 0; } /* @@ -276,9 +274,16 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (old_len > vma->vm_end - addr) goto Efault; - if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) { - if (new_len > old_len) + /* Need to be careful about a growing mapping */ + if (new_len > old_len) { + unsigned long pgoff; + + if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) goto Efault; + pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; + pgoff += vma->vm_pgoff; + if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) + goto Einval; } if (vma->vm_flags & VM_LOCKED) { diff --git a/mm/nommu.c b/mm/nommu.c index b76f3ee..3e18548 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -609,7 +609,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags) */ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) { - struct vm_area_struct *pvma, **pp; + struct vm_area_struct *pvma, **pp, *next; struct address_space *mapping; struct rb_node **p, *parent; @@ -669,8 +669,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) break; } - vma->vm_next = *pp; + next = *pp; *pp = vma; + vma->vm_next = next; + if (next) + next->vm_prev = vma; } /* @@ -1670,6 +1673,7 @@ void exit_mmap(struct mm_struct *mm) mm->mmap = vma->vm_next; delete_vma_from_mm(vma); delete_vma(mm, vma); + cond_resched(); } kleave(""); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 37498ef..582cba1 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -949,22 +949,16 @@ 
continue_unlock: } } - if (wbc->nr_to_write > 0) { - if (--wbc->nr_to_write == 0 && - wbc->sync_mode == WB_SYNC_NONE) { - /* - * We stop writing back only if we are - * not doing integrity sync. In case of - * integrity sync we have to keep going - * because someone may be concurrently - * dirtying pages, and we might have - * synced a lot of newly appeared dirty - * pages, but have not synced all of the - * old dirty pages. - */ - done = 1; - break; - } + /* + * We stop writing back only if we are not doing + * integrity sync. In case of integrity sync we have to + * keep going until we have written all the pages + * we tagged for writeback prior to entering this loop. + */ + if (--wbc->nr_to_write <= 0 && + wbc->sync_mode == WB_SYNC_NONE) { + done = 1; + break; } } pagevec_release(&pvec); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9bd339e..68404aa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -103,19 +103,24 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; * only be modified with pm_mutex held, unless the suspend/hibernate code is * guaranteed not to run in parallel with that modification). */ -void set_gfp_allowed_mask(gfp_t mask) + +static gfp_t saved_gfp_mask; + +void pm_restore_gfp_mask(void) { WARN_ON(!mutex_is_locked(&pm_mutex)); - gfp_allowed_mask = mask; + if (saved_gfp_mask) { + gfp_allowed_mask = saved_gfp_mask; + saved_gfp_mask = 0; + } } -gfp_t clear_gfp_allowed_mask(gfp_t mask) +void pm_restrict_gfp_mask(void) { - gfp_t ret = gfp_allowed_mask; - WARN_ON(!mutex_is_locked(&pm_mutex)); - gfp_allowed_mask &= ~mask; - return ret; + WARN_ON(saved_gfp_mask); + saved_gfp_mask = gfp_allowed_mask; + gfp_allowed_mask &= ~GFP_IOFS; } #endif /* CONFIG_PM_SLEEP */ @@ -530,7 +535,7 @@ static inline void __free_one_page(struct page *page, * so it's less likely to be used soon and more likely to be merged * as a higher order page */ - if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) { + if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) { struct page *higher_page, *higher_buddy; combined_idx = __find_combined_index(page_idx, order); higher_page = page + combined_idx - page_idx; @@ -588,13 +593,13 @@ static void free_pcppages_bulk(struct zone *zone, int count, { int migratetype = 0; int batch_free = 0; + int to_free = count; spin_lock(&zone->lock); zone->all_unreclaimable = 0; zone->pages_scanned = 0; - __mod_zone_page_state(zone, NR_FREE_PAGES, count); - while (count) { + while (to_free) { struct page *page; struct list_head *list; @@ -619,8 +624,9 @@ static void free_pcppages_bulk(struct zone *zone, int count, /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ __free_one_page(page, zone, 0, page_private(page)); trace_mm_page_pcpu_drain(page, 0, page_private(page)); - } while (--count && --batch_free && !list_empty(list)); + } while (--to_free && --batch_free && !list_empty(list)); } + __mod_zone_page_state(zone, NR_FREE_PAGES, count); spin_unlock(&zone->lock); } @@ -631,8 +637,8 @@ static void free_one_page(struct zone *zone, struct page *page, int order, zone->all_unreclaimable = 0; zone->pages_scanned = 0; - __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); __free_one_page(page, zone, order, migratetype); + __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order); spin_unlock(&zone->lock); } @@ -1453,24 +1459,24 @@ static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) #endif /* CONFIG_FAIL_PAGE_ALLOC */ /* - * Return 1 if free pages are above 'mark'. 
This takes into account the order + * Return true if free pages are above 'mark'. This takes into account the order * of the allocation. */ -int zone_watermark_ok(struct zone *z, int order, unsigned long mark, - int classzone_idx, int alloc_flags) +static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags, long free_pages) { /* free_pages my go negative - that's OK */ long min = mark; - long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; int o; + free_pages -= (1 << order) + 1; if (alloc_flags & ALLOC_HIGH) min -= min / 2; if (alloc_flags & ALLOC_HARDER) min -= min / 4; if (free_pages <= min + z->lowmem_reserve[classzone_idx]) - return 0; + return false; for (o = 0; o < order; o++) { /* At the next order, this order's pages become unavailable */ free_pages -= z->free_area[o].nr_free << o; @@ -1479,9 +1485,28 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark, min >>= 1; if (free_pages <= min) - return 0; + return false; } - return 1; + return true; +} + +bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags) +{ + return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, + zone_page_state(z, NR_FREE_PAGES)); +} + +bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, + int classzone_idx, int alloc_flags) +{ + long free_pages = zone_page_state(z, NR_FREE_PAGES); + + if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) + free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); + + return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, + free_pages); } #ifdef CONFIG_NUMA @@ -1843,6 +1868,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, struct page *page = NULL; struct reclaim_state reclaim_state; struct task_struct *p = current; + bool drained = false; cond_resched(); @@ -1861,14 +1887,25 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, cond_resched(); - if (order != 0) - drain_all_pages(); + if (unlikely(!(*did_some_progress))) + return NULL; - if (likely(*did_some_progress)) - page = get_page_from_freelist(gfp_mask, nodemask, order, +retry: + page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, alloc_flags, preferred_zone, migratetype); + + /* + * If an allocation failed after direct reclaim, it could be because + * pages are pinned on the per-cpu lists. 
Drain them and try again + */ + if (!page && !drained) { + drain_all_pages(); + drained = true; + goto retry; + } + return page; } diff --git a/mm/percpu.c b/mm/percpu.c index 6470e77..0a09fff 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1413,9 +1413,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, if (pcpu_first_unit_cpu == NR_CPUS) pcpu_first_unit_cpu = cpu; + pcpu_last_unit_cpu = cpu; } } - pcpu_last_unit_cpu = cpu; pcpu_nr_units = unit; for_each_possible_cpu(cpu) diff --git a/mm/shmem.c b/mm/shmem.c index f65f840..676eed3 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2756,5 +2756,6 @@ int shmem_zero_setup(struct vm_area_struct *vma) fput(vma->vm_file); vma->vm_file = file; vma->vm_ops = &shmem_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; return 0; } diff --git a/mm/slab.c b/mm/slab.c index e49f8f4..91e691b 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -2289,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, if (ralign < align) { ralign = align; } - /* disable debug if not aligning with REDZONE_ALIGN */ - if (ralign & (__alignof__(unsigned long long) - 1)) + /* disable debug if necessary */ + if (ralign > __alignof__(unsigned long long)) flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); /* * 4) Store it. @@ -2316,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, */ if (flags & SLAB_RED_ZONE) { /* add space for red zone words */ - cachep->obj_offset += align; - size += align + sizeof(unsigned long long); + cachep->obj_offset += sizeof(unsigned long long); + size += 2 * sizeof(unsigned long long); } if (flags & SLAB_STORE_USER) { /* user store requires one word storage behind the end of @@ -2331,8 +2331,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, } #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) if (size >= malloc_sizes[INDEX_L3 + 1].cs_size - && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { - cachep->obj_offset += PAGE_SIZE - size; + && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { + cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); size = PAGE_SIZE; } #endif diff --git a/mm/swapfile.c b/mm/swapfile.c index 03aa2d5..7c703ff 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -139,8 +139,7 @@ static int discard_swap(struct swap_info_struct *si) nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); if (nr_blocks) { err = blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_KERNEL, - BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); + nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); if (err) return err; cond_resched(); @@ -151,8 +150,7 @@ static int discard_swap(struct swap_info_struct *si) nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_KERNEL, - BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); + nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); if (err) break; @@ -191,8 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, - nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT | - BLKDEV_IFL_BARRIER)) + nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT)) break; } @@ -686,6 +683,24 @@ int try_to_free_swap(struct page *page) if (page_swapcount(page)) return 0; + /* + * Once hibernation has begun to create its image of memory, + * there's a danger that one of the calls to try_to_free_swap() + * - most probably a call from __try_to_reclaim_swap() while + * hibernation is allocating its own swap pages for 
the image, + * but conceivably even a call from memory reclaim - will free + * the swap from a page which has already been recorded in the + * image as a clean swapcache page, and then reuse its swap for + * another page of the image. On waking from hibernation, the + * original page might be freed under memory pressure, then + * later read back in from swap, now with the wrong data. + * + * Hibernation clears bits from gfp_allowed_mask to prevent + * memory reclaim from writing to disk, so check that here. + */ + if (!(gfp_allowed_mask & __GFP_IO)) + return 0; + delete_from_swap_cache(page); SetPageDirty(page); return 1; @@ -2032,7 +2047,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->flags |= SWP_SOLIDSTATE; p->cluster_next = 1 + (random32() % p->highest_bit); } - if (discard_swap(p) == 0) + if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD)) p->flags |= SWP_DISCARDABLE; } diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ae00746..20a402c 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -513,6 +513,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); static void purge_fragmented_blocks_allcpus(void); /* + * called before a call to iounmap() if the caller wants vm_area_struct's + * immediately freed. + */ +void set_iounmap_nonlazy(void) +{ + atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); +} + +/* * Purges all lazily-freed vmap areas. * * If sync is 0 then don't purge if there is already a purge in progress. diff --git a/mm/vmscan.c b/mm/vmscan.c index b94fe1b..22e5676 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1726,13 +1726,12 @@ static void shrink_zone(int priority, struct zone *zone, * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. */ -static bool shrink_zones(int priority, struct zonelist *zonelist, +static void shrink_zones(int priority, struct zonelist *zonelist, struct scan_control *sc) { enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask); struct zoneref *z; struct zone *zone; - bool all_unreclaimable = true; for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, sc->nodemask) { @@ -1759,8 +1758,38 @@ static bool shrink_zones(int priority, struct zonelist *zonelist, } shrink_zone(priority, zone, sc); - all_unreclaimable = false; } +} + +static bool zone_reclaimable(struct zone *zone) +{ + return zone->pages_scanned < zone_reclaimable_pages(zone) * 6; +} + +/* + * As hibernation is going on, kswapd is freezed so that it can't mark + * the zone into all_unreclaimable. It can't handle OOM during hibernation. + * So let's check zone's unreclaimable in direct reclaim as well as kswapd. 
+ */ +static bool all_unreclaimable(struct zonelist *zonelist, + struct scan_control *sc) +{ + struct zoneref *z; + struct zone *zone; + bool all_unreclaimable = true; + + for_each_zone_zonelist_nodemask(zone, z, zonelist, + gfp_zone(sc->gfp_mask), sc->nodemask) { + if (!populated_zone(zone)) + continue; + if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) + continue; + if (zone_reclaimable(zone)) { + all_unreclaimable = false; + break; + } + } + return all_unreclaimable; } @@ -1784,7 +1813,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, struct scan_control *sc) { int priority; - bool all_unreclaimable; unsigned long total_scanned = 0; struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long lru_pages = 0; @@ -1815,7 +1843,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, sc->nr_scanned = 0; if (!priority) disable_swap_token(); - all_unreclaimable = shrink_zones(priority, zonelist, sc); + shrink_zones(priority, zonelist, sc); /* * Don't shrink slabs when reclaiming memory from * over limit cgroups @@ -1879,7 +1907,7 @@ out: return sc->nr_reclaimed; /* top priority shrink_zones still had more to do? don't OOM, then */ - if (scanning_global_lru(sc) && !all_unreclaimable) + if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc)) return 1; return 0; @@ -1979,7 +2007,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining) if (zone->all_unreclaimable) continue; - if (!zone_watermark_ok(zone, order, high_wmark_pages(zone), + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 0, 0)) return 1; } @@ -2076,7 +2104,7 @@ loop_again: shrink_active_list(SWAP_CLUSTER_MAX, zone, &sc, priority, 0); - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), 0, 0)) { end_zone = i; break; @@ -2127,7 +2155,7 @@ loop_again: * We put equal pressure on every zone, unless one * zone has way too many pages free already. */ - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, 8*high_wmark_pages(zone), end_zone, 0)) shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; @@ -2137,8 +2165,7 @@ loop_again: total_scanned += sc.nr_scanned; if (zone->all_unreclaimable) continue; - if (nr_slab == 0 && - zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6)) + if (nr_slab == 0 && !zone_reclaimable(zone)) zone->all_unreclaimable = 1; /* * If we've done a decent amount of scanning and @@ -2149,7 +2176,7 @@ loop_again: total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2) sc.may_writepage = 1; - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), end_zone, 0)) { all_zones_ok = 0; /* @@ -2157,7 +2184,7 @@ loop_again: * means that we have a GFP_ATOMIC allocation * failure risk. Hurry up! */ - if (!zone_watermark_ok(zone, order, + if (!zone_watermark_ok_safe(zone, order, min_wmark_pages(zone), end_zone, 0)) has_under_min_watermark_zone = 1; } @@ -2299,9 +2326,11 @@ static int kswapd(void *p) * premature sleep. 
If not, then go fully * to sleep until explicitly woken up */ - if (!sleeping_prematurely(pgdat, order, remaining)) + if (!sleeping_prematurely(pgdat, order, remaining)) { + restore_pgdat_percpu_threshold(pgdat); schedule(); - else { + reduce_pgdat_percpu_threshold(pgdat); + } else { if (remaining) count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY); else @@ -2337,15 +2366,16 @@ void wakeup_kswapd(struct zone *zone, int order) if (!populated_zone(zone)) return; - pgdat = zone->zone_pgdat; - if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) + if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) return; + pgdat = zone->zone_pgdat; if (pgdat->kswapd_max_order < order) pgdat->kswapd_max_order = order; - if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) - return; if (!waitqueue_active(&pgdat->kswapd_wait)) return; + if (zone_watermark_ok_safe(zone, order, low_wmark_pages(zone), 0, 0)) + return; + wake_up_interruptible(&pgdat->kswapd_wait); } diff --git a/mm/vmstat.c b/mm/vmstat.c index 7759941..41dc8cd 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -81,6 +81,30 @@ EXPORT_SYMBOL(vm_stat); #ifdef CONFIG_SMP +static int calculate_pressure_threshold(struct zone *zone) +{ + int threshold; + int watermark_distance; + + /* + * As vmstats are not up to date, there is drift between the estimated + * and real values. For high thresholds and a high number of CPUs, it + * is possible for the min watermark to be breached while the estimated + * value looks fine. The pressure threshold is a reduced value such + * that even the maximum amount of drift will not accidentally breach + * the min watermark + */ + watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone); + threshold = max(1, (int)(watermark_distance / num_online_cpus())); + + /* + * Maximum threshold is 125 + */ + threshold = min(125, threshold); + + return threshold; +} + static int calculate_threshold(struct zone *zone) { int threshold; @@ -138,14 +162,69 @@ static void refresh_zone_stat_thresholds(void) int threshold; for_each_populated_zone(zone) { + unsigned long max_drift, tolerate_drift; + threshold = calculate_threshold(zone); for_each_online_cpu(cpu) per_cpu_ptr(zone->pageset, cpu)->stat_threshold = threshold; + + /* + * Only set percpu_drift_mark if there is a danger that + * NR_FREE_PAGES reports the low watermark is ok when in fact + * the min watermark could be breached by an allocation + */ + tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone); + max_drift = num_online_cpus() * threshold; + if (max_drift > tolerate_drift) + zone->percpu_drift_mark = high_wmark_pages(zone) + + max_drift; } } +void reduce_pgdat_percpu_threshold(pg_data_t *pgdat) +{ + struct zone *zone; + int cpu; + int threshold; + int i; + + get_online_cpus(); + for (i = 0; i < pgdat->nr_zones; i++) { + zone = &pgdat->node_zones[i]; + if (!zone->percpu_drift_mark) + continue; + + threshold = calculate_pressure_threshold(zone); + for_each_online_cpu(cpu) + per_cpu_ptr(zone->pageset, cpu)->stat_threshold + = threshold; + } + put_online_cpus(); +} + +void restore_pgdat_percpu_threshold(pg_data_t *pgdat) +{ + struct zone *zone; + int cpu; + int threshold; + int i; + + get_online_cpus(); + for (i = 0; i < pgdat->nr_zones; i++) { + zone = &pgdat->node_zones[i]; + if (!zone->percpu_drift_mark) + continue; + + threshold = calculate_threshold(zone); + for_each_online_cpu(cpu) + per_cpu_ptr(zone->pageset, cpu)->stat_threshold + = threshold; + } + put_online_cpus(); +} + /* * For use when we know that interrupts are disabled. 
*/ diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 98ce9bc..c85109d 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -948,7 +948,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) csocket = NULL; - if (strlen(addr) > UNIX_PATH_MAX) { + if (strlen(addr) >= UNIX_PATH_MAX) { P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n", addr); return -ENAMETOOLONG; diff --git a/net/Kconfig b/net/Kconfig index 0d68b40..a44092e 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -206,7 +206,7 @@ source "net/dcb/Kconfig" config RPS boolean - depends on SMP && SYSFS + depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS default y menu "Network testing" diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index cfdfd7e..763fd0b 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, ax25_cb *ax25; int err = 0; + memset(fsa, 0, sizeof(*fsa)); lock_sock(sk); ax25 = ax25_sk(sk); @@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, fsa->fsa_ax25.sax25_family = AF_AX25; fsa->fsa_ax25.sax25_call = ax25->dest_addr; - fsa->fsa_ax25.sax25_ndigis = 0; if (ax25->digipeat != NULL) { ndigi = ax25->digipeat->ndigi; diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 2862f53..d935da7 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@ -88,6 +88,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long sockfd_put(nsock); return -EBADFD; } + ca.device[sizeof(ca.device)-1] = 0; err = bnep_add_connection(&ca, nsock); if (!err) { diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index d0927d1..f3f8d4c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -700,6 +700,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user break; } + memset(&cinfo, 0, sizeof(cinfo)); cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle; memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3); diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 753fc42..f49bcd9 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -22,7 +22,7 @@ #include #include "br_private.h" -/* net device transmit always called with no BH (preempt_disabled) */ +/* net device transmit always called with BH disabled */ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); @@ -46,9 +46,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); + rcu_read_lock(); if (is_multicast_ether_addr(dest)) { - if (br_multicast_rcv(br, NULL, skb)) + if (br_multicast_rcv(br, NULL, skb)) { + kfree_skb(skb); goto out; + } mdst = br_mdb_get(br, skb); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) @@ -61,6 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) br_flood_deliver(br, skb); out: + rcu_read_unlock(); return NETDEV_TX_OK; } diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index b01dde3..7204ad3 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -214,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br, spin_unlock_bh(&br->hash_lock); } -/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */ +/* No locking or refcounting, assumes caller has rcu_read_lock */ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, const unsigned char *addr) { diff --git 
a/net/bridge/br_input.c b/net/bridge/br_input.c index d36e700..114365c 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -37,7 +37,7 @@ static int br_pass_frame_up(struct sk_buff *skb) netif_receive_skb); } -/* note: already called with rcu_read_lock (preempt_disabled) */ +/* note: already called with rcu_read_lock */ int br_handle_frame_finish(struct sk_buff *skb) { const unsigned char *dest = eth_hdr(skb)->h_dest; @@ -108,7 +108,7 @@ drop: goto out; } -/* note: already called with rcu_read_lock (preempt_disabled) */ +/* note: already called with rcu_read_lock */ static int br_handle_local_finish(struct sk_buff *skb) { struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); @@ -133,7 +133,7 @@ static inline int is_link_local(const unsigned char *dest) /* * Called via br_handle_frame_hook. * Return NULL if skb is handled - * note: already called with rcu_read_lock (preempt_disabled) + * note: already called with rcu_read_lock */ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) { diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 27ae946..5a9bcfe 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -437,7 +437,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, ip6h = ipv6_hdr(skb); *(__force __be32 *)ip6h = htonl(0x60000000); - ip6h->payload_len = 8 + sizeof(*mldq); + ip6h->payload_len = htons(8 + sizeof(*mldq)); ip6h->nexthdr = IPPROTO_HOPOPTS; ip6h->hop_limit = 1; ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 8fb75f8..e759b89 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -749,9 +749,11 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb) { if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && - !skb_is_gso(skb)) + !skb_is_gso(skb)) { + /* BUG: Should really parse the IP options here. */ + memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); return ip_fragment(skb, br_dev_queue_push_xmit); - else + } else return br_dev_queue_push_xmit(skb); } #else diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c index 217bd22..5854e82 100644 --- a/net/bridge/br_stp_bpdu.c +++ b/net/bridge/br_stp_bpdu.c @@ -131,7 +131,7 @@ void br_send_tcn_bpdu(struct net_bridge_port *p) /* * Called from llc. * - * NO locks, but rcu_read_lock (preempt_disabled) + * NO locks, but rcu_read_lock */ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, struct net_device *dev) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 59ca00e..62675dc 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1098,6 +1098,8 @@ static int do_replace(struct net *net, const void __user *user, if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) return -ENOMEM; + tmp.name[sizeof(tmp.name) - 1] = 0; + countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; newinfo = vmalloc(sizeof(*newinfo) + countersize); if (!newinfo) diff --git a/net/can/bcm.c b/net/can/bcm.c index 9c65e9d..9d5e8ac 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -60,6 +60,13 @@ #include #include +/* + * To send multiple CAN frame content within TX_SETUP or to filter + * CAN messages with multiplex index within RX_SETUP, the number of + * different filters is limited to 256 due to the one byte index value. 
+ */ +#define MAX_NFRAMES 256 + /* use of last_frames[index].can_dlc */ #define RX_RECV 0x40 /* received data for this element */ #define RX_THR 0x80 /* element not been sent due to throttle feature */ @@ -89,16 +96,16 @@ struct bcm_op { struct list_head list; int ifindex; canid_t can_id; - int flags; + u32 flags; unsigned long frames_abs, frames_filtered; struct timeval ival1, ival2; struct hrtimer timer, thrtimer; struct tasklet_struct tsklet, thrtsklet; ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; int rx_ifindex; - int count; - int nframes; - int currframe; + u32 count; + u32 nframes; + u32 currframe; struct can_frame *frames; struct can_frame *last_frames; struct can_frame sframe; @@ -118,7 +125,7 @@ struct bcm_sock { struct list_head tx_ops; unsigned long dropped_usr_msgs; struct proc_dir_entry *bcm_proc_read; - char procname [9]; /* pointer printed in ASCII with \0 */ + char procname [32]; /* inode number in decimal with \0 */ }; static inline struct bcm_sock *bcm_sk(const struct sock *sk) @@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, "rx_op: %03X %-5s ", op->can_id, bcm_proc_getifname(ifname, op->ifindex)); - seq_printf(m, "[%d]%c ", op->nframes, + seq_printf(m, "[%u]%c ", op->nframes, (op->flags & RX_CHECK_DLC)?'d':' '); if (op->kt_ival1.tv64) seq_printf(m, "timeo=%lld ", @@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) list_for_each_entry(op, &bo->tx_ops, list) { - seq_printf(m, "tx_op: %03X %s [%d] ", + seq_printf(m, "tx_op: %03X %s [%u] ", op->can_id, bcm_proc_getifname(ifname, op->ifindex), op->nframes); @@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, struct can_frame *firstframe; struct sockaddr_can *addr; struct sock *sk = op->sk; - int datalen = head->nframes * CFSIZ; + unsigned int datalen = head->nframes * CFSIZ; int err; skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); @@ -468,7 +475,7 @@ rx_changed_settime: * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly * received data stored in op->last_frames[] */ -static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, +static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, const struct can_frame *rxdata) { /* @@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) /* * bcm_rx_do_flush - helper for bcm_rx_thr_flush */ -static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index) +static inline int bcm_rx_do_flush(struct bcm_op *op, int update, + unsigned int index) { if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { if (update) @@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update) int updated = 0; if (op->nframes > 1) { - int i; + unsigned int i; /* for MUX filter we start at index 1 */ for (i = 1; i < op->nframes; i++) @@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) { struct bcm_op *op = (struct bcm_op *)data; const struct can_frame *rxframe = (struct can_frame *)skb->data; - int i; + unsigned int i; /* disable timeout */ hrtimer_cancel(&op->timer); @@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, { struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; - int i, err; + unsigned int i; + int err; /* we need a real device to send frames */ if (!ifindex) return -ENODEV; - /* we need at least one can_frame */ - if (msg_head->nframes < 1) + /* check nframes boundaries - we need at least one 
can_frame */ + if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) return -EINVAL; /* check the given can_id */ @@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, msg_head->nframes = 0; } + /* the first element contains the mux-mask => MAX_NFRAMES + 1 */ + if (msg_head->nframes > MAX_NFRAMES + 1) + return -EINVAL; + if ((msg_head->flags & RX_RTR_FRAME) && ((msg_head->nframes != 1) || (!(msg_head->can_id & CAN_RTR_FLAG)))) @@ -1508,7 +1521,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, if (proc_dir) { /* unique socket address as filename */ - sprintf(bo->procname, "%p", sock); + sprintf(bo->procname, "%lu", sock_i_ino(sk)); bo->bcm_proc_read = proc_create_data(bo->procname, 0644, proc_dir, &bcm_proc_fops, sk); diff --git a/net/can/raw.c b/net/can/raw.c index da99cf1..1650599 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -655,6 +655,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, err = sock_tx_timestamp(msg, sk, skb_tx(skb)); if (err < 0) goto free_skb; + + /* to be able to check the received tx sock reference in raw_rcv() */ + skb_tx(skb)->prevent_sk_orphan = 1; + skb->dev = dev; skb->sk = sk; diff --git a/net/compat.c b/net/compat.c index ec24d9e..b869534 100644 --- a/net/compat.c +++ b/net/compat.c @@ -41,10 +41,12 @@ static inline int iov_from_user_compat_to_kern(struct iovec *kiov, compat_size_t len; if (get_user(len, &uiov32->iov_len) || - get_user(buf, &uiov32->iov_base)) { - tot_len = -EFAULT; - break; - } + get_user(buf, &uiov32->iov_base)) + return -EFAULT; + + if (len > INT_MAX - tot_len) + len = INT_MAX - tot_len; + tot_len += len; kiov->iov_base = compat_ptr(buf); kiov->iov_len = (__kernel_size_t) len; diff --git a/net/core/dev.c b/net/core/dev.c index 1f466e8..85e3f8d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1121,13 +1121,20 @@ EXPORT_SYMBOL(netdev_bonding_change); void dev_load(struct net *net, const char *name) { struct net_device *dev; + int no_module; rcu_read_lock(); dev = dev_get_by_name_rcu(net, name); rcu_read_unlock(); - - if (!dev && capable(CAP_NET_ADMIN)) - request_module("%s", name); + no_module = !dev; + if (no_module && capable(CAP_NET_ADMIN)) + no_module = request_module("netdev-%s", name); + if (no_module && capable(CAP_SYS_MODULE)) { + if (!request_module("%s", name)) + pr_err("Loading kernel module for a network device " +"with CAP_SYS_MODULE (deprecated). 
Use CAP_NET_ADMIN and alias netdev-%s " +"instead\n", name); + } } EXPORT_SYMBOL(dev_load); @@ -1491,7 +1498,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) nf_reset(skb); if (!(dev->flags & IFF_UP) || - (skb->len > (dev->mtu + dev->hard_header_len))) { + (skb->len > (dev->mtu + dev->hard_header_len + VLAN_HLEN))) { kfree_skb(skb); return NET_RX_DROP; } @@ -1653,10 +1660,10 @@ EXPORT_SYMBOL(netif_device_attach); static bool can_checksum_protocol(unsigned long features, __be16 protocol) { - return ((features & NETIF_F_GEN_CSUM) || - ((features & NETIF_F_IP_CSUM) && + return ((features & NETIF_F_NO_CSUM) || + ((features & NETIF_F_V4_CSUM) && protocol == htons(ETH_P_IP)) || - ((features & NETIF_F_IPV6_CSUM) && + ((features & NETIF_F_V6_CSUM) && protocol == htons(ETH_P_IPV6)) || ((features & NETIF_F_FCOE_CRC) && protocol == htons(ETH_P_FCOE))); @@ -2110,6 +2117,9 @@ static inline int skb_needs_linearize(struct sk_buff *skb, illegal_highdma(dev, skb))); } +static DEFINE_PER_CPU(int, xmit_recursion); +#define RECURSION_LIMIT 10 + /** * dev_queue_xmit - transmit a buffer * @skb: buffer to transmit @@ -2194,10 +2204,15 @@ gso: if (txq->xmit_lock_owner != cpu) { + if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) + goto recursion_alert; + HARD_TX_LOCK(dev, txq, cpu); if (!netif_tx_queue_stopped(txq)) { + __this_cpu_inc(xmit_recursion); rc = dev_hard_start_xmit(skb, dev, txq); + __this_cpu_dec(xmit_recursion); if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); goto out; @@ -2209,7 +2224,9 @@ gso: "queue packet!\n", dev->name); } else { /* Recursion is detected! It is possible, - * unfortunately */ + * unfortunately + */ +recursion_alert: if (net_ratelimit()) printk(KERN_CRIT "Dead loop on virtual device " "%s, fix it urgently!\n", dev->name); @@ -2504,6 +2521,7 @@ int netif_rx(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; + preempt_disable(); rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); @@ -2513,6 +2531,7 @@ int netif_rx(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); + preempt_enable(); } #else { @@ -3064,7 +3083,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) int mac_len; enum gro_result ret; - if (!(skb->dev->features & NETIF_F_GRO)) + if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) goto normal; if (skb_is_gso(skb) || skb_has_frags(skb)) @@ -3133,7 +3152,7 @@ pull: put_page(skb_shinfo(skb)->frags[0].page); memmove(skb_shinfo(skb)->frags, skb_shinfo(skb)->frags + 1, - --skb_shinfo(skb)->nr_frags); + --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); } } @@ -3151,9 +3170,6 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) { struct sk_buff *p; - if (netpoll_rx_on(skb)) - return GRO_NORMAL; - for (p = napi->gro_list; p; p = p->next) { NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev) && @@ -3215,6 +3231,8 @@ void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) { __skb_pull(skb, skb_headlen(skb)); skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); + skb->dev = napi->dev; + skb->skb_iif = 0; napi->skb = skb; } diff --git a/net/core/dst.c b/net/core/dst.c index 9920722..1ebf5e0 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -343,6 +343,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, static struct notifier_block dst_dev_notifier = { .notifier_call = dst_dev_event, + .priority = -10, /* must be called after other network notifiers */ }; void __init 
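/*
 * The dev_queue_xmit() hunk above introduces a per-cpu xmit_recursion counter
 * and a RECURSION_LIMIT of 10, so a misconfigured stack of virtual devices that
 * keeps re-entering the transmit path is cut off instead of looping forever.
 * Below is a minimal userspace sketch of the same guard using a thread-local
 * counter; transmit_one() and deliver() are invented names standing in for the
 * real transmit hooks, not kernel interfaces.
 */
#include <stdio.h>

#define RECURSION_LIMIT 10

static _Thread_local int xmit_recursion;

static int deliver(int depth);

/* Returns 0 on success, -1 when the recursion guard trips. */
static int transmit_one(int depth)
{
    int rc;

    if (xmit_recursion > RECURSION_LIMIT) {
        fprintf(stderr, "dead loop detected at depth %d\n", depth);
        return -1;
    }

    xmit_recursion++;
    rc = deliver(depth);        /* may re-enter transmit_one() via a stacked device */
    xmit_recursion--;

    return rc;
}

/* Toy "lower device" that always re-enters the transmit path. */
static int deliver(int depth)
{
    return transmit_one(depth + 1);
}

int main(void)
{
    return transmit_one(0) ? 1 : 0;
}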
dst_init(void) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 75e4ffe..f5e3856 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -482,7 +482,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr) gstrings.len = ret; - data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); if (!data) return -ENOMEM; @@ -719,7 +719,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) if (regs.len > reglen) regs.len = reglen; - regbuf = kmalloc(reglen, GFP_USER); + regbuf = kzalloc(reglen, GFP_USER); if (!regbuf) return -ENOMEM; diff --git a/net/core/filter.c b/net/core/filter.c index da69fb7..71a433c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -112,104 +112,106 @@ EXPORT_SYMBOL(sk_filter); */ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) { - struct sock_filter *fentry; /* We walk down these */ void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ + unsigned long memvalid = 0; u32 tmp; int k; int pc; + BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); /* * Process array of filter instructions. */ for (pc = 0; pc < flen; pc++) { - fentry = &filter[pc]; + const struct sock_filter *fentry = &filter[pc]; + u32 f_k = fentry->k; switch (fentry->code) { - case BPF_ALU|BPF_ADD|BPF_X: + case BPF_S_ALU_ADD_X: A += X; continue; - case BPF_ALU|BPF_ADD|BPF_K: - A += fentry->k; + case BPF_S_ALU_ADD_K: + A += f_k; continue; - case BPF_ALU|BPF_SUB|BPF_X: + case BPF_S_ALU_SUB_X: A -= X; continue; - case BPF_ALU|BPF_SUB|BPF_K: - A -= fentry->k; + case BPF_S_ALU_SUB_K: + A -= f_k; continue; - case BPF_ALU|BPF_MUL|BPF_X: + case BPF_S_ALU_MUL_X: A *= X; continue; - case BPF_ALU|BPF_MUL|BPF_K: - A *= fentry->k; + case BPF_S_ALU_MUL_K: + A *= f_k; continue; - case BPF_ALU|BPF_DIV|BPF_X: + case BPF_S_ALU_DIV_X: if (X == 0) return 0; A /= X; continue; - case BPF_ALU|BPF_DIV|BPF_K: - A /= fentry->k; + case BPF_S_ALU_DIV_K: + A /= f_k; continue; - case BPF_ALU|BPF_AND|BPF_X: + case BPF_S_ALU_AND_X: A &= X; continue; - case BPF_ALU|BPF_AND|BPF_K: - A &= fentry->k; + case BPF_S_ALU_AND_K: + A &= f_k; continue; - case BPF_ALU|BPF_OR|BPF_X: + case BPF_S_ALU_OR_X: A |= X; continue; - case BPF_ALU|BPF_OR|BPF_K: - A |= fentry->k; + case BPF_S_ALU_OR_K: + A |= f_k; continue; - case BPF_ALU|BPF_LSH|BPF_X: + case BPF_S_ALU_LSH_X: A <<= X; continue; - case BPF_ALU|BPF_LSH|BPF_K: - A <<= fentry->k; + case BPF_S_ALU_LSH_K: + A <<= f_k; continue; - case BPF_ALU|BPF_RSH|BPF_X: + case BPF_S_ALU_RSH_X: A >>= X; continue; - case BPF_ALU|BPF_RSH|BPF_K: - A >>= fentry->k; + case BPF_S_ALU_RSH_K: + A >>= f_k; continue; - case BPF_ALU|BPF_NEG: + case BPF_S_ALU_NEG: A = -A; continue; - case BPF_JMP|BPF_JA: - pc += fentry->k; + case BPF_S_JMP_JA: + pc += f_k; continue; - case BPF_JMP|BPF_JGT|BPF_K: - pc += (A > fentry->k) ? fentry->jt : fentry->jf; + case BPF_S_JMP_JGT_K: + pc += (A > f_k) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JGE|BPF_K: - pc += (A >= fentry->k) ? fentry->jt : fentry->jf; + case BPF_S_JMP_JGE_K: + pc += (A >= f_k) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JEQ|BPF_K: - pc += (A == fentry->k) ? fentry->jt : fentry->jf; + case BPF_S_JMP_JEQ_K: + pc += (A == f_k) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JSET|BPF_K: - pc += (A & fentry->k) ? fentry->jt : fentry->jf; + case BPF_S_JMP_JSET_K: + pc += (A & f_k) ? 
fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JGT|BPF_X: + case BPF_S_JMP_JGT_X: pc += (A > X) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JGE|BPF_X: + case BPF_S_JMP_JGE_X: pc += (A >= X) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JEQ|BPF_X: + case BPF_S_JMP_JEQ_X: pc += (A == X) ? fentry->jt : fentry->jf; continue; - case BPF_JMP|BPF_JSET|BPF_X: + case BPF_S_JMP_JSET_X: pc += (A & X) ? fentry->jt : fentry->jf; continue; - case BPF_LD|BPF_W|BPF_ABS: - k = fentry->k; + case BPF_S_LD_W_ABS: + k = f_k; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { @@ -217,8 +219,8 @@ load_w: continue; } break; - case BPF_LD|BPF_H|BPF_ABS: - k = fentry->k; + case BPF_S_LD_H_ABS: + k = f_k; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { @@ -226,8 +228,8 @@ load_h: continue; } break; - case BPF_LD|BPF_B|BPF_ABS: - k = fentry->k; + case BPF_S_LD_B_ABS: + k = f_k; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { @@ -235,55 +237,59 @@ load_b: continue; } break; - case BPF_LD|BPF_W|BPF_LEN: + case BPF_S_LD_W_LEN: A = skb->len; continue; - case BPF_LDX|BPF_W|BPF_LEN: + case BPF_S_LDX_W_LEN: X = skb->len; continue; - case BPF_LD|BPF_W|BPF_IND: - k = X + fentry->k; + case BPF_S_LD_W_IND: + k = X + f_k; goto load_w; - case BPF_LD|BPF_H|BPF_IND: - k = X + fentry->k; + case BPF_S_LD_H_IND: + k = X + f_k; goto load_h; - case BPF_LD|BPF_B|BPF_IND: - k = X + fentry->k; + case BPF_S_LD_B_IND: + k = X + f_k; goto load_b; - case BPF_LDX|BPF_B|BPF_MSH: - ptr = load_pointer(skb, fentry->k, 1, &tmp); + case BPF_S_LDX_B_MSH: + ptr = load_pointer(skb, f_k, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; - case BPF_LD|BPF_IMM: - A = fentry->k; + case BPF_S_LD_IMM: + A = f_k; continue; - case BPF_LDX|BPF_IMM: - X = fentry->k; + case BPF_S_LDX_IMM: + X = f_k; continue; - case BPF_LD|BPF_MEM: - A = mem[fentry->k]; + case BPF_S_LD_MEM: + A = (memvalid & (1UL << f_k)) ? + mem[f_k] : 0; continue; - case BPF_LDX|BPF_MEM: - X = mem[fentry->k]; + case BPF_S_LDX_MEM: + X = (memvalid & (1UL << f_k)) ? 
+ mem[f_k] : 0; continue; - case BPF_MISC|BPF_TAX: + case BPF_S_MISC_TAX: X = A; continue; - case BPF_MISC|BPF_TXA: + case BPF_S_MISC_TXA: A = X; continue; - case BPF_RET|BPF_K: - return fentry->k; - case BPF_RET|BPF_A: + case BPF_S_RET_K: + return f_k; + case BPF_S_RET_A: return A; - case BPF_ST: - mem[fentry->k] = A; + case BPF_S_ST: + memvalid |= 1UL << f_k; + mem[f_k] = A; continue; - case BPF_STX: - mem[fentry->k] = X; + case BPF_S_STX: + memvalid |= 1UL << f_k; + mem[f_k] = X; continue; default: WARN_ON(1); @@ -390,53 +396,128 @@ int sk_chk_filter(struct sock_filter *filter, int flen) /* Only allow valid instructions */ switch (ftest->code) { case BPF_ALU|BPF_ADD|BPF_K: + ftest->code = BPF_S_ALU_ADD_K; + break; case BPF_ALU|BPF_ADD|BPF_X: + ftest->code = BPF_S_ALU_ADD_X; + break; case BPF_ALU|BPF_SUB|BPF_K: + ftest->code = BPF_S_ALU_SUB_K; + break; case BPF_ALU|BPF_SUB|BPF_X: + ftest->code = BPF_S_ALU_SUB_X; + break; case BPF_ALU|BPF_MUL|BPF_K: + ftest->code = BPF_S_ALU_MUL_K; + break; case BPF_ALU|BPF_MUL|BPF_X: + ftest->code = BPF_S_ALU_MUL_X; + break; case BPF_ALU|BPF_DIV|BPF_X: + ftest->code = BPF_S_ALU_DIV_X; + break; case BPF_ALU|BPF_AND|BPF_K: + ftest->code = BPF_S_ALU_AND_K; + break; case BPF_ALU|BPF_AND|BPF_X: + ftest->code = BPF_S_ALU_AND_X; + break; case BPF_ALU|BPF_OR|BPF_K: + ftest->code = BPF_S_ALU_OR_K; + break; case BPF_ALU|BPF_OR|BPF_X: + ftest->code = BPF_S_ALU_OR_X; + break; case BPF_ALU|BPF_LSH|BPF_K: + ftest->code = BPF_S_ALU_LSH_K; + break; case BPF_ALU|BPF_LSH|BPF_X: + ftest->code = BPF_S_ALU_LSH_X; + break; case BPF_ALU|BPF_RSH|BPF_K: + ftest->code = BPF_S_ALU_RSH_K; + break; case BPF_ALU|BPF_RSH|BPF_X: + ftest->code = BPF_S_ALU_RSH_X; + break; case BPF_ALU|BPF_NEG: + ftest->code = BPF_S_ALU_NEG; + break; case BPF_LD|BPF_W|BPF_ABS: + ftest->code = BPF_S_LD_W_ABS; + break; case BPF_LD|BPF_H|BPF_ABS: + ftest->code = BPF_S_LD_H_ABS; + break; case BPF_LD|BPF_B|BPF_ABS: + ftest->code = BPF_S_LD_B_ABS; + break; case BPF_LD|BPF_W|BPF_LEN: + ftest->code = BPF_S_LD_W_LEN; + break; case BPF_LD|BPF_W|BPF_IND: + ftest->code = BPF_S_LD_W_IND; + break; case BPF_LD|BPF_H|BPF_IND: + ftest->code = BPF_S_LD_H_IND; + break; case BPF_LD|BPF_B|BPF_IND: + ftest->code = BPF_S_LD_B_IND; + break; case BPF_LD|BPF_IMM: + ftest->code = BPF_S_LD_IMM; + break; case BPF_LDX|BPF_W|BPF_LEN: + ftest->code = BPF_S_LDX_W_LEN; + break; case BPF_LDX|BPF_B|BPF_MSH: + ftest->code = BPF_S_LDX_B_MSH; + break; case BPF_LDX|BPF_IMM: + ftest->code = BPF_S_LDX_IMM; + break; case BPF_MISC|BPF_TAX: + ftest->code = BPF_S_MISC_TAX; + break; case BPF_MISC|BPF_TXA: + ftest->code = BPF_S_MISC_TXA; + break; case BPF_RET|BPF_K: + ftest->code = BPF_S_RET_K; + break; case BPF_RET|BPF_A: + ftest->code = BPF_S_RET_A; break; /* Some instructions need special checks */ - case BPF_ALU|BPF_DIV|BPF_K: /* check for division by zero */ + case BPF_ALU|BPF_DIV|BPF_K: if (ftest->k == 0) return -EINVAL; + ftest->code = BPF_S_ALU_DIV_K; break; + /* check for invalid memory addresses */ case BPF_LD|BPF_MEM: + if (ftest->k >= BPF_MEMWORDS) + return -EINVAL; + ftest->code = BPF_S_LD_MEM; + break; case BPF_LDX|BPF_MEM: + if (ftest->k >= BPF_MEMWORDS) + return -EINVAL; + ftest->code = BPF_S_LDX_MEM; + break; case BPF_ST: + if (ftest->k >= BPF_MEMWORDS) + return -EINVAL; + ftest->code = BPF_S_ST; + break; case BPF_STX: - /* check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; + ftest->code = BPF_S_STX; break; case BPF_JMP|BPF_JA: @@ -447,49 +528,77 @@ int sk_chk_filter(struct sock_filter *filter, int 
flen) */ if (ftest->k >= (unsigned)(flen-pc-1)) return -EINVAL; + ftest->code = BPF_S_JMP_JA; break; case BPF_JMP|BPF_JEQ|BPF_K: + ftest->code = BPF_S_JMP_JEQ_K; + break; case BPF_JMP|BPF_JEQ|BPF_X: + ftest->code = BPF_S_JMP_JEQ_X; + break; case BPF_JMP|BPF_JGE|BPF_K: + ftest->code = BPF_S_JMP_JGE_K; + break; case BPF_JMP|BPF_JGE|BPF_X: + ftest->code = BPF_S_JMP_JGE_X; + break; case BPF_JMP|BPF_JGT|BPF_K: + ftest->code = BPF_S_JMP_JGT_K; + break; case BPF_JMP|BPF_JGT|BPF_X: + ftest->code = BPF_S_JMP_JGT_X; + break; case BPF_JMP|BPF_JSET|BPF_K: + ftest->code = BPF_S_JMP_JSET_K; + break; case BPF_JMP|BPF_JSET|BPF_X: + ftest->code = BPF_S_JMP_JSET_X; + break; + + default: + return -EINVAL; + } + /* for conditionals both must be safe */ + switch (ftest->code) { + case BPF_S_JMP_JEQ_K: + case BPF_S_JMP_JEQ_X: + case BPF_S_JMP_JGE_K: + case BPF_S_JMP_JGE_X: + case BPF_S_JMP_JGT_K: + case BPF_S_JMP_JGT_X: + case BPF_S_JMP_JSET_X: + case BPF_S_JMP_JSET_K: if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; - break; + } + } + /* last instruction must be a RET code */ + switch (filter[flen - 1].code) { + case BPF_S_RET_K: + case BPF_S_RET_A: + return 0; + break; default: return -EINVAL; } - } - - return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL; } EXPORT_SYMBOL(sk_chk_filter); /** - * sk_filter_rcu_release: Release a socket filter by rcu_head + * sk_filter_release_rcu - Release a socket filter by rcu_head * @rcu: rcu_head that contains the sk_filter to free */ -static void sk_filter_rcu_release(struct rcu_head *rcu) +void sk_filter_release_rcu(struct rcu_head *rcu) { struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); - sk_filter_release(fp); -} - -static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp) -{ - unsigned int size = sk_filter_len(fp); - - atomic_sub(size, &sk->sk_omem_alloc); - call_rcu_bh(&fp->rcu, sk_filter_rcu_release); + kfree(fp); } +EXPORT_SYMBOL(sk_filter_release_rcu); /** * sk_attach_filter - attach a socket filter @@ -534,7 +643,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) rcu_read_unlock_bh(); if (old_fp) - sk_filter_delayed_uncharge(sk, old_fp); + sk_filter_uncharge(sk, old_fp); return 0; } EXPORT_SYMBOL_GPL(sk_attach_filter); @@ -548,7 +657,7 @@ int sk_detach_filter(struct sock *sk) filter = rcu_dereference_bh(sk->sk_filter); if (filter) { rcu_assign_pointer(sk->sk_filter, NULL); - sk_filter_delayed_uncharge(sk, filter); + sk_filter_uncharge(sk, filter); ret = 0; } rcu_read_unlock_bh(); diff --git a/net/core/iovec.c b/net/core/iovec.c index 1e7f4e9..4198d73 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -59,14 +59,13 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, err = 0; for (ct = 0; ct < m->msg_iovlen; ct++) { - err += iov[ct].iov_len; - /* - * Goal is not to verify user data, but to prevent returning - * negative value, which is interpreted as errno. - * Overflow is still possible, but it is harmless. 
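/*
 * The net/core/filter.c rewrite above has two halves: sk_chk_filter() now
 * rejects scratch-memory indexes >= BPF_MEMWORDS (and maps each opcode to an
 * internal BPF_S_* value, requiring the last instruction to be a RET), while
 * sk_run_filter() tracks which scratch words were actually written in a
 * "memvalid" bitmap so loads from never-written slots return 0 rather than
 * stale stack contents.  The sketch below reproduces that discipline for a toy
 * three-opcode machine; the opcode names and struct layout are invented and do
 * not match the real BPF encoding.
 */
#include <stdint.h>
#include <stdio.h>

#define MEMWORDS 16    /* plays the role of BPF_MEMWORDS */

enum { OP_ST, OP_LD, OP_RET };    /* store A to mem[k], load mem[k] into A, return A */

struct insn {
    int op;
    uint32_t k;
};

/* Load-time check: memory indexes in range, program ends in a RET. */
static int check_prog(const struct insn *prog, int len)
{
    int pc;

    if (len < 1 || prog[len - 1].op != OP_RET)
        return -1;
    for (pc = 0; pc < len; pc++)
        if ((prog[pc].op == OP_ST || prog[pc].op == OP_LD) &&
            prog[pc].k >= MEMWORDS)
            return -1;
    return 0;
}

/* Run-time: loads from never-written slots yield 0, not leftover stack data. */
static uint32_t run_prog(const struct insn *prog, int len)
{
    uint32_t A = 0, mem[MEMWORDS];
    unsigned long memvalid = 0;
    int pc;

    for (pc = 0; pc < len; pc++) {
        switch (prog[pc].op) {
        case OP_ST:
            memvalid |= 1UL << prog[pc].k;
            mem[prog[pc].k] = A;
            break;
        case OP_LD:
            A = (memvalid & (1UL << prog[pc].k)) ? mem[prog[pc].k] : 0;
            break;
        case OP_RET:
            return A;
        }
    }
    return 0;
}

int main(void)
{
    struct insn prog[] = { { OP_LD, 3 }, { OP_RET, 0 } };    /* reads an unwritten slot */

    if (check_prog(prog, 2) != 0)
        return 1;
    printf("result = %u\n", (unsigned)run_prog(prog, 2));    /* prints 0, not junk */
    return 0;
}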
- */ - if (err < 0) - return -EMSGSIZE; + size_t len = iov[ct].iov_len; + + if (len > INT_MAX - err) { + len = INT_MAX - err; + iov[ct].iov_len = len; + } + err += len; } return err; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a2af24..04b3dcf 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1545,6 +1545,9 @@ replay: snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind); dest_net = rtnl_link_get_net(net, tb); + if (IS_ERR(dest_net)) + return PTR_ERR(dest_net); + dev = rtnl_create_link(net, dest_net, ifname, ops, tb); if (IS_ERR(dev)) diff --git a/net/core/scm.c b/net/core/scm.c index b88f6f9..681c976 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -170,6 +170,30 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) err = scm_check_creds(&p->creds); if (err) goto error; + + if (pid_vnr(p->pid) != p->creds.pid) { + struct pid *pid; + err = -ESRCH; + pid = find_get_pid(p->creds.pid); + if (!pid) + goto error; + put_pid(p->pid); + p->pid = pid; + } + + if ((p->cred->euid != p->creds.uid) || + (p->cred->egid != p->creds.gid)) { + struct cred *cred; + err = -ENOMEM; + cred = prepare_creds(); + if (!cred) + goto error; + + cred->uid = cred->euid = p->creds.uid; + cred->gid = cred->egid = p->creds.uid; + put_cred(p->cred); + p->cred = cred; + } break; default: goto error; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ce88293..a586e89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2574,6 +2574,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features) __copy_skb_header(nskb, skb); nskb->mac_len = skb->mac_len; + /* nskb and skb might have different headroom */ + if (nskb->ip_summed == CHECKSUM_PARTIAL) + nskb->csum_start += skb_headroom(nskb) - headroom; + skb_reset_mac_header(nskb); skb_set_network_header(nskb, skb->mac_len); nskb->transport_header = (nskb->network_header + @@ -2704,7 +2708,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) return -E2BIG; headroom = skb_headroom(p); - nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p)); + nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC); if (unlikely(!nskb)) return -ENOMEM; diff --git a/net/core/sock.c b/net/core/sock.c index 2cf7f9f..e734700 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -110,6 +110,7 @@ #include #include #include +#include #include #include @@ -749,6 +750,20 @@ set_rcvbuf: EXPORT_SYMBOL(sock_setsockopt); +void cred_to_ucred(struct pid *pid, const struct cred *cred, + struct ucred *ucred) +{ + ucred->pid = pid_vnr(pid); + ucred->uid = ucred->gid = -1; + if (cred) { + struct user_namespace *current_ns = current_user_ns(); + + ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid); + ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid); + } +} +EXPORT_SYMBOL_GPL(cred_to_ucred); + int sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { diff --git a/net/core/stream.c b/net/core/stream.c index cc196f4..9ece62b 100644 --- a/net/core/stream.c +++ b/net/core/stream.c @@ -144,10 +144,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; - sk_wait_event(sk, ¤t_timeo, !sk->sk_err && - !(sk->sk_shutdown & SEND_SHUTDOWN) && - sk_stream_memory_free(sk) && - vm_wait); + sk_wait_event(sk, ¤t_timeo, sk->sk_err || + (sk->sk_shutdown & SEND_SHUTDOWN) || + (sk_stream_memory_free(sk) && + !vm_wait)); sk->sk_write_pending--; if (vm_wait) { diff --git a/net/dccp/input.c 
b/net/dccp/input.c index 6beb6a7..ca16869 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -617,6 +617,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* Caller (dccp_v4_do_rcv) will send Reset */ dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; return 1; + } else if (sk->sk_state == DCCP_CLOSED) { + dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; + return 1; } if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { @@ -679,10 +682,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } switch (sk->sk_state) { - case DCCP_CLOSED: - dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; - return 1; - case DCCP_REQUESTING: queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); if (queued >= 0) diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index d6b93d1..cf38f52 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1556,6 +1556,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us if (r_len > sizeof(struct linkinfo_dn)) r_len = sizeof(struct linkinfo_dn); + memset(&link, 0, sizeof(link)); + switch(sock->state) { case SS_CONNECTING: link.idn_linkstate = LL_CONNECTING; diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index 2a5a805..ef3593f 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -276,12 +277,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, #endif #ifdef CONFIG_ECONET_AUNUDP struct msghdr udpmsg; - struct iovec iov[msg->msg_iovlen+1]; + struct iovec iov[2]; struct aunhdr ah; struct sockaddr_in udpdest; __kernel_size_t size; - int i; mm_segment_t oldfs; + char *userbuf; #endif /* @@ -297,23 +298,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, mutex_lock(&econet_mutex); - if (saddr == NULL) { - struct econet_sock *eo = ec_sk(sk); - - addr.station = eo->station; - addr.net = eo->net; - port = eo->port; - cb = eo->cb; - } else { - if (msg->msg_namelen < sizeof(struct sockaddr_ec)) { - mutex_unlock(&econet_mutex); - return -EINVAL; - } - addr.station = saddr->addr.station; - addr.net = saddr->addr.net; - port = saddr->port; - cb = saddr->cb; - } + if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) { + mutex_unlock(&econet_mutex); + return -EINVAL; + } + addr.station = saddr->addr.station; + addr.net = saddr->addr.net; + port = saddr->port; + cb = saddr->cb; /* Look for a device with the right network number. */ dev = net2dev_map[addr.net]; @@ -328,17 +320,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, } } - if (len + 15 > dev->mtu) { - mutex_unlock(&econet_mutex); - return -EMSGSIZE; - } - if (dev->type == ARPHRD_ECONET) { /* Real hardware Econet. We're not worthy etc. 
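/*
 * The memset() added to __dn_getsockopt() just above, like the earlier ethtool
 * kmalloc()->kzalloc() conversions, closes one class of bug: a structure that
 * is only partially filled in before being copied to user space would otherwise
 * hand out whatever was left in the untouched fields and padding.  A minimal
 * sketch of the safe pattern, with an invented struct and a plain memcpy()
 * standing in for copy_to_user():
 */
#include <string.h>

struct link_info {
    unsigned short state;
    unsigned short segsize;    /* deliberately never set below */
};

static void fill_link_info(void *user_buf, size_t user_len, int connected)
{
    struct link_info info;

    memset(&info, 0, sizeof(info));     /* zero first, then fill in what we know */
    info.state = connected ? 1 : 0;

    if (user_len > sizeof(info))
        user_len = sizeof(info);
    memcpy(user_buf, &info, user_len);  /* stand-in for copy_to_user() */
}

int main(void)
{
    unsigned char buf[8] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    fill_link_info(buf, sizeof(buf), 1);
    return 0;
}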
*/ #ifdef CONFIG_ECONET_NATIVE unsigned short proto = 0; int res; + if (len + 15 > dev->mtu) { + mutex_unlock(&econet_mutex); + return -EMSGSIZE; + } + dev_hold(dev); skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev), @@ -351,7 +343,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, eb = (struct ec_cb *)&skb->cb; - /* BUG: saddr may be NULL */ eb->cookie = saddr->cookie; eb->sec = *saddr; eb->sent = ec_tx_done; @@ -415,6 +406,11 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, return -ENETDOWN; /* No socket - can't send */ } + if (len > 32768) { + err = -E2BIG; + goto error; + } + /* Make up a UDP datagram and hand it off to some higher intellect. */ memset(&udpdest, 0, sizeof(udpdest)); @@ -439,43 +435,33 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, udpdest.sin_addr.s_addr = htonl(network | addr.station); } + memset(&ah, 0, sizeof(ah)); ah.port = port; ah.cb = cb & 0x7f; ah.code = 2; /* magic */ - ah.pad = 0; /* tack our header on the front of the iovec */ size = sizeof(struct aunhdr); - /* - * XXX: that is b0rken. We can't mix userland and kernel pointers - * in iovec, since on a lot of platforms copy_from_user() will - * *not* work with the kernel and userland ones at the same time, - * regardless of what we do with set_fs(). And we are talking about - * econet-over-ethernet here, so "it's only ARM anyway" doesn't - * apply. Any suggestions on fixing that code? -- AV - */ iov[0].iov_base = (void *)&ah; iov[0].iov_len = size; - for (i = 0; i < msg->msg_iovlen; i++) { - void __user *base = msg->msg_iov[i].iov_base; - size_t iov_len = msg->msg_iov[i].iov_len; - /* Check it now since we switch to KERNEL_DS later. */ - if (!access_ok(VERIFY_READ, base, iov_len)) { - mutex_unlock(&econet_mutex); - return -EFAULT; - } - iov[i+1].iov_base = base; - iov[i+1].iov_len = iov_len; - size += iov_len; + + userbuf = vmalloc(len); + if (userbuf == NULL) { + err = -ENOMEM; + goto error; } + iov[1].iov_base = userbuf; + iov[1].iov_len = len; + err = memcpy_fromiovec(userbuf, msg->msg_iov, len); + if (err) + goto error_free_buf; + /* Get a skbuff (no data, just holds our cb information) */ if ((skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, - &err)) == NULL) { - mutex_unlock(&econet_mutex); - return err; - } + &err)) == NULL) + goto error_free_buf; eb = (struct ec_cb *)&skb->cb; @@ -491,7 +477,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, udpmsg.msg_name = (void *)&udpdest; udpmsg.msg_namelen = sizeof(udpdest); udpmsg.msg_iov = &iov[0]; - udpmsg.msg_iovlen = msg->msg_iovlen + 1; + udpmsg.msg_iovlen = 2; udpmsg.msg_control = NULL; udpmsg.msg_controllen = 0; udpmsg.msg_flags=0; @@ -499,9 +485,13 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */ err = sock_sendmsg(udpsock, &udpmsg, size); set_fs(oldfs); + +error_free_buf: + vfree(userbuf); #else err = -EPROTOTYPE; #endif + error: mutex_unlock(&econet_mutex); return err; @@ -671,6 +661,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) err = 0; switch (cmd) { case SIOCSIFADDR: + if (!capable(CAP_NET_ADMIN)) { + err = -EPERM; + break; + } + edev = dev->ec_ptr; if (edev == NULL) { /* Magic up a new one. 
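/*
 * The econet_sendmsg() rework above stops building an iovec that mixes kernel
 * and user pointers: the payload is capped at 32768 bytes, gathered into a
 * single bounce buffer with memcpy_fromiovec(), and then sent as a fixed
 * two-element iovec (AUN header + bounce buffer).  Below is a hedged userspace
 * sketch of that gather step; struct aun_header, send_two() and the malloc()
 * bounce buffer are stand-ins for the real AUN header, sock_sendmsg() and the
 * vmalloc() used in the patch.
 */
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

#define MAX_PAYLOAD 32768

struct aun_header {
    unsigned char port, cb, code, pad;
};

static int send_two(const struct iovec iov[2]) { (void)iov; return 0; }  /* socket-send stub */

static int send_gathered(const struct iovec *user_iov, int iovcnt, size_t len,
                         unsigned char port, unsigned char cb)
{
    /* remaining members (pad) are zeroed, like the memset(&ah, 0, ...) above */
    struct aun_header hdr = { .port = port, .cb = (unsigned char)(cb & 0x7f), .code = 2 };
    struct iovec iov[2];
    unsigned char *buf;
    size_t off = 0;
    int i, rc;

    if (len > MAX_PAYLOAD)
        return -1;                      /* mirrors the new -E2BIG check */

    buf = malloc(len ? len : 1);        /* one contiguous bounce buffer, no user pointers later */
    if (!buf)
        return -1;

    for (i = 0; i < iovcnt && off < len; i++) {
        size_t chunk = user_iov[i].iov_len;

        if (chunk > len - off)
            chunk = len - off;
        memcpy(buf + off, user_iov[i].iov_base, chunk);
        off += chunk;
    }

    iov[0].iov_base = &hdr;  iov[0].iov_len = sizeof(hdr);
    iov[1].iov_base = buf;   iov[1].iov_len = len;
    rc = send_two(iov);

    free(buf);
    return rc;
}

int main(void)
{
    char a[] = "hello", b[] = "world";
    struct iovec uv[2] = { { a, 5 }, { b, 5 } };

    return send_gathered(uv, 2, 10, 0x54, 0x80);
}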
*/ @@ -854,7 +849,11 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) unsigned char stn = ntohl(ip->saddr) & 0xff; struct sock *sk; struct sk_buff *newskb; - struct ec_device *edev = skb->dev->ec_ptr; + struct dst_entry *dst = skb_dst(skb); + struct ec_device *edev = NULL; + + if (dst) + edev = dst->dev->ec_ptr; if (! edev) goto bad; diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index fb24658..31cafd4 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -62,8 +62,11 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) } if (!inet->inet_saddr) inet->inet_saddr = rt->rt_src; /* Update source address */ - if (!inet->inet_rcv_saddr) + if (!inet->inet_rcv_saddr) { inet->inet_rcv_saddr = rt->rt_src; + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); + } inet->inet_daddr = rt->rt_dst; inet->inet_dport = usin->sin_port; sk->sk_state = TCP_ESTABLISHED; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 382bc76..ff6c372 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1029,6 +1029,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu) return mtu >= 68; } +static void inetdev_send_gratuitous_arp(struct net_device *dev, + struct in_device *in_dev) + +{ + struct in_ifaddr *ifa = in_dev->ifa_list; + + if (!ifa) + return; + + arp_send(ARPOP_REQUEST, ETH_P_ARP, + ifa->ifa_address, dev, + ifa->ifa_address, NULL, + dev->dev_addr, NULL); +} + /* Called only under RTNL semaphore */ static int inetdev_event(struct notifier_block *this, unsigned long event, @@ -1082,16 +1097,12 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, ip_mc_up(in_dev); /* fall through */ case NETDEV_CHANGEADDR: + if (!IN_DEV_ARP_NOTIFY(in_dev)) + break; + /* fall through */ + case NETDEV_NOTIFY_PEERS: /* Send gratuitous ARP to notify of link change */ - if (IN_DEV_ARP_NOTIFY(in_dev)) { - struct in_ifaddr *ifa = in_dev->ifa_list; - - if (ifa) - arp_send(ARPOP_REQUEST, ETH_P_ARP, - ifa->ifa_address, dev, - ifa->ifa_address, NULL, - dev->dev_addr, NULL); - } + inetdev_send_gratuitous_arp(dev, in_dev); break; case NETDEV_DOWN: ip_mc_down(in_dev); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index e5fa2dd..7403b9b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk, { struct inet_diag_req *r = NLMSG_DATA(cb->nlh); - if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { struct inet_diag_entry entry; - struct rtattr *bc = (struct rtattr *)(r + 1); + const struct nlattr *bc = nlmsg_find_attr(cb->nlh, + sizeof(*r), + INET_DIAG_REQ_BYTECODE); struct inet_sock *inet = inet_sk(sk); entry.family = sk->sk_family; @@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk, entry.dport = ntohs(inet->inet_dport); entry.userlocks = sk->sk_userlocks; - if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) + if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) return 0; } @@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, { struct inet_diag_req *r = NLMSG_DATA(cb->nlh); - if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { struct inet_diag_entry entry; - struct rtattr *bc = (struct rtattr *)(r + 1); + const struct nlattr *bc = nlmsg_find_attr(cb->nlh, + sizeof(*r), + INET_DIAG_REQ_BYTECODE); entry.family = tw->tw_family; #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) @@ -548,7 +552,7 @@ 
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, entry.dport = ntohs(tw->tw_dport); entry.userlocks = 0; - if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) + if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) return 0; } @@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, struct inet_diag_req *r = NLMSG_DATA(cb->nlh); struct inet_connection_sock *icsk = inet_csk(sk); struct listen_sock *lopt; - struct rtattr *bc = NULL; + const struct nlattr *bc = NULL; struct inet_sock *inet = inet_sk(sk); int j, s_j; int reqnum, s_reqnum; @@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, if (!lopt || !lopt->qlen) goto out; - if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { - bc = (struct rtattr *)(r + 1); + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { + bc = nlmsg_find_attr(cb->nlh, sizeof(*r), + INET_DIAG_REQ_BYTECODE); entry.sport = inet->inet_num; entry.userlocks = sk->sk_userlocks; } @@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, &ireq->rmt_addr; entry.dport = ntohs(ireq->rmt_port); - if (!inet_diag_bc_run(RTA_DATA(bc), - RTA_PAYLOAD(bc), &entry)) + if (!inet_diag_bc_run(nla_data(bc), + nla_len(bc), &entry)) continue; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 32618e1..cd527c4 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1701,3 +1701,4 @@ module_exit(ipgre_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("gre"); MODULE_ALIAS_RTNL_LINK("gretap"); +MODULE_ALIAS_NETDEV("gre0"); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 041d41d..f300f43 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -488,9 +488,8 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) * we can switch to copy when see the first bad fragment. */ if (skb_has_frags(skb)) { - struct sk_buff *frag; + struct sk_buff *frag, *frag2; int first_len = skb_pagelen(skb); - int truesizes = 0; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || @@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) - goto slow_path; + goto slow_path_clean; /* Partially cloned skb? */ if (skb_shared(frag)) - goto slow_path; + goto slow_path_clean; BUG_ON(frag->sk); if (skb->sk) { frag->sk = skb->sk; frag->destructor = sock_wfree; } - truesizes += frag->truesize; + skb->truesize -= frag->truesize; } /* Everything is OK. Generate! 
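/*
 * The ip_fragment() rework in this hunk (mirrored for ip6_fragment() further
 * on) adds a slow_path_clean pass: the fast path "adopts" each fragment as it
 * walks the list (frag->sk, destructor, truesize accounting), so when a later
 * fragment turns out to be unusable it must walk the list again and undo only
 * the fragments it already touched before falling back to the slow path.  A
 * generic sketch of that prepare-or-roll-back pattern over a toy linked list;
 * every name here is invented.
 */
#include <stdbool.h>
#include <stddef.h>

struct frag {
    struct frag *next;
    bool adopted;
    bool usable;
};

/* Returns true if every fragment could be prepared; otherwise rolls back. */
static bool prepare_fast_path(struct frag *head)
{
    struct frag *f, *bad = NULL;

    for (f = head; f; f = f->next) {
        if (!f->usable) {
            bad = f;
            goto rollback;
        }
        f->adopted = true;          /* the "frag->sk = skb->sk" step */
    }
    return true;

rollback:
    for (f = head; f && f != bad; f = f->next)
        f->adopted = false;         /* undo only what we actually changed */
    return false;
}

int main(void)
{
    struct frag c = { NULL, false, false };   /* this one forces the rollback */
    struct frag b = { &c, false, true };
    struct frag a = { &b, false, true };

    return prepare_fast_path(&a) ? 1 : 0;     /* returns 0: fell back, nothing left adopted */
}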
*/ @@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) frag = skb_shinfo(skb)->frag_list; skb_frag_list_init(skb); skb->data_len = first_len - skb_headlen(skb); - skb->truesize -= truesizes; skb->len = first_len; iph->tot_len = htons(first_len); iph->frag_off = htons(IP_MF); @@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) } IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); return err; + +slow_path_clean: + skb_walk_frags(skb, frag2) { + if (frag2 == frag) + break; + frag2->sk = NULL; + frag2->destructor = NULL; + skb->truesize += frag2->truesize; + } } slow_path: diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 7fd6367..5d8e362 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -850,3 +850,4 @@ static void __exit ipip_fini(void) module_init(ipip_init); module_exit(ipip_fini); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETDEV("tunl0"); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 1ac01b1..956584d 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1081,6 +1081,7 @@ static int do_replace(struct net *net, const void __user *user, /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1420,6 +1421,9 @@ static int translate_compat_table(const char *name, if (ret != 0) break; ++i; + if (strcmp(arpt_get_target(iter1)->u.user.name, + XT_ERROR_TARGET) == 0) + ++newinfo->stacksize; } if (ret) { /* @@ -1499,6 +1503,7 @@ static int compat_do_replace(struct net *net, void __user *user, return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1751,6 +1756,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len ret = -EFAULT; break; } + rev.name[sizeof(rev.name)-1] = 0; try_then_request_module(xt_find_revision(NFPROTO_ARP, rev.name, rev.revision, 1, &ret), diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index e1be7dd..1dcf5d2 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c @@ -60,12 +60,12 @@ static int checkentry(const struct xt_tgchk_param *par) if (mangle->flags & ~ARPT_MANGLE_MASK || !(mangle->flags & ARPT_MANGLE_MASK)) - return false; + return -EINVAL; if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT && mangle->target != ARPT_CONTINUE) - return false; - return true; + return -EINVAL; + return 0; } static struct xt_target arpt_mangle_reg __read_mostly = { diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 4b6c5ca..e8c4ddf 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1273,6 +1273,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1747,6 +1748,9 @@ translate_compat_table(struct net *net, if (ret != 0) break; ++i; + if (strcmp(ipt_get_target(iter1)->u.user.name, + XT_ERROR_TARGET) == 0) + ++newinfo->stacksize; } if (ret) { /* @@ -1814,6 +1818,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct 
xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -2043,6 +2048,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) ret = -EFAULT; break; } + rev.name[sizeof(rev.name)-1] = 0; if (cmd == IPT_SO_GET_REVISION_TARGET) target = 1; diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index f91c94b..de77301 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -663,8 +663,11 @@ static ssize_t clusterip_proc_write(struct file *file, const char __user *input, char buffer[PROC_WRITELEN+1]; unsigned long nodenum; - if (copy_from_user(buffer, input, PROC_WRITELEN)) + if (size > PROC_WRITELEN) + return -EIO; + if (copy_from_user(buffer, input, size)) return -EFAULT; + buffer[size] = 0; if (*buffer == '+') { nodenum = simple_strtoul(buffer+1, NULL, 10); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 560acc6..0189deb 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2741,6 +2741,11 @@ slow_output: EXPORT_SYMBOL_GPL(__ip_route_output_key); +static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie) +{ + return NULL; +} + static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) { } @@ -2749,7 +2754,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { .family = AF_INET, .protocol = cpu_to_be16(ETH_P_IP), .destroy = ipv4_dst_destroy, - .check = ipv4_dst_check, + .check = ipv4_blackhole_dst_check, .update_pmtu = ipv4_rt_blackhole_update_pmtu, .entries = ATOMIC_INIT(0), }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 65afeae..673fbe7 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -388,8 +388,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) */ mask = 0; - if (sk->sk_err) - mask = POLLERR; /* * POLLHUP is certainly not done right. 
But poll() doesn't @@ -453,11 +451,17 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) mask |= POLLOUT | POLLWRNORM; } - } + } else + mask |= POLLOUT | POLLWRNORM; if (tp->urg_data & TCP_URG_VALID) mask |= POLLPRI; } + /* This barrier is coupled with smp_wmb() in tcp_reset() */ + smp_rmb(); + if (sk->sk_err) + mask |= POLLERR; + return mask; } @@ -2002,11 +2006,8 @@ adjudge_to_death: } } if (sk->sk_state != TCP_CLOSE) { - int orphan_count = percpu_counter_read_positive( - sk->sk_prot->orphan_count); - sk_mem_reclaim(sk); - if (tcp_too_many_orphans(sk, orphan_count)) { + if (tcp_too_many_orphans(sk, 0)) { if (net_ratelimit()) printk(KERN_INFO "TCP: too many of orphaned " "sockets\n"); @@ -2176,6 +2177,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, GFP_KERNEL); if (cvp == NULL) return -ENOMEM; + + kref_init(&cvp->kref); } lock_sock(sk); tp->rx_opt.cookie_in_always = @@ -2190,12 +2193,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level, */ kref_put(&tp->cookie_values->kref, tcp_cookie_values_release); - kref_init(&cvp->kref); - tp->cookie_values = cvp; } else { cvp = tp->cookie_values; } } + if (cvp != NULL) { cvp->cookie_desired = ctd.tcpct_cookie_desired; @@ -2209,6 +2211,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, cvp->s_data_desired = ctd.tcpct_s_data_desired; cvp->s_data_constant = 0; /* false */ } + + tp->cookie_values = cvp; } release_sock(sk); return err; @@ -2231,7 +2235,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, /* Values greater than interface MTU won't take effect. However * at the point when this call is done we typically don't yet * know which interface is going to be used */ - if (val < 8 || val > MAX_TCP_WINDOW) { + if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) { err = -EINVAL; break; } @@ -3193,7 +3197,7 @@ void __init tcp_init(void) { struct sk_buff *skb = NULL; unsigned long nr_pages, limit; - int order, i, max_share; + int i, max_share, cnt; unsigned long jiffy = jiffies; BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); @@ -3242,31 +3246,23 @@ void __init tcp_init(void) INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); } - /* Try to be a bit smarter and adjust defaults depending - * on available memory. - */ - for (order = 0; ((1 << order) << PAGE_SHIFT) < - (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket)); - order++) - ; - if (order >= 4) { - tcp_death_row.sysctl_max_tw_buckets = 180000; - sysctl_tcp_max_orphans = 4096 << (order - 4); - sysctl_max_syn_backlog = 1024; - } else if (order < 3) { - tcp_death_row.sysctl_max_tw_buckets >>= (3 - order); - sysctl_tcp_max_orphans >>= (3 - order); - sysctl_max_syn_backlog = 128; - } + + cnt = tcp_hashinfo.ehash_mask + 1; + + tcp_death_row.sysctl_max_tw_buckets = cnt / 2; + sysctl_tcp_max_orphans = cnt / 2; + sysctl_max_syn_backlog = max(128, cnt / 256); /* Set the pressure threshold to be a fraction of global memory that * is up to 1/2 at 256 MB, decreasing toward zero with the amount of - * memory, with a floor of 128 pages. + * memory, with a floor of 128 pages, and a ceiling that prevents an + * integer overflow. 
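/*
 * This tcp_init() change (udp_init() gets the same treatment further down)
 * drops the old page-order heuristics, sizes the orphan/time-wait/syn-backlog
 * limits from the established-hash size, and clamps the memory-pressure limit
 * with both a floor of 128 pages and a ceiling so that sysctl_tcp_mem[2] still
 * fits in an int.  A standalone sketch of that arithmetic; nr_pages and
 * PAGE_SHIFT are illustrative inputs and the formula simply mirrors the patch,
 * it is not a tuning recommendation.
 */
#include <limits.h>
#include <stdio.h>

#define PAGE_SHIFT 12    /* assume 4 KiB pages for the example */

static void size_mem_limits(unsigned long nr_pages, long mem[3])
{
    unsigned long limit;

    limit = nr_pages < (1UL << (28 - PAGE_SHIFT)) ? nr_pages : (1UL << (28 - PAGE_SHIFT));
    limit >>= 20 - PAGE_SHIFT;
    limit = (limit * (nr_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);

    if (limit < 128UL)
        limit = 128UL;                     /* floor: never below 128 pages */
    if (limit > INT_MAX * 4UL / 3 / 2)
        limit = INT_MAX * 4UL / 3 / 2;     /* ceiling: mem[2] below must fit in an int */

    mem[0] = (long)(limit / 4 * 3);
    mem[1] = (long)limit;
    mem[2] = mem[0] * 2;
}

int main(void)
{
    long mem[3];

    size_mem_limits(4UL << 20, mem);       /* 4M pages, i.e. 16 GiB of 4 KiB pages */
    printf("low %ld, pressure %ld, high %ld\n", mem[0], mem[1], mem[2]);
    return 0;
}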
*/ nr_pages = totalram_pages - totalhigh_pages; limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); limit = max(limit, 128UL); + limit = min(limit, INT_MAX * 4UL / 3 / 2); sysctl_tcp_mem[0] = limit / 4 * 3; sysctl_tcp_mem[1] = limit; sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 548d575..d83b496 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4041,6 +4041,8 @@ static void tcp_reset(struct sock *sk) default: sk->sk_err = ECONNRESET; } + /* This barrier is coupled with smp_rmb() in tcp_poll() */ + smp_wmb(); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fe193e5..163a5c1 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -411,6 +411,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) !icsk->icsk_backoff) break; + if (sock_owned_by_user(sk)) + break; + icsk->icsk_backoff--; inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << icsk->icsk_backoff; @@ -425,11 +428,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, TCP_RTO_MAX); - } else if (sock_owned_by_user(sk)) { - /* RTO revert clocked out retransmission, - * but socket is locked. Will defer. */ - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - HZ/20, TCP_RTO_MAX); } else { /* RTO revert clocked out retransmission. * Will retransmit now */ diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7ed9dc1..1df0d17 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -237,11 +237,10 @@ void tcp_select_initial_window(int __space, __u32 mss, /* when initializing use the value from init_rcv_wnd * rather than the default from above */ - if (init_rcv_wnd && - (*rcv_wnd > init_rcv_wnd * mss)) - *rcv_wnd = init_rcv_wnd * mss; - else if (*rcv_wnd > init_cwnd * mss) - *rcv_wnd = init_cwnd * mss; + if (init_rcv_wnd) + *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); + else + *rcv_wnd = min(*rcv_wnd, init_cwnd * mss); } /* Set the clamp no higher than max representable value */ @@ -391,27 +390,30 @@ struct tcp_out_options { */ static u8 tcp_cookie_size_check(u8 desired) { - if (desired > 0) { + int cookie_size; + + if (desired > 0) /* previously specified */ return desired; - } - if (sysctl_tcp_cookie_size <= 0) { + + cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size); + if (cookie_size <= 0) /* no default specified */ return 0; - } - if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) { + + if (cookie_size <= TCP_COOKIE_MIN) /* value too small, specify minimum */ return TCP_COOKIE_MIN; - } - if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) { + + if (cookie_size >= TCP_COOKIE_MAX) /* value too large, specify maximum */ return TCP_COOKIE_MAX; - } - if (0x1 & sysctl_tcp_cookie_size) { + + if (cookie_size & 1) /* 8-bit multiple, illegal, fix it */ - return (u8)(sysctl_tcp_cookie_size + 0x1); - } - return (u8)sysctl_tcp_cookie_size; + cookie_size++; + + return (u8)cookie_size; } /* Write previously computed TCP options to the packet. 
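/*
 * tcp_reset() above gains an smp_wmb() that is explicitly paired with the
 * smp_rmb() added to tcp_poll() in the previous hunk: the error has to be
 * published before the poller is allowed to look at it, otherwise poll() can
 * see the wakeup yet miss POLLERR.  The closest portable analogue of that
 * pairing is a release store matched by an acquire load; the sketch below
 * reduces the socket to two fields and uses C11 atomics plus pthreads, so it
 * illustrates the ordering, not the kernel primitives themselves.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int sk_err_reason;        /* plain field, like the details behind sk->sk_err */
static atomic_int sk_err_flag;   /* publication flag the poller spins on */

static void *resetter(void *arg)
{
    (void)arg;
    sk_err_reason = 104;                                            /* ECONNRESET, written first */
    atomic_store_explicit(&sk_err_flag, 1, memory_order_release);   /* "set sk_err + smp_wmb" */
    return NULL;
}

static void *poller(void *arg)
{
    (void)arg;
    while (!atomic_load_explicit(&sk_err_flag, memory_order_acquire))
        ;                                                           /* "smp_rmb" before reading on */
    printf("poll sees error %d\n", sk_err_reason);                  /* guaranteed to print 104 */
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, poller, NULL);
    pthread_create(&b, NULL, resetter, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}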
@@ -1517,6 +1519,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); u32 send_win, cong_win, limit, in_flight; + int win_divisor; if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) goto send_now; @@ -1548,13 +1551,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) goto send_now; - if (sysctl_tcp_tso_win_divisor) { + win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); + if (win_divisor) { u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); /* If at least some fraction of a window is available, * just use it. */ - chunk /= sysctl_tcp_tso_win_divisor; + chunk /= win_divisor; if (limit >= chunk) goto send_now; } else { diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 440a5c6..ebb3dbb 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -67,18 +67,18 @@ static void tcp_write_err(struct sock *sk) static int tcp_out_of_resources(struct sock *sk, int do_reset) { struct tcp_sock *tp = tcp_sk(sk); - int orphans = percpu_counter_read_positive(&tcp_orphan_count); + int shift = 0; /* If peer does not open window for long time, or did not transmit * anything for long time, penalize it. */ if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset) - orphans <<= 1; + shift++; /* If some dubious ICMP arrived, penalize even more. */ if (sk->sk_err_soft) - orphans <<= 1; + shift++; - if (tcp_too_many_orphans(sk, orphans)) { + if (tcp_too_many_orphans(sk, shift)) { if (net_ratelimit()) printk(KERN_INFO "Out of socket memory\n"); @@ -136,13 +136,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) /* This function calculates a "timeout" which is equivalent to the timeout of a * TCP connection after "boundary" unsuccessful, exponentially backed-off - * retransmissions with an initial RTO of TCP_RTO_MIN. + * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if + * syn_set flag is set. */ static bool retransmits_timed_out(struct sock *sk, - unsigned int boundary) + unsigned int boundary, + bool syn_set) { unsigned int timeout, linear_backoff_thresh; unsigned int start_ts; + unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN; if (!inet_csk(sk)->icsk_retransmits) return false; @@ -152,12 +155,12 @@ static bool retransmits_timed_out(struct sock *sk, else start_ts = tcp_sk(sk)->retrans_stamp; - linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN); + linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base); if (boundary <= linear_backoff_thresh) - timeout = ((2 << boundary) - 1) * TCP_RTO_MIN; + timeout = ((2 << boundary) - 1) * rto_base; else - timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN + + timeout = ((2 << linear_backoff_thresh) - 1) * rto_base + (boundary - linear_backoff_thresh) * TCP_RTO_MAX; return (tcp_time_stamp - start_ts) >= timeout; @@ -168,14 +171,15 @@ static int tcp_write_timeout(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); int retry_until; - bool do_reset; + bool do_reset, syn_set = 0; if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { if (icsk->icsk_retransmits) dst_negative_advice(sk); retry_until = icsk->icsk_syn_retries ? 
: sysctl_tcp_syn_retries; + syn_set = 1; } else { - if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { + if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0)) { /* Black hole detection */ tcp_mtu_probing(icsk, sk); @@ -188,14 +192,14 @@ static int tcp_write_timeout(struct sock *sk) retry_until = tcp_orphan_retries(sk, alive); do_reset = alive || - !retransmits_timed_out(sk, retry_until); + !retransmits_timed_out(sk, retry_until, 0); if (tcp_out_of_resources(sk, do_reset)) return 1; } } - if (retransmits_timed_out(sk, retry_until)) { + if (retransmits_timed_out(sk, retry_until, syn_set)) { /* Has it gone just too far? */ tcp_write_err(sk); return 1; @@ -437,7 +441,7 @@ out_reset_timer: icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); - if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1)) + if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0)) __sk_dst_reset(sk); out:; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index eec4ff4..f0fbbba 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk) } EXPORT_SYMBOL(udp_lib_unhash); +/* + * inet_rcv_saddr was changed, we must rehash secondary hash + */ +void udp_lib_rehash(struct sock *sk, u16 newhash) +{ + if (sk_hashed(sk)) { + struct udp_table *udptable = sk->sk_prot->h.udp_table; + struct udp_hslot *hslot, *hslot2, *nhslot2; + + hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); + nhslot2 = udp_hashslot2(udptable, newhash); + udp_sk(sk)->udp_portaddr_hash = newhash; + if (hslot2 != nhslot2) { + hslot = udp_hashslot(udptable, sock_net(sk), + udp_sk(sk)->udp_port_hash); + /* we must lock primary chain too */ + spin_lock_bh(&hslot->lock); + + spin_lock(&hslot2->lock); + hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); + hslot2->count--; + spin_unlock(&hslot2->lock); + + spin_lock(&nhslot2->lock); + hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, + &nhslot2->head); + nhslot2->count++; + spin_unlock(&nhslot2->lock); + + spin_unlock_bh(&hslot->lock); + } + } +} +EXPORT_SYMBOL(udp_lib_rehash); + +static void udp_v4_rehash(struct sock *sk) +{ + u16 new_hash = udp4_portaddr_hash(sock_net(sk), + inet_sk(sk)->inet_rcv_saddr, + inet_sk(sk)->inet_num); + udp_lib_rehash(sk, new_hash); +} + static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; @@ -1843,6 +1886,7 @@ struct proto udp_prot = { .backlog_rcv = __udp_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, @@ -2123,12 +2167,14 @@ void __init udp_init(void) udp_table_init(&udp_table, "UDP"); /* Set the pressure threshold up by the same strategy of TCP. It is a * fraction of global memory that is up to 1/2 at 256 MB, decreasing - * toward zero with the amount of memory, with a floor of 128 pages. + * toward zero with the amount of memory, with a floor of 128 pages, + * and a ceiling that prevents an integer overflow. 
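/*
 * retransmits_timed_out() above now takes a syn_set flag so that connections
 * still in SYN_SENT/SYN_RECV back off from TCP_TIMEOUT_INIT instead of
 * TCP_RTO_MIN.  The sketch below reproduces just the timeout arithmetic from
 * that function: exponentially doubling intervals up to RTO_MAX, then a linear
 * tail.  The constants are arbitrary "ticks" and the helper names are invented;
 * only the formula follows the patch.
 */
#include <stdio.h>

#define RTO_MIN        50    /* illustrative tick values, not jiffies */
#define TIMEOUT_INIT  300
#define RTO_MAX      6000

static unsigned int ilog2_u(unsigned int v)
{
    unsigned int r = 0;

    while (v > 1) {
        v >>= 1;
        r++;
    }
    return r;
}

/* Total time covered by 'boundary' backed-off retransmissions. */
static unsigned int retrans_timeout(unsigned int boundary, int syn_set)
{
    unsigned int rto_base = syn_set ? TIMEOUT_INIT : RTO_MIN;
    unsigned int linear_thresh = ilog2_u(RTO_MAX / rto_base);

    if (boundary <= linear_thresh)
        return ((2u << boundary) - 1) * rto_base;
    return ((2u << linear_thresh) - 1) * rto_base +
           (boundary - linear_thresh) * RTO_MAX;
}

int main(void)
{
    printf("3 retries: data %u ticks, syn %u ticks\n",
           retrans_timeout(3, 0), retrans_timeout(3, 1));
    return 0;
}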
*/ nr_pages = totalram_pages - totalhigh_pages; limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); limit = max(limit, 128UL); + limit = min(limit, INT_MAX * 4UL / 3 / 2); sysctl_udp_mem[0] = limit / 4 * 3; sysctl_udp_mem[1] = limit; sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 23883a4..ce1e1b5 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net, static int xfrm4_get_tos(struct flowi *fl) { - return fl->fl4_tos; + return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */ } static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 784f34d..0bbaddc 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1424,8 +1424,10 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; - if (addrconf_dad_end(ifp)) + if (addrconf_dad_end(ifp)) { + in6_ifa_put(ifp); return; + } if (net_ratelimit()) printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n", diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 7126846..0e3754a1 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -104,9 +104,12 @@ ipv4_connected: if (ipv6_addr_any(&np->saddr)) ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); - if (ipv6_addr_any(&np->rcv_saddr)) + if (ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); + } goto out; } @@ -191,6 +194,8 @@ ipv4_connected: if (ipv6_addr_any(&np->rcv_saddr)) { ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src); inet->inet_rcv_saddr = LOOPBACK4_IPV6; + if (sk->sk_prot->rehash) + sk->sk_prot->rehash(sk); } ip6_dst_store(sk, dst, diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 89425af..00a0bbe 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -639,7 +639,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) if (skb_has_frags(skb)) { int first_len = skb_pagelen(skb); - int truesizes = 0; + struct sk_buff *frag2; if (first_len - hlen > mtu || ((first_len - hlen) & 7) || @@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) if (frag->len > mtu || ((frag->len & 7) && frag->next) || skb_headroom(frag) < hlen) - goto slow_path; + goto slow_path_clean; /* Partially cloned skb? 
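/*
 * udp_lib_rehash() and the new ->rehash calls in ip4_datagram_connect() /
 * ip6_datagram_connect() above handle a source address that is only chosen at
 * connect() time: once inet_rcv_saddr changes, the (address, port) secondary
 * hash no longer matches the bucket the socket sits in, so the socket has to
 * be moved while holding the old and new slot locks.  The toy below shows only
 * the bucket move, single-threaded on purpose; the hash function and the
 * structures are invented.
 */
#include <stddef.h>
#include <stdint.h>

#define NBUCKETS 16

struct node {
    struct node *next;
    uint32_t addr;
    uint16_t port;
};

static struct node *buckets[NBUCKETS];

static unsigned int hash_slot(uint32_t addr, uint16_t port)
{
    return (addr ^ port) & (NBUCKETS - 1);
}

static void bucket_del(struct node *n, unsigned int slot)
{
    struct node **pp;

    for (pp = &buckets[slot]; *pp; pp = &(*pp)->next)
        if (*pp == n) {
            *pp = n->next;
            return;
        }
}

/* Called when n->addr is about to change: move it to the bucket for the new key. */
static void rehash(struct node *n, uint32_t new_addr)
{
    unsigned int old_slot = hash_slot(n->addr, n->port);
    unsigned int new_slot = hash_slot(new_addr, n->port);

    n->addr = new_addr;
    if (old_slot == new_slot)
        return;                         /* same chain, nothing to move */

    bucket_del(n, old_slot);            /* kernel: done under the old and new slot locks */
    n->next = buckets[new_slot];
    buckets[new_slot] = n;
}

int main(void)
{
    struct node n = { NULL, 0, 5353 };

    buckets[hash_slot(n.addr, n.port)] = &n;
    rehash(&n, 0x7f000001);             /* connect() chose 127.0.0.1 as the source */
    return buckets[hash_slot(n.addr, n.port)] == &n ? 0 : 1;
}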
*/ if (skb_shared(frag)) - goto slow_path; + goto slow_path_clean; BUG_ON(frag->sk); if (skb->sk) { frag->sk = skb->sk; frag->destructor = sock_wfree; - truesizes += frag->truesize; } + skb->truesize -= frag->truesize; } err = 0; @@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) first_len = skb_pagelen(skb); skb->data_len = first_len - skb_headlen(skb); - skb->truesize -= truesizes; skb->len = first_len; ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr)); @@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) IPSTATS_MIB_FRAGFAILS); dst_release(&rt->u.dst); return err; + +slow_path_clean: + skb_walk_frags(skb, frag2) { + if (frag2 == frag) + break; + frag2->sk = NULL; + frag2->destructor = NULL; + skb->truesize += frag2->truesize; + } } slow_path: diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 8f39893..81b7887 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -57,6 +57,7 @@ MODULE_AUTHOR("Ville Nuorvala"); MODULE_DESCRIPTION("IPv6 tunneling device"); MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETDEV("ip6tnl0"); #define IPV6_TLV_TEL_DST_SIZE 8 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 9d2d68f..008e186 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1289,6 +1289,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -1765,6 +1766,9 @@ translate_compat_table(struct net *net, if (ret != 0) break; ++i; + if (strcmp(ip6t_get_target(iter1)->u.user.name, + XT_ERROR_TARGET) == 0) + ++newinfo->stacksize; } if (ret) { /* @@ -1832,6 +1836,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; + tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) @@ -2061,6 +2066,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) ret = -EFAULT; break; } + rev.name[sizeof(rev.name)-1] = 0; if (cmd == IP6T_SO_GET_REVISION_TARGET) target = 1; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 252d761..4d947c1 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1559,14 +1559,13 @@ out: * i.e. Path MTU discovery */ -void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, - struct net_device *dev, u32 pmtu) +static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr, + struct net *net, u32 pmtu, int ifindex) { struct rt6_info *rt, *nrt; - struct net *net = dev_net(dev); int allfrag = 0; - rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0); + rt = rt6_lookup(net, daddr, saddr, ifindex, 0); if (rt == NULL) return; @@ -1634,6 +1633,27 @@ out: dst_release(&rt->u.dst); } +void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, + struct net_device *dev, u32 pmtu) +{ + struct net *net = dev_net(dev); + + /* + * RFC 1981 states that a node "MUST reduce the size of the packets it + * is sending along the path" that caused the Packet Too Big message. + * Since it's not possible in the general case to determine which + * interface was used to send the original packet, we update the MTU + * on the interface that will be used to send future packets. 
We also + * update the MTU on the interface that received the Packet Too Big in + * case the original packet was forced out that interface with + * SO_BINDTODEVICE or similar. This is the next best thing to the + * correct behaviour, which would be to update the MTU on all + * interfaces. + */ + rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0); + rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex); +} + /* * Misc support functions */ diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e51e650..2020bea 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1239,4 +1239,4 @@ static int __init sit_init(void) module_init(sit_init); module_exit(sit_cleanup); MODULE_LICENSE("GPL"); -MODULE_ALIAS("sit0"); +MODULE_ALIAS_NETDEV("sit0"); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 87be586..97e1214 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum) return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr); } +static void udp_v6_rehash(struct sock *sk) +{ + u16 new_hash = udp6_portaddr_hash(sock_net(sk), + &inet6_sk(sk)->rcv_saddr, + inet_sk(sk)->inet_num); + + udp_lib_rehash(sk, new_hash); +} + static inline int compute_score(struct sock *sk, struct net *net, unsigned short hnum, struct in6_addr *saddr, __be16 sport, @@ -1452,6 +1461,7 @@ struct proto udpv6_prot = { .backlog_rcv = udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, + .rehash = udp_v6_rehash, .get_port = udp_v6_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 79986a6..83ef96e 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); if (err < 0) { - kfree(self->ias_obj->name); - kfree(self->ias_obj); + irias_delete_object(self->ias_obj); + self->ias_obj = NULL; goto out; } @@ -2278,6 +2278,14 @@ static int __irda_getsockopt(struct socket *sock, int level, int optname, switch (optname) { case IRLMP_ENUMDEVICES: + + /* Offset to first device entry */ + offset = sizeof(struct irda_device_list) - + sizeof(struct irda_device_info); + + if (len < offset) + return -EINVAL; + /* Ask lmp for the current discovery log */ discoveries = irlmp_get_discoveries(&list.len, self->mask.word, self->nslots); @@ -2287,15 +2295,9 @@ static int __irda_getsockopt(struct socket *sock, int level, int optname, err = 0; /* Write total list length back to client */ - if (copy_to_user(optval, &list, - sizeof(struct irda_device_list) - - sizeof(struct irda_device_info))) + if (copy_to_user(optval, &list, offset)) err = -EFAULT; - /* Offset to first device entry */ - offset = sizeof(struct irda_device_list) - - sizeof(struct irda_device_info); - /* Copy the list itself - watch for overflow */ if(list.len > 2048) { diff --git a/net/irda/iriap.c b/net/irda/iriap.c index fce364c..3647753 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -502,7 +502,8 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len); /* Make sure the string is null-terminated */ - fp[n+value_len] = 0x00; + if (n + value_len < skb->len) + fp[n + value_len] = 0x00; IRDA_DEBUG(4, "Got string %s\n", fp+n); /* Will truncate to IAS_MAX_STRING bytes */ @@ -655,10 +656,16 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, n = 1; name_len = fp[n++]; + 
+ IRDA_ASSERT(name_len < IAS_MAX_CLASSNAME + 1, return;); + memcpy(name, fp+n, name_len); n+=name_len; name[name_len] = '\0'; attr_len = fp[n++]; + + IRDA_ASSERT(attr_len < IAS_MAX_ATTRIBNAME + 1, return;); + memcpy(attr, fp+n, attr_len); n+=attr_len; attr[attr_len] = '\0'; diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index a788f9e..6130f9d 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */ le16_to_cpus(&val_len); n+=2; - if (val_len > 1016) { + if (val_len >= 1016) { IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); return -RSP_INVALID_COMMAND_FORMAT; } diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 6a1a202..ab5bee2 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -106,6 +106,9 @@ irnet_ctrl_write(irnet_socket * ap, while(isspace(start[length - 1])) length--; + DABORT(length < 5 || length > NICKNAME_MAX_LEN + 5, + -EINVAL, CTRL_ERROR, "Invalid nickname.\n"); + /* Copy the name for later reuse */ memcpy(ap->rname, start + 5, length - 5); ap->rname[length - 5] = '\0'; diff --git a/net/irda/parameters.c b/net/irda/parameters.c index fc1a205..71cd38c 100644 --- a/net/irda/parameters.c +++ b/net/irda/parameters.c @@ -298,6 +298,8 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, p.pi = pi; /* In case handler needs to know */ p.pl = buf[1]; /* Extract length of value */ + if (p.pl > 32) + p.pl = 32; IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__, p.pi, p.pl); @@ -318,7 +320,7 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, (__u8) str[0], (__u8) str[1]); /* Null terminate string */ - str[p.pl+1] = '\0'; + str[p.pl] = '\0'; p.pv.c = str; /* Handler will need to take a copy */ diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 58c6c4c..1ae6976 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, printk("\n"); } - if (data_len < ETH_HLEN) + if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) goto error; secpath_reset(skb); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 0852512..362e3c6 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -676,4 +676,8 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Chapman "); MODULE_DESCRIPTION("L2TP over IP"); MODULE_VERSION("1.0"); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP); + +/* Use the value of SOCK_DGRAM (2) directory, because __stringify does't like + * enums + */ +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 023ba82..e35dbe5 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -317,8 +317,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) goto out; rc = -ENODEV; rtnl_lock(); + rcu_read_lock(); if (sk->sk_bound_dev_if) { - llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); + llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); if (llc->dev) { if (!addr->sllc_arphrd) addr->sllc_arphrd = llc->dev->type; @@ -329,13 +330,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) !llc_mac_match(addr->sllc_mac, llc->dev->dev_addr)) { rc = -EINVAL; - dev_put(llc->dev); llc->dev = NULL; } } } else llc->dev = dev_getbyhwaddr(&init_net, 
addr->sllc_arphrd, addr->sllc_mac); + rcu_read_unlock(); rtnl_unlock(); if (!llc->dev) goto out; @@ -1024,7 +1025,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); - int rc = -EINVAL, opt; + unsigned int opt; + int rc = -EINVAL; lock_sock(sk); if (unlikely(level != SOL_LLC || optlen != sizeof(int))) diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 98258b7..72ab63d 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -128,6 +128,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, enum ieee80211_back_parties initiator) { + struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; struct ieee80211_local *local = sta->local; int ret; u8 *state; @@ -137,6 +138,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, sta->sta.addr, tid); #endif /* CONFIG_MAC80211_HT_DEBUG */ + del_timer(&tid_tx->addba_resp_timer); + state = &sta->ampdu_mlme.tid_state_tx[tid]; if (*state == HT_AGG_STATE_OPERATIONAL) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 67ee34f..e8f3545 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -639,6 +639,7 @@ static void sta_apply_parameters(struct ieee80211_local *local, struct sta_info *sta, struct station_parameters *params) { + unsigned long flags; u32 rates; int i, j; struct ieee80211_supported_band *sband; @@ -647,7 +648,7 @@ static void sta_apply_parameters(struct ieee80211_local *local, sband = local->hw.wiphy->bands[local->oper_channel->band]; - spin_lock_bh(&sta->lock); + spin_lock_irqsave(&sta->flaglock, flags); mask = params->sta_flags_mask; set = params->sta_flags_set; @@ -674,7 +675,7 @@ static void sta_apply_parameters(struct ieee80211_local *local, if (set & BIT(NL80211_STA_FLAG_MFP)) sta->flags |= WLAN_STA_MFP; } - spin_unlock_bh(&sta->lock); + spin_unlock_irqrestore(&sta->flaglock, flags); /* * cfg80211 validates this (1-2007) and allows setting the AID diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b2cc1fd..c429878 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -427,6 +427,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, if (!sta) return NULL; + sta->last_rx = jiffies; set_sta_flags(sta, WLAN_STA_AUTHORIZED); /* make sure mandatory rates are always added */ diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1a9e2da..a9f8947 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1002,6 +1002,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, u64 timestamp); void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); +void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 22a384d..cd36d49 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -106,7 +106,8 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed) if (scan_chan) { chan = scan_chan; channel_type = NL80211_CHAN_NO_HT; - } else if (local->tmp_channel) { + } else if (local->tmp_channel && + local->oper_channel != local->tmp_channel) { chan = scan_chan = local->tmp_channel; 
channel_type = local->tmp_channel_type; } else { @@ -659,6 +660,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) rtnl_unlock(); + /* + * Now all work items will be gone, but the + * timer might still be armed, so delete it + */ + del_timer_sync(&local->work_timer); + cancel_work_sync(&local->reconfig_filter); ieee80211_clear_tx_pending(local); diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 3cd5f7b..ea13a80 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -65,7 +65,6 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); mesh_accept_plinks_update(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } static inline @@ -73,7 +72,6 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata) { atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); mesh_accept_plinks_update(sdata); - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } /** @@ -115,7 +113,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, } /** - * mesh_plink_deactivate - deactivate mesh peer link + * __mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * @@ -123,18 +121,23 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, * * Locking: the caller must hold sta->lock */ -static void __mesh_plink_deactivate(struct sta_info *sta) +static bool __mesh_plink_deactivate(struct sta_info *sta) { struct ieee80211_sub_if_data *sdata = sta->sdata; + bool deactivated = false; - if (sta->plink_state == PLINK_ESTAB) + if (sta->plink_state == PLINK_ESTAB) { mesh_plink_dec_estab_count(sdata); + deactivated = true; + } sta->plink_state = PLINK_BLOCKED; mesh_path_flush_by_nexthop(sta); + + return deactivated; } /** - * __mesh_plink_deactivate - deactivate mesh peer link + * mesh_plink_deactivate - deactivate mesh peer link * * @sta: mesh peer link to deactivate * @@ -142,9 +145,15 @@ static void __mesh_plink_deactivate(struct sta_info *sta) */ void mesh_plink_deactivate(struct sta_info *sta) { + struct ieee80211_sub_if_data *sdata = sta->sdata; + bool deactivated; + spin_lock_bh(&sta->lock); - __mesh_plink_deactivate(sta); + deactivated = __mesh_plink_deactivate(sta); spin_unlock_bh(&sta->lock); + + if (deactivated) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, @@ -381,10 +390,16 @@ int mesh_plink_open(struct sta_info *sta) void mesh_plink_block(struct sta_info *sta) { + struct ieee80211_sub_if_data *sdata = sta->sdata; + bool deactivated; + spin_lock_bh(&sta->lock); - __mesh_plink_deactivate(sta); + deactivated = __mesh_plink_deactivate(sta); sta->plink_state = PLINK_BLOCKED; spin_unlock_bh(&sta->lock); + + if (deactivated) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); } @@ -397,6 +412,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m enum plink_event event; enum plink_frame_type ftype; size_t baselen; + bool deactivated; u8 ie_len; u8 *baseaddr; __le16 plid, llid, reason; @@ -651,8 +667,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case CNF_ACPT: del_timer(&sta->plink_timer); sta->plink_state = PLINK_ESTAB; - mesh_plink_inc_estab_count(sdata); spin_unlock_bh(&sta->lock); + mesh_plink_inc_estab_count(sdata); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mpl_dbg("Mesh plink with %pM 
ESTABLISHED\n", sta->sta.addr); break; @@ -684,8 +701,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case OPN_ACPT: del_timer(&sta->plink_timer); sta->plink_state = PLINK_ESTAB; - mesh_plink_inc_estab_count(sdata); spin_unlock_bh(&sta->lock); + mesh_plink_inc_estab_count(sdata); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mpl_dbg("Mesh plink with %pM ESTABLISHED\n", sta->sta.addr); mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, @@ -702,11 +720,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m case CLS_ACPT: reason = cpu_to_le16(MESH_CLOSE_RCVD); sta->reason = reason; - __mesh_plink_deactivate(sta); + deactivated = __mesh_plink_deactivate(sta); sta->plink_state = PLINK_HOLDING; llid = sta->llid; mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); spin_unlock_bh(&sta->lock); + if (deactivated) + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, reason); break; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f803f8b..090b3e6 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -109,7 +109,7 @@ static void run_again(struct ieee80211_if_managed *ifmgd, mod_timer(&ifmgd->timer, timeout); } -static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata) +void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata) { if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER) return; @@ -118,6 +118,19 @@ static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata) round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME)); } +void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + + if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) + return; + + mod_timer(&sdata->u.mgd.conn_mon_timer, + round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); + + ifmgd->probe_send_count = 0; +} + static int ecw2cw(int ecw) { return (1 << ecw) - 1; @@ -954,21 +967,26 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, if (is_multicast_ether_addr(hdr->addr1)) return; - if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) - return; - - mod_timer(&sdata->u.mgd.conn_mon_timer, - round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME)); + ieee80211_sta_reset_conn_monitor(sdata); } static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; const u8 *ssid; + u8 *dst = ifmgd->associated->bssid; + u8 unicast_limit = max(1, IEEE80211_MAX_PROBE_TRIES - 3); + + /* + * Try sending broadcast probe requests for the last three + * probe requests after the first ones failed since some + * buggy APs only support broadcast probe requests. 
+ */ + if (ifmgd->probe_send_count >= unicast_limit) + dst = NULL; ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); - ieee80211_send_probe_req(sdata, ifmgd->associated->bssid, - ssid + 2, ssid[1], NULL, 0); + ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0); ifmgd->probe_send_count++; ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT; @@ -1210,7 +1228,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, rates = 0; basic_rates = 0; - sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + sband = local->hw.wiphy->bands[wk->chan->band]; for (i = 0; i < elems.supp_rates_len; i++) { int rate = (elems.supp_rates[i] & 0x7f) * 5; @@ -1246,11 +1264,11 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, } } - sta->sta.supp_rates[local->hw.conf.channel->band] = rates; + sta->sta.supp_rates[wk->chan->band] = rates; sdata->vif.bss_conf.basic_rates = basic_rates; /* cf. IEEE 802.11 9.2.12 */ - if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ && + if (wk->chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit) sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE; else @@ -1310,7 +1328,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk, * Also start the timer that will detect beacon loss. */ ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt); - mod_beacon_timer(sdata); + ieee80211_sta_reset_beacon_monitor(sdata); return true; } @@ -1413,7 +1431,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata, * we have or will be receiving any beacons or data, so let's * schedule the timers again, just in case. */ - mod_beacon_timer(sdata); + ieee80211_sta_reset_beacon_monitor(sdata); mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(jiffies + @@ -1488,7 +1506,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, ifmgd->last_beacon_signal = rx_status->signal; if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) { ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE; - ifmgd->ave_beacon_signal = rx_status->signal; + ifmgd->ave_beacon_signal = rx_status->signal * 16; ifmgd->last_cqm_event_signal = 0; } else { ifmgd->ave_beacon_signal = @@ -1536,7 +1554,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, * Push the beacon loss detection into the future since * we are processing a beacon from the AP just now. */ - mod_beacon_timer(sdata); + ieee80211_sta_reset_beacon_monitor(sdata); ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable, diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index c36b191..cf5ee30 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -22,12 +22,16 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; local->offchannel_ps_enabled = false; /* FIXME: what to do when local->pspolling is true? 
*/ del_timer_sync(&local->dynamic_ps_timer); + del_timer_sync(&ifmgd->bcn_mon_timer); + del_timer_sync(&ifmgd->conn_mon_timer); + cancel_work_sync(&local->dynamic_ps_enable_work); if (local->hw.conf.flags & IEEE80211_CONF_PS) { @@ -85,6 +89,9 @@ static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata) mod_timer(&local->dynamic_ps_timer, jiffies + msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); } + + ieee80211_sta_reset_beacon_monitor(sdata); + ieee80211_sta_reset_conn_monitor(sdata); } void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local) diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c index 6d0bd19..68fc9c4 100644 --- a/net/mac80211/rate.c +++ b/net/mac80211/rate.c @@ -326,6 +326,9 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata, * if needed. */ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { + /* Skip invalid rates */ + if (info->control.rates[i].idx < 0) + break; /* Rate masking supports only legacy rates for now */ if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS) continue; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index be9abc2..dc7e773 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1710,6 +1710,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) if (!fwd_skb && net_ratelimit()) printk(KERN_DEBUG "%s: failed to clone mesh frame\n", sdata->name); + if (!fwd_skb) + goto out; fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); @@ -1747,6 +1749,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) } } + out: if (is_multicast_ether_addr(hdr->addr1) || sdata->dev->flags & IFF_PROMISC) return RX_CONTINUE; @@ -2156,9 +2159,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, struct net_device *prev_dev = NULL; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - if (status->flag & RX_FLAG_INTERNAL_CMTR) - goto out_free_skb; - if (skb_headroom(skb) < sizeof(*rthdr) && pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC)) goto out_free_skb; @@ -2217,7 +2217,6 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, } else goto out_free_skb; - status->flag |= RX_FLAG_INTERNAL_CMTR; return; out_free_skb: diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index ba9360a..cd6999e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -239,6 +239,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, memcpy(sta->sta.addr, addr, ETH_ALEN); sta->local = local; sta->sdata = sdata; + sta->last_rx = jiffies; if (sta_prepare_rate_control(local, sta, gfp)) { kfree(sta); diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 94613af..5f1310a 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -58,6 +58,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, info->control.vif = &sta->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING | IEEE80211_TX_INTFL_RETRANSMISSION; + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; sta->tx_filtered_count++; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 680bcb7..1ec650b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1668,7 +1668,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_tx_info *info; int ret = NETDEV_TX_BUSY, head_need; u16 
ethertype, hdrlen, meshhdrlen = 0; __le16 fc; @@ -1679,15 +1679,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, int nh_pos, h_pos; struct sta_info *sta = NULL; u32 sta_flags = 0; + struct sk_buff *tmp_skb; if (unlikely(skb->len < ETH_HLEN)) { ret = NETDEV_TX_OK; goto fail; } - nh_pos = skb_network_header(skb) - skb->data; - h_pos = skb_transport_header(skb) - skb->data; - /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; @@ -1859,6 +1857,20 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, goto fail; } + /* + * If the skb is shared we need to obtain our own copy. + */ + if (skb_shared(skb)) { + tmp_skb = skb; + skb = skb_copy(skb, GFP_ATOMIC); + kfree_skb(tmp_skb); + + if (!skb) { + ret = NETDEV_TX_OK; + goto fail; + } + } + hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; @@ -1877,6 +1889,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, encaps_len = 0; } + nh_pos = skb_network_header(skb) - skb->data; + h_pos = skb_transport_header(skb) - skb->data; + skb_pull(skb, skip_header_bytes); nh_pos -= skip_header_bytes; h_pos -= skip_header_bytes; @@ -1941,6 +1956,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, skb_set_network_header(skb, nh_pos); skb_set_transport_header(skb, h_pos); + info = IEEE80211_SKB_CB(skb); memset(info, 0, sizeof(*info)); dev->trans_start = jiffies; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index eeeb8bc..0d7a594 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1252,7 +1252,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) if (!hash) { *vmalloced = 1; printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); - hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); + hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, + PAGE_KERNEL); } if (hash && nulls) diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 7df37fd..2a2a20e 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister); int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) { + if (pf >= ARRAY_SIZE(nf_loggers)) + return -EINVAL; mutex_lock(&nf_log_mutex); if (__find_logger(pf, logger->name) == NULL) { mutex_unlock(&nf_log_mutex); @@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf); void nf_log_unbind_pf(u_int8_t pf) { + if (pf >= ARRAY_SIZE(nf_loggers)) + return; mutex_lock(&nf_log_mutex); rcu_assign_pointer(nf_loggers[pf], NULL); mutex_unlock(&nf_log_mutex); diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c index 23b2d6c..364ad16 100644 --- a/net/netfilter/xt_SECMARK.c +++ b/net/netfilter/xt_SECMARK.c @@ -101,7 +101,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par) switch (info->mode) { case SECMARK_MODE_SEL: err = checkentry_selinux(info); - if (err <= 0) + if (err) return err; break; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a2eb965..eea6817 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1323,8 +1323,11 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; - if (NULL == siocb->scm) + if (NULL == siocb->scm) { siocb->scm = &scm; + memset(&scm, 0, sizeof(scm)); + } + err = scm_send(sock, msg, siocb->scm); if (err < 0) return err; @@ -1400,7 +1403,7 @@ static int netlink_recvmsg(struct 
kiocb *kiocb, struct socket *sock, struct netlink_sock *nlk = nlk_sk(sk); int noblock = flags&MSG_DONTWAIT; size_t copied; - struct sk_buff *skb, *frag __maybe_unused = NULL; + struct sk_buff *skb, *data_skb; int err; if (flags&MSG_OOB) @@ -1412,45 +1415,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, if (skb == NULL) goto out; + data_skb = skb; + #ifdef CONFIG_COMPAT_NETLINK_MESSAGES if (unlikely(skb_shinfo(skb)->frag_list)) { - bool need_compat = !!(flags & MSG_CMSG_COMPAT); - /* - * If this skb has a frag_list, then here that means that - * we will have to use the frag_list skb for compat tasks - * and the regular skb for non-compat tasks. + * If this skb has a frag_list, then here that means that we + * will have to use the frag_list skb's data for compat tasks + * and the regular skb's data for normal (non-compat) tasks. * - * The skb might (and likely will) be cloned, so we can't - * just reset frag_list and go on with things -- we need to - * keep that. For the compat case that's easy -- simply get - * a reference to the compat skb and free the regular one - * including the frag. For the non-compat case, we need to - * avoid sending the frag to the user -- so assign NULL but - * restore it below before freeing the skb. + * If we need to send the compat skb, assign it to the + * 'data_skb' variable so that it will be used below for data + * copying. We keep 'skb' for everything else, including + * freeing both later. */ - if (need_compat) { - struct sk_buff *compskb = skb_shinfo(skb)->frag_list; - skb_get(compskb); - kfree_skb(skb); - skb = compskb; - } else { - frag = skb_shinfo(skb)->frag_list; - skb_shinfo(skb)->frag_list = NULL; - } + if (flags & MSG_CMSG_COMPAT) + data_skb = skb_shinfo(skb)->frag_list; } #endif msg->msg_namelen = 0; - copied = skb->len; + copied = data_skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } - skb_reset_transport_header(skb); - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + skb_reset_transport_header(data_skb); + err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); if (msg->msg_name) { struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; @@ -1470,11 +1463,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, } siocb->scm->creds = *NETLINK_CREDS(skb); if (flags & MSG_TRUNC) - copied = skb->len; - -#ifdef CONFIG_COMPAT_NETLINK_MESSAGES - skb_shinfo(skb)->frag_list = frag; -#endif + copied = data_skb->len; skb_free_datagram(sk, skb); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 2078a27..2f68bbd 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1595,9 +1595,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, err = -EINVAL; vnet_hdr_len = sizeof(vnet_hdr); - if ((len -= vnet_hdr_len) < 0) + if (len < vnet_hdr_len) goto out_free; + len -= vnet_hdr_len; + if (skb_is_gso(skb)) { struct skb_shared_info *sinfo = skb_shinfo(skb); @@ -1704,7 +1706,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) - strlcpy(uaddr->sa_data, dev->name, 15); + strncpy(uaddr->sa_data, dev->name, 14); else memset(uaddr->sa_data, 0, 14); rcu_read_unlock(); @@ -1727,6 +1729,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; + sll->sll_pkttype = 0; rcu_read_lock(); dev = 
dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { diff --git a/net/phonet/pep.c b/net/phonet/pep.c index b2a3ae6..1500302 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -225,12 +225,13 @@ static void pipe_grant_credits(struct sock *sk) static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) { struct pep_sock *pn = pep_sk(sk); - struct pnpipehdr *hdr = pnp_hdr(skb); + struct pnpipehdr *hdr; int wake = 0; if (!pskb_may_pull(skb, sizeof(*hdr) + 4)) return -EINVAL; + hdr = pnp_hdr(skb); if (hdr->data[0] != PN_PEP_TYPE_COMMON) { LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", (unsigned)hdr->data[0]); diff --git a/net/rds/page.c b/net/rds/page.c index 595a952..1dfbfea 100644 --- a/net/rds/page.c +++ b/net/rds/page.c @@ -57,30 +57,17 @@ int rds_page_copy_user(struct page *page, unsigned long offset, unsigned long ret; void *addr; - if (to_user) + addr = kmap(page); + if (to_user) { rds_stats_add(s_copy_to_user, bytes); - else + ret = copy_to_user(ptr, addr + offset, bytes); + } else { rds_stats_add(s_copy_from_user, bytes); - - addr = kmap_atomic(page, KM_USER0); - if (to_user) - ret = __copy_to_user_inatomic(ptr, addr + offset, bytes); - else - ret = __copy_from_user_inatomic(addr + offset, ptr, bytes); - kunmap_atomic(addr, KM_USER0); - - if (ret) { - addr = kmap(page); - if (to_user) - ret = copy_to_user(ptr, addr + offset, bytes); - else - ret = copy_from_user(addr + offset, ptr, bytes); - kunmap(page); - if (ret) - return -EFAULT; + ret = copy_from_user(addr + offset, ptr, bytes); } + kunmap(page); - return 0; + return ret ? -EFAULT : 0; } EXPORT_SYMBOL_GPL(rds_page_copy_user); diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 75fd13b..e2ccf7b 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -474,7 +474,7 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs, goto out; } - if (args->nr_local > (u64)UINT_MAX) { + if (args->nr_local > UIO_MAXIOV) { ret = -EMSGSIZE; goto out; } @@ -500,6 +500,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs, max_pages = max(nr, max_pages); nr_pages += nr; + + /* + * nr for one entry in limited to (UINT_MAX>>PAGE_SHIFT)+1 + * so nr_pages cannot overflow without becoming bigger than + * INT_MAX first. If nr cannot overflow then max_pages should + * be ok. 
+ */ + if (nr_pages > INT_MAX) { + ret = -EINVAL; + goto out; + } } pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL); diff --git a/net/rds/recv.c b/net/rds/recv.c index 795a00b..c93588c 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c @@ -297,7 +297,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) { struct rds_notifier *notifier; - struct rds_rdma_notify cmsg; + struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */ unsigned int count = 0, max_messages = ~0U; unsigned long flags; LIST_HEAD(copy); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 8e45e76..d952e7e 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; - if (addr->srose_ndigis > ROSE_MAX_DIGIS) + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { @@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1) return -EINVAL; - if (addr->srose_ndigis > ROSE_MAX_DIGIS) + if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; /* Source + Destination digis should not exceed ROSE_MAX_DIGIS */ diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c index 1734abb..174d51c 100644 --- a/net/rose/rose_subr.c +++ b/net/rose/rose_subr.c @@ -290,10 +290,15 @@ static int rose_parse_national(unsigned char *p, struct rose_facilities_struct * facilities->source_ndigis = 0; facilities->dest_ndigis = 0; for (pt = p + 2, lg = 0 ; lg < l ; pt += AX25_ADDR_LEN, lg += AX25_ADDR_LEN) { - if (pt[6] & AX25_HBIT) + if (pt[6] & AX25_HBIT) { + if (facilities->dest_ndigis >= ROSE_MAX_DIGIS) + return -1; memcpy(&facilities->dest_digis[facilities->dest_ndigis++], pt, AX25_ADDR_LEN); - else + } else { + if (facilities->source_ndigis >= ROSE_MAX_DIGIS) + return -1; memcpy(&facilities->source_digis[facilities->source_ndigis++], pt, AX25_ADDR_LEN); + } } } p += l + 2; @@ -333,6 +338,11 @@ static int rose_parse_ccitt(unsigned char *p, struct rose_facilities_struct *fac case 0xC0: l = p[1]; + + /* Prevent overflows*/ + if (l < 10 || l > 20) + return -1; + if (*p == FAC_CCITT_DEST_NSAP) { memcpy(&facilities->source_addr, p + 7, ROSE_ADDR_LEN); memcpy(callsign, p + 12, l - 10); @@ -373,12 +383,16 @@ int rose_parse_facilities(unsigned char *p, switch (*p) { case FAC_NATIONAL: /* National */ len = rose_parse_national(p + 1, facilities, facilities_len - 1); + if (len < 0) + return 0; facilities_len -= len + 1; p += len + 1; break; case FAC_CCITT: /* CCITT */ len = rose_parse_ccitt(p + 1, facilities, facilities_len - 1); + if (len < 0) + return 0; facilities_len -= len + 1; p += len + 1; break; diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c index 724553e..abbf4fa 100644 --- a/net/sched/act_nat.c +++ b/net/sched/act_nat.c @@ -218,6 +218,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) goto drop; + icmph = (void *)(skb_network_header(skb) + ihl); iph = (void *)(icmph + 1); if (egress) addr = iph->daddr; @@ -246,7 +247,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, iph->saddr = new_addr; inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, - 
1); + 0); break; } default: diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 78ef2c5..d49c40f 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = { .populate = cgrp_populate, #ifdef CONFIG_NET_CLS_CGROUP .subsys_id = net_cls_subsys_id, -#else -#define net_cls_subsys_id net_cls_subsys.subsys_id #endif .module = THIS_MODULE, }; @@ -123,7 +121,7 @@ static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, * calls by looking at the number of nested bh disable calls because * softirqs always disables bh. */ - if (softirq_count() != SOFTIRQ_OFFSET) { + if (in_serving_softirq()) { /* If there is an sk_classid we'll use that. */ if (!skb->sk) return -1; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a63029e..bd1892f 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -327,6 +327,24 @@ void netif_carrier_off(struct net_device *dev) } EXPORT_SYMBOL(netif_carrier_off); +/** + * netif_notify_peers - notify network peers about existence of @dev + * @dev: network device + * + * Generate traffic such that interested network peers are aware of + * @dev, such as by generating a gratuitous ARP. This may be used when + * a device wants to inform the rest of the network about some sort of + * reconfiguration such as a failover event or virtual machine + * migration. + */ +void netif_notify_peers(struct net_device *dev) +{ + rtnl_lock(); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); + rtnl_unlock(); +} +EXPORT_SYMBOL(netif_notify_peers); + /* "NOOP" scheduler: the best scheduler, recommended for all interfaces under all circumstances. It is difficult to invent anything faster or cheaper. diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index c657628..a9be0ef 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -497,11 +497,22 @@ nla_put_failure: return -1; } +static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg) +{ + return NULL; +} + static unsigned long sfq_get(struct Qdisc *sch, u32 classid) { return 0; } +static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, + u32 classid) +{ + return 0; +} + static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) { struct sfq_sched_data *q = qdisc_priv(sch); @@ -554,8 +565,10 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) } static const struct Qdisc_class_ops sfq_class_ops = { + .leaf = sfq_leaf, .get = sfq_get, .tcf_chain = sfq_find_tcf, + .bind_tcf = sfq_bind, .dump = sfq_dump_class, .dump_stats = sfq_dump_class_stats, .walk = sfq_walk, diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 8636639..ddbbf7c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -543,16 +543,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc) id = ntohs(hmacs->hmac_ids[i]); /* Check the id is in the supported range */ - if (id > SCTP_AUTH_HMAC_ID_MAX) + if (id > SCTP_AUTH_HMAC_ID_MAX) { + id = 0; continue; + } /* See is we support the id. Supported IDs have name and * length fields set, so that we can allocated and use * them. We can safely just check for name, for without the * name, we can't allocate the TFM. 
*/ - if (!sctp_hmac_list[id].hmac_name) + if (!sctp_hmac_list[id].hmac_name) { + id = 0; continue; + } break; } diff --git a/net/sctp/output.c b/net/sctp/output.c index a646681..bcc4590 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -92,7 +92,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); - sctp_packet_reset(packet); packet->vtag = vtag; if (ecn_capable && sctp_packet_empty(packet)) { diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 1827498..b3d890f 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1161,7 +1161,8 @@ SCTP_STATIC __init int sctp_init(void) /* Set the pressure threshold to be a fraction of global memory that * is up to 1/2 at 256 MB, decreasing toward zero with the amount of - * memory, with a floor of 128 pages. + * memory, with a floor of 128 pages, and a ceiling that prevents an + * integer overflow. * Note this initalizes the data in sctpv6_prot too * Unabashedly stolen from tcp_init */ @@ -1169,6 +1170,7 @@ SCTP_STATIC __init int sctp_init(void) limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); limit = max(limit, 128UL); + limit = min(limit, INT_MAX * 4UL / 3 / 2); sysctl_sctp_mem[0] = limit / 4 * 3; sysctl_sctp_mem[1] = limit; sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; diff --git a/net/socket.c b/net/socket.c index 367d547..166ad32 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1652,6 +1652,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, struct iovec iov; int fput_needed; + if (len > INT_MAX) + len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; @@ -1709,6 +1711,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, int err, err2; int fput_needed; + if (size > INT_MAX) + size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 8da2a0e..5359f36 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -745,17 +745,18 @@ gss_pipe_release(struct inode *inode) struct rpc_inode *rpci = RPC_I(inode); struct gss_upcall_msg *gss_msg; +restart: spin_lock(&inode->i_lock); - while (!list_empty(&rpci->in_downcall)) { + list_for_each_entry(gss_msg, &rpci->in_downcall, list) { - gss_msg = list_entry(rpci->in_downcall.next, - struct gss_upcall_msg, list); + if (!list_empty(&gss_msg->msg.list)) + continue; gss_msg->msg.errno = -EPIPE; atomic_inc(&gss_msg->count); __gss_unhash_msg(gss_msg); spin_unlock(&inode->i_lock); gss_release_msg(gss_msg); - spin_lock(&inode->i_lock); + goto restart; } spin_unlock(&inode->i_lock); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 95ccbcf..41a762f 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -48,7 +48,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, return; do { msg = list_entry(head->next, struct rpc_pipe_msg, list); - list_del(&msg->list); + list_del_init(&msg->list); msg->errno = err; destroy_msg(msg); } while (!list_empty(head)); @@ -208,7 +208,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp) if (msg != NULL) { spin_lock(&inode->i_lock); msg->errno = -EAGAIN; - list_del(&msg->list); + list_del_init(&msg->list); spin_unlock(&inode->i_lock); rpci->ops->destroy_msg(msg); } @@ -268,7 +268,7 @@ rpc_pipe_read(struct file *filp, char __user 
*buf, size_t len, loff_t *offset) if (res < 0 || msg->len == msg->copied) { filp->private_data = NULL; spin_lock(&inode->i_lock); - list_del(&msg->list); + list_del_init(&msg->list); spin_unlock(&inode->i_lock); rpci->ops->destroy_msg(msg); } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 4a843b8..c55f753 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -637,14 +637,12 @@ static void __rpc_execute(struct rpc_task *task) save_callback = task->tk_callback; task->tk_callback = NULL; save_callback(task); - } - - /* - * Perform the next FSM step. - * tk_action may be NULL when the task has been killed - * by someone else. - */ - if (!RPC_IS_QUEUED(task)) { + } else { + /* + * Perform the next FSM step. + * tk_action may be NULL when the task has been killed + * by someone else. + */ if (task->tk_action == NULL) break; task->tk_action(task); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index cbc0849..2f5fb71 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -212,6 +212,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, spin_lock(&svc_xprt_class_lock); list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { struct svc_xprt *newxprt; + unsigned short newport; if (strcmp(xprt_name, xcl->xcl_name)) continue; @@ -230,8 +231,9 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, spin_lock_bh(&serv->sv_lock); list_add(&newxprt->xpt_list, &serv->sv_permsocks); spin_unlock_bh(&serv->sv_lock); + newport = svc_xprt_local_port(newxprt); clear_bit(XPT_BUSY, &newxprt->xpt_flags); - return svc_xprt_local_port(newxprt); + return newport; } err: spin_unlock(&svc_xprt_class_lock); @@ -431,8 +433,13 @@ void svc_xprt_received(struct svc_xprt *xprt) { BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); xprt->xpt_pool = NULL; + /* As soon as we clear busy, the xprt could be closed and + * 'put', so we need a reference to call svc_xprt_enqueue with: + */ + svc_xprt_get(xprt); clear_bit(XPT_BUSY, &xprt->xpt_flags); svc_xprt_enqueue(xprt); + svc_xprt_put(xprt); } EXPORT_SYMBOL_GPL(svc_xprt_received); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 7ca65c7..02fbb8d 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -710,6 +710,8 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; + transport->srcport = 0; + write_lock_bh(&sk->sk_callback_lock); transport->inet = NULL; transport->sock = NULL; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 66e889b..1a5f62a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -395,6 +395,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; struct tipc_sock *tsock = tipc_sk(sock->sk); + memset(addr, 0, sizeof(*addr)); if (peer) { if ((sock->state != SS_CONNECTED) && ((peer != 2) || (sock->state != SS_DISCONNECTING))) diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index ab6eab4..ac91f0d 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -76,6 +76,19 @@ struct top_srv { static struct top_srv topsrv = { 0 }; /** + * htohl - convert value to endianness used by destination + * @in: value to convert + * @swap: non-zero if endianness must be reversed + * + * Returns converted value + */ + +static u32 htohl(u32 in, int swap) +{ + return swap ? 
swab32(in) : in; +} + +/** * subscr_send_event - send a message containing a tipc_event to the subscriber * * Note: Must not hold subscriber's server port lock, since tipc_send() will @@ -94,11 +107,11 @@ static void subscr_send_event(struct subscription *sub, msg_sect.iov_base = (void *)&sub->evt; msg_sect.iov_len = sizeof(struct tipc_event); - sub->evt.event = htonl(event); - sub->evt.found_lower = htonl(found_lower); - sub->evt.found_upper = htonl(found_upper); - sub->evt.port.ref = htonl(port_ref); - sub->evt.port.node = htonl(node); + sub->evt.event = htohl(event, sub->swap); + sub->evt.found_lower = htohl(found_lower, sub->swap); + sub->evt.found_upper = htohl(found_upper, sub->swap); + sub->evt.port.ref = htohl(port_ref, sub->swap); + sub->evt.port.node = htohl(node, sub->swap); tipc_send(sub->server_ref, 1, &msg_sect); } @@ -274,29 +287,16 @@ static void subscr_cancel(struct tipc_subscr *s, { struct subscription *sub; struct subscription *sub_temp; - __u32 type, lower, upper, timeout, filter; int found = 0; /* Find first matching subscription, exit if not found */ - type = ntohl(s->seq.type); - lower = ntohl(s->seq.lower); - upper = ntohl(s->seq.upper); - timeout = ntohl(s->timeout); - filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL; - list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, subscription_list) { - if ((type == sub->seq.type) && - (lower == sub->seq.lower) && - (upper == sub->seq.upper) && - (timeout == sub->timeout) && - (filter == sub->filter) && - !memcmp(s->usr_handle,sub->evt.s.usr_handle, - sizeof(s->usr_handle)) ){ - found = 1; - break; - } + if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { + found = 1; + break; + } } if (!found) return; @@ -310,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s, k_term_timer(&sub->timer); spin_lock_bh(subscriber->lock); } - dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n", + dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n", sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); subscr_del(sub); } @@ -325,10 +325,16 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, struct subscriber *subscriber) { struct subscription *sub; + int swap; + + /* Determine subscriber's endianness */ + + swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); /* Detect & process a subscription cancellation request */ - if (ntohl(s->filter) & TIPC_SUB_CANCEL) { + if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { + s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); subscr_cancel(s, subscriber); return NULL; } @@ -353,12 +359,13 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, /* Initialize subscription object */ - sub->seq.type = ntohl(s->seq.type); - sub->seq.lower = ntohl(s->seq.lower); - sub->seq.upper = ntohl(s->seq.upper); - sub->timeout = ntohl(s->timeout); - sub->filter = ntohl(s->filter); - if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) || + sub->seq.type = htohl(s->seq.type, swap); + sub->seq.lower = htohl(s->seq.lower, swap); + sub->seq.upper = htohl(s->seq.upper, swap); + sub->timeout = htohl(s->timeout, swap); + sub->filter = htohl(s->filter, swap); + if ((!(sub->filter & TIPC_SUB_PORTS) == + !(sub->filter & TIPC_SUB_SERVICE)) || (sub->seq.lower > sub->seq.upper)) { warn("Subscription rejected, illegal request\n"); kfree(sub); @@ -369,6 +376,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s, INIT_LIST_HEAD(&sub->nameseq_list); list_add(&sub->subscription_list, &subscriber->subscription_list); sub->server_ref = 
subscriber->port_ref; + sub->swap = swap; memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); atomic_inc(&topsrv.subscription_count); if (sub->timeout != TIPC_WAIT_FOREVER) { diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h index c20f496..45d89bf 100644 --- a/net/tipc/subscr.h +++ b/net/tipc/subscr.h @@ -53,6 +53,7 @@ typedef void (*tipc_subscr_event) (struct subscription *sub, * @nameseq_list: adjacent subscriptions in name sequence's subscription list * @subscription_list: adjacent subscriptions in subscriber's subscription list * @server_ref: object reference of server port associated with subscription + * @swap: indicates if subscriber uses opposite endianness in its messages * @evt: template for events generated by subscription */ @@ -65,6 +66,7 @@ struct subscription { struct list_head nameseq_list; struct list_head subscription_list; u32 server_ref; + int swap; struct tipc_event evt; }; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fef2cc5..d63e7a2 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -673,6 +673,7 @@ static int unix_autobind(struct socket *sock) static u32 ordernum = 1; struct unix_address *addr; int err; + unsigned int retries = 0; mutex_lock(&u->readlock); @@ -698,9 +699,17 @@ retry: if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type, addr->hash)) { spin_unlock(&unix_table_lock); - /* Sanity yield. It is unusual case, but yet... */ - if (!(ordernum&0xFF)) - yield(); + /* + * __unix_find_socket_byname() may take long time if many names + * are already in use. + */ + cond_resched(); + /* Give up if all names seems to be in use. */ + if (retries++ == 0xFFFFF) { + err = -ENOSPC; + kfree(addr); + goto out; + } goto retry; } addr->hash ^= sk->sk_type; @@ -1297,18 +1306,20 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) int i; scm->fp = UNIXCB(skb).fp; - skb->destructor = sock_wfree; UNIXCB(skb).fp = NULL; for (i = scm->fp->count-1; i >= 0; i--) unix_notinflight(scm->fp->fp[i]); } -static void unix_destruct_fds(struct sk_buff *skb) +static void unix_destruct_scm(struct sk_buff *skb) { struct scm_cookie scm; memset(&scm, 0, sizeof(scm)); - unix_detach_fds(&scm, skb); + scm.pid = UNIXCB(skb).pid; + scm.cred = UNIXCB(skb).cred; + if (UNIXCB(skb).fp) + unix_detach_fds(&scm, skb); /* Alas, it calls VFS */ /* So fscking what? 
fput() had been SMP-safe since the last Summer */ @@ -1316,9 +1327,25 @@ static void unix_destruct_fds(struct sk_buff *skb) sock_wfree(skb); } +#define MAX_RECURSION_LEVEL 4 + static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; + unsigned char max_level = 0; + int unix_sock_count = 0; + + for (i = scm->fp->count - 1; i >= 0; i--) { + struct sock *sk = unix_get_socket(scm->fp->fp[i]); + + if (sk) { + unix_sock_count++; + max_level = max(max_level, + unix_sk(sk)->recursion_level); + } + } + if (unlikely(max_level > MAX_RECURSION_LEVEL)) + return -ETOOMANYREFS; /* * Need to duplicate file references for the sake of garbage @@ -1329,10 +1356,24 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) if (!UNIXCB(skb).fp) return -ENOMEM; - for (i = scm->fp->count-1; i >= 0; i--) - unix_inflight(scm->fp->fp[i]); - skb->destructor = unix_destruct_fds; - return 0; + if (unix_sock_count) { + for (i = scm->fp->count - 1; i >= 0; i--) + unix_inflight(scm->fp->fp[i]); + } + return max_level; +} + +static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) +{ + int err = 0; + UNIXCB(skb).pid = get_pid(scm->pid); + UNIXCB(skb).cred = get_cred(scm->cred); + UNIXCB(skb).fp = NULL; + if (scm->fp && send_fds) + err = unix_attach_fds(scm, skb); + + skb->destructor = unix_destruct_scm; + return err; } /* @@ -1354,6 +1395,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; + int max_level; if (NULL == siocb->scm) siocb->scm = &tmp_scm; @@ -1391,12 +1433,10 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (skb == NULL) goto out; - memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); - if (siocb->scm->fp) { - err = unix_attach_fds(siocb->scm, skb); - if (err) - goto out_free; - } + err = unix_scm_to_skb(siocb->scm, skb, true); + if (err < 0) + goto out_free; + max_level = err + 1; unix_get_secdata(siocb->scm, skb); skb_reset_transport_header(skb); @@ -1476,6 +1516,8 @@ restart: } skb_queue_tail(&other->sk_receive_queue, skb); + if (max_level > unix_sk(other)->recursion_level) + unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); @@ -1506,6 +1548,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, int sent = 0; struct scm_cookie tmp_scm; bool fds_sent = false; + int max_level; if (NULL == siocb->scm) siocb->scm = &tmp_scm; @@ -1566,16 +1609,15 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, */ size = min_t(int, size, skb_tailroom(skb)); - memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); + /* Only send the fds in the first buffer */ - if (siocb->scm->fp && !fds_sent) { - err = unix_attach_fds(siocb->scm, skb); - if (err) { - kfree_skb(skb); - goto out_err; - } - fds_sent = true; + err = unix_scm_to_skb(siocb->scm, skb, !fds_sent); + if (err < 0) { + kfree_skb(skb); + goto out_err; } + max_level = err + 1; + fds_sent = true; err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); if (err) { @@ -1590,6 +1632,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, goto pipe_err_free; skb_queue_tail(&other->sk_receive_queue, skb); + if (max_level > unix_sk(other)->recursion_level) + unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other, size); sent += size; @@ -1692,7 +1736,7 @@ static int 
unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, siocb->scm = &tmp_scm; memset(&tmp_scm, 0, sizeof(tmp_scm)); } - siocb->scm->creds = *UNIXCREDS(skb); + scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); unix_set_secdata(siocb->scm, skb); if (!(flags & MSG_PEEK)) { @@ -1806,6 +1850,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, unix_state_lock(sk); skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { + unix_sk(sk)->recursion_level = 0; if (copied >= target) goto unlock; @@ -1841,14 +1886,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, if (check_creds) { /* Never glue messages from different writers */ - if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, - sizeof(siocb->scm->creds)) != 0) { + if ((UNIXCB(skb).pid != siocb->scm->pid) || + (UNIXCB(skb).cred != siocb->scm->cred)) { skb_queue_head(&sk->sk_receive_queue, skb); break; } } else { /* Copy credentials */ - siocb->scm->creds = *UNIXCREDS(skb); + scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); check_creds = 1; } diff --git a/net/unix/garbage.c b/net/unix/garbage.c index c8df6fd..f89f83b 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -96,7 +96,7 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; -static struct sock *unix_get_socket(struct file *filp) +struct sock *unix_get_socket(struct file *filp) { struct sock *u_sock = NULL; struct inode *inode = filp->f_path.dentry->d_inode; @@ -259,9 +259,16 @@ static void inc_inflight_move_tail(struct unix_sock *u) } static bool gc_in_progress = false; +#define UNIX_INFLIGHT_TRIGGER_GC 16000 void wait_for_unix_gc(void) { + /* + * If number of inflight sockets is insane, + * force a garbage collect right now. + */ + if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) + unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } diff --git a/net/wireless/chan.c b/net/wireless/chan.c index b01a6f6..bbaab4b 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -43,6 +43,38 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, return chan; } +static bool can_beacon_sec_chan(struct wiphy *wiphy, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + struct ieee80211_channel *sec_chan; + int diff; + + switch (channel_type) { + case NL80211_CHAN_HT40PLUS: + diff = 20; + break; + case NL80211_CHAN_HT40MINUS: + diff = -20; + break; + default: + return false; + } + + sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff); + if (!sec_chan) + return false; + + /* we'll need a DFS capability later */ + if (sec_chan->flags & (IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_PASSIVE_SCAN | + IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_RADAR)) + return false; + + return true; +} + int cfg80211_set_freq(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int freq, enum nl80211_channel_type channel_type) @@ -67,6 +99,27 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev, if (!chan) return -EINVAL; + /* Both channels should be able to initiate communication */ + if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC || + wdev->iftype == NL80211_IFTYPE_AP || + wdev->iftype == NL80211_IFTYPE_AP_VLAN || + wdev->iftype == NL80211_IFTYPE_MESH_POINT)) { + switch (channel_type) { + case NL80211_CHAN_HT40PLUS: + case NL80211_CHAN_HT40MINUS: + if (!can_beacon_sec_chan(&rdev->wiphy, chan, + channel_type)) { + printk(KERN_DEBUG + "cfg80211: Secondary channel not " + "allowed to initiate 
communication\n"); + return -EINVAL; + } + break; + default: + break; + } + } + result = rdev->ops->set_channel(&rdev->wiphy, wdev ? wdev->netdev : NULL, chan, channel_type); diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 48ead6f..e4be688 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c @@ -44,10 +44,10 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len) } } - WARN_ON(!done); - - nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); - cfg80211_sme_rx_auth(dev, buf, len); + if (done) { + nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); + cfg80211_sme_rx_auth(dev, buf, len); + } wdev_unlock(wdev); } @@ -842,12 +842,18 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, return -EINVAL; if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { /* Verify that we are associated with the destination AP */ + wdev_lock(wdev); + if (!wdev->current_bss || memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, ETH_ALEN) != 0 || memcmp(wdev->current_bss->pub.bssid, mgmt->da, - ETH_ALEN) != 0) + ETH_ALEN) != 0) { + wdev_unlock(wdev); return -ENOTCONN; + } + wdev_unlock(wdev); + } if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index db71150..da657d8 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -758,11 +758,13 @@ static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info) result = get_rdev_dev_by_info_ifindex(info, &rdev, &netdev); if (result) - goto unlock; + goto unlock_rtnl; result = __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info); - unlock: + dev_put(netdev); + cfg80211_unlock_rdev(rdev); + unlock_rtnl: rtnl_unlock(); return result; @@ -4909,7 +4911,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) - goto unlock_rdev; + goto unlock_rtnl; wdev = dev->ieee80211_ptr; @@ -4926,9 +4928,10 @@ static int nl80211_set_cqm_rssi(struct genl_info *info, err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev, threshold, hysteresis); -unlock_rdev: + unlock_rdev: cfg80211_unlock_rdev(rdev); dev_put(dev); + unlock_rtnl: rtnl_unlock(); return err; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 8f0d97d..55b1101 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1695,7 +1695,7 @@ static int ignore_request(struct wiphy *wiphy, return 0; return -EALREADY; } - return REG_INTERSECT; + return 0; case NL80211_REGDOM_SET_BY_DRIVER: if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) { if (regdom_changes(pending_request->alpha2)) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 58401d2..503ebb8 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -275,6 +275,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, { struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); struct cfg80211_internal_bss *bss, *res = NULL; + unsigned long now = jiffies; spin_lock_bh(&dev->bss_lock); @@ -283,6 +284,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy, continue; if (channel && bss->pub.channel != channel) continue; + /* Don't get expired BSS structs */ + if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) && + !atomic_read(&bss->hold)) + continue; if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { res = bss; kref_get(&res->ref); @@ -645,14 +650,14 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub) bss = container_of(pub, struct cfg80211_internal_bss, pub); 
spin_lock_bh(&dev->bss_lock); + if (!list_empty(&bss->list)) { + list_del_init(&bss->list); + dev->bss_generation++; + rb_erase(&bss->rbn, &dev->bss_tree); - list_del(&bss->list); - dev->bss_generation++; - rb_erase(&bss->rbn, &dev->bss_tree); - + kref_put(&bss->ref, bss_release); + } spin_unlock_bh(&dev->bss_lock); - - kref_put(&bss->ref, bss_release); } EXPORT_SYMBOL(cfg80211_unlink_bss); diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 9634299..37c7d5b 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -1420,6 +1420,9 @@ int cfg80211_wext_giwessid(struct net_device *dev, { struct wireless_dev *wdev = dev->ieee80211_ptr; + data->flags = 0; + data->length = 0; + switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: return cfg80211_ibss_wext_giwessid(dev, info, data, ssid); diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 0ef17bc..8f5116f 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -782,6 +782,22 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd, } } + if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) { + /* + * If this is a GET, but not NOMAX, it means that the extra + * data is not bounded by userspace, but by max_tokens. Thus + * set the length to max_tokens. This matches the extra data + * allocation. + * The driver should fill it with the number of tokens it + * provided, and it may check iwp->length rather than having + * knowledge of max_tokens. If the driver doesn't change the + * iwp->length, this ioctl just copies back max_token tokens + * filled with zeroes. Hopefully the driver isn't claiming + * them to be valid data. + */ + iwp->length = descr->max_tokens; + } + err = handler(dev, info, (union iwreq_data *) iwp, extra); iwp->length += essid_compat; diff --git a/net/wireless/wext-priv.c b/net/wireless/wext-priv.c index 3feb28e..674d426 100644 --- a/net/wireless/wext-priv.c +++ b/net/wireless/wext-priv.c @@ -152,7 +152,7 @@ static int ioctl_private_iw_point(struct iw_point *iwp, unsigned int cmd, } else if (!iwp->pointer) return -EFAULT; - extra = kmalloc(extra_size, GFP_KERNEL); + extra = kzalloc(extra_size, GFP_KERNEL); if (!extra) return -ENOMEM; diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 771bab0..55187c8 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, while (len > 0) { switch (*p & X25_FAC_CLASS_MASK) { case X25_FAC_CLASS_A: + if (len < 2) + return 0; switch (*p) { case X25_FAC_REVERSE: if((p[1] & 0x81) == 0x81) { @@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, len -= 2; break; case X25_FAC_CLASS_B: + if (len < 3) + return 0; switch (*p) { case X25_FAC_PACKET_SIZE: facilities->pacsize_in = p[1]; @@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, len -= 3; break; case X25_FAC_CLASS_C: + if (len < 4) + return 0; printk(KERN_DEBUG "X.25: unknown facility %02X, " "values %02X, %02X, %02X\n", p[0], p[1], p[2], p[3]); @@ -132,26 +138,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, len -= 4; break; case X25_FAC_CLASS_D: + if (len < p[1] + 2) + return 0; switch (*p) { case X25_FAC_CALLING_AE: - if (p[1] > X25_MAX_DTE_FACIL_LEN) - break; + if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) + return 0; dte_facs->calling_len = p[2]; memcpy(dte_facs->calling_ae, &p[3], 
p[1] - 1); *vc_fac_mask |= X25_MASK_CALLING_AE; break; case X25_FAC_CALLED_AE: - if (p[1] > X25_MAX_DTE_FACIL_LEN) - break; + if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) + return 0; dte_facs->called_len = p[2]; memcpy(dte_facs->called_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLED_AE; break; default: printk(KERN_DEBUG "X.25: unknown facility %02X," - "length %d, values %02X, %02X, " - "%02X, %02X\n", - p[0], p[1], p[2], p[3], p[4], p[5]); + "length %d\n", p[0], p[1]); break; } len -= p[1] + 2; diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 6317896..f729f02 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp &x25->vc_facil_mask); if (len > 0) skb_pull(skb, len); + else + return -1; /* * Copy any Call User Data. */ diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 73e7b95..88048b6 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -392,8 +392,12 @@ void __exit x25_link_free(void) write_lock_bh(&x25_neigh_list_lock); list_for_each_safe(entry, tmp, &x25_neigh_list) { + struct net_device *dev; + nb = list_entry(entry, struct x25_neigh, node); + dev = nb->dev; __x25_remove_neigh(nb); + dev_put(dev); } write_unlock_bh(&x25_neigh_list_lock); } diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 9960d1c..7f97e3f 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -330,7 +330,7 @@ static int conf_choice(struct menu *menu) } if (!child) continue; - if (line[strlen(line) - 1] == '?') { + if (line[0] && line[strlen(line) - 1] == '?') { print_help(child); continue; } diff --git a/scripts/mkmakefile b/scripts/mkmakefile index 67d59c7..5325423 100644 --- a/scripts/mkmakefile +++ b/scripts/mkmakefile @@ -44,7 +44,9 @@ all: Makefile:; -\$(all) %/: all +\$(all): all @: +%/: all + @: EOF diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 16d100d..3fbcd1d 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -35,6 +35,7 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) /* set during initialization */ +extern int iint_initialized; extern int ima_initialized; extern int ima_used_chip; extern char *ima_hash; diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index 7625b85..afba4ae 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c @@ -22,9 +22,10 @@ RADIX_TREE(ima_iint_store, GFP_ATOMIC); DEFINE_SPINLOCK(ima_iint_lock); - static struct kmem_cache *iint_cache __read_mostly; +int iint_initialized = 0; + /* ima_iint_find_get - return the iint associated with an inode * * ima_iint_find_get gets a reference to the iint. 
Caller must @@ -141,6 +142,7 @@ static int __init ima_iintcache_init(void) iint_cache = kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, SLAB_PANIC, init_once); + iint_initialized = 1; return 0; } security_initcall(ima_iintcache_init); diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index f936413..e662b89 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -148,12 +148,14 @@ void ima_counts_get(struct file *file) struct ima_iint_cache *iint; int rc; - if (!ima_initialized || !S_ISREG(inode->i_mode)) + if (!iint_initialized || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find_get(inode); if (!iint) return; mutex_lock(&iint->mutex); + if (!ima_initialized) + goto out; rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK); if (rc < 0) goto out; @@ -213,7 +215,7 @@ void ima_file_free(struct file *file) struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; - if (!ima_initialized || !S_ISREG(inode->i_mode)) + if (!iint_initialized || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find_get(inode); if (!iint) @@ -230,7 +232,7 @@ static int process_measurement(struct file *file, const unsigned char *filename, { struct inode *inode = file->f_dentry->d_inode; struct ima_iint_cache *iint; - int rc; + int rc = 0; if (!ima_initialized || !S_ISREG(inode->i_mode)) return 0; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index aef8c0a..d661afb 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; } diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 6261745..e0d1e2b 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1259,6 +1259,7 @@ long keyctl_session_to_parent(void) keyring_r = NULL; me = current; + rcu_read_lock(); write_lock_irq(&tasklist_lock); parent = me->real_parent; @@ -1291,7 +1292,8 @@ long keyctl_session_to_parent(void) goto not_permitted; /* the keyrings must have the same UID */ - if (pcred->tgcred->session_keyring->uid != mycred->euid || + if ((pcred->tgcred->session_keyring && + pcred->tgcred->session_keyring->uid != mycred->euid) || mycred->tgcred->session_keyring->uid != mycred->euid) goto not_permitted; @@ -1306,6 +1308,7 @@ long keyctl_session_to_parent(void) set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); if (oldcred) put_cred(oldcred); return 0; @@ -1314,6 +1317,7 @@ already_same: ret = 0; not_permitted: write_unlock_irq(&tasklist_lock); + rcu_read_unlock(); put_cred(cred); return ret; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 5c9f25b..7b765f0 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2573,7 +2573,10 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, sid = tsec->sid; newsid = tsec->create_sid; - if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { + if ((sbsec->flags & SE_SBINITIALIZED) && + (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) + newsid = sbsec->mntpoint_sid; + else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { rc = security_transition_sid(sid, dsec->sid, inode_mode_to_security_class(inode->i_mode), &newsid); @@ -3231,7 
+3234,11 @@ static void selinux_cred_free(struct cred *cred) { struct task_security_struct *tsec = cred->security; - BUG_ON((unsigned long) cred->security < PAGE_SIZE); + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. + */ + BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE); cred->security = (void *) 0x7UL; kfree(tsec); } diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 75ec0c6..8b02b21 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -65,6 +65,8 @@ static struct nlmsg_perm nlmsg_route_perms[] = { RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, }; static struct nlmsg_perm nlmsg_firewall_perms[] = diff --git a/sound/core/control.c b/sound/core/control.c index 070aab4..45a8180 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -31,6 +31,7 @@ /* max number of user-defined controls */ #define MAX_USER_CONTROLS 32 +#define MAX_CONTROL_COUNT 1028 struct snd_kctl_ioctl { struct list_head list; /* list of all ioctls */ @@ -195,6 +196,10 @@ static struct snd_kcontrol *snd_ctl_new(struct snd_kcontrol *control, if (snd_BUG_ON(!control || !control->count)) return NULL; + + if (control->count > MAX_CONTROL_COUNT) + return NULL; + kctl = kzalloc(sizeof(*kctl) + sizeof(struct snd_kcontrol_volatile) * control->count, GFP_KERNEL); if (kctl == NULL) { snd_printk(KERN_ERR "Cannot allocate control instance\n"); diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c index 7730575..07efa29 100644 --- a/sound/core/hrtimer.c +++ b/sound/core/hrtimer.c @@ -45,12 +45,13 @@ static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt) { struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt); struct snd_timer *t = stime->timer; + unsigned long oruns; if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; - hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); - snd_timer_interrupt(stime->timer, t->sticks); + oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution)); + snd_timer_interrupt(stime->timer, t->sticks * oruns); if (!atomic_read(&stime->running)) return HRTIMER_NORESTART; diff --git a/sound/core/init.c b/sound/core/init.c index ec4a50c..82f350e 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -848,6 +848,7 @@ int snd_card_file_add(struct snd_card *card, struct file *file) return -ENOMEM; mfile->file = file; mfile->disconnected_f_op = NULL; + INIT_LIST_HEAD(&mfile->shutdown_list); spin_lock(&card->files_lock); if (card->shutdown) { spin_unlock(&card->files_lock); @@ -883,6 +884,9 @@ int snd_card_file_remove(struct snd_card *card, struct file *file) list_for_each_entry(mfile, &card->files_list, list) { if (mfile->file == file) { list_del(&mfile->list); + spin_lock(&shutdown_lock); + list_del(&mfile->shutdown_list); + spin_unlock(&shutdown_lock); if (mfile->disconnected_f_op) fops_put(mfile->disconnected_f_op); found = mfile; diff --git a/sound/core/oss/mixer_oss.c b/sound/core/oss/mixer_oss.c index f50ebf2..8442a08 100644 --- a/sound/core/oss/mixer_oss.c +++ b/sound/core/oss/mixer_oss.c @@ -618,8 +618,10 @@ static void snd_mixer_oss_put_volume1_vol(struct snd_mixer_oss_file *fmixer, if (numid == ID_UNKNOWN) return; down_read(&card->controls_rwsem); - if ((kctl = 
snd_ctl_find_numid(card, numid)) == NULL) + if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) { + up_read(&card->controls_rwsem); return; + } uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL); uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); if (uinfo == NULL || uctl == NULL) @@ -658,7 +660,7 @@ static void snd_mixer_oss_put_volume1_sw(struct snd_mixer_oss_file *fmixer, return; down_read(&card->controls_rwsem); if ((kctl = snd_ctl_find_numid(card, numid)) == NULL) { - up_read(&fmixer->card->controls_rwsem); + up_read(&card->controls_rwsem); return; } uinfo = kzalloc(sizeof(*uinfo), GFP_KERNEL); @@ -797,7 +799,7 @@ static int snd_mixer_oss_get_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); if (uinfo == NULL || uctl == NULL) { err = -ENOMEM; - goto __unlock; + goto __free_only; } down_read(&card->controls_rwsem); kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0); @@ -826,6 +828,7 @@ static int snd_mixer_oss_get_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned err = 0; __unlock: up_read(&card->controls_rwsem); + __free_only: kfree(uctl); kfree(uinfo); return err; @@ -847,7 +850,7 @@ static int snd_mixer_oss_put_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); if (uinfo == NULL || uctl == NULL) { err = -ENOMEM; - goto __unlock; + goto __free_only; } down_read(&card->controls_rwsem); kctl = snd_mixer_oss_test_id(mixer, "Capture Source", 0); @@ -880,6 +883,7 @@ static int snd_mixer_oss_put_recsrc2(struct snd_mixer_oss_file *fmixer, unsigned err = 0; __unlock: up_read(&card->controls_rwsem); + __free_only: kfree(uctl); kfree(uinfo); return err; diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 5c8c7df..aed06c9 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1510,16 +1510,19 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file) { struct snd_pcm_substream *substream; + struct snd_pcm_runtime *runtime; + int i; - substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; - if (substream != NULL) { - snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); - substream->runtime->oss.prepare = 1; - } - substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE]; - if (substream != NULL) { + for (i = 0; i < 2; i++) { + substream = pcm_oss_file->streams[i]; + if (!substream) + continue; + runtime = substream->runtime; snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); - substream->runtime->oss.prepare = 1; + runtime->oss.prepare = 1; + runtime->oss.buffer_used = 0; + runtime->oss.prev_hw_ptr_period = 0; + runtime->oss.period_ptr = 0; } return 0; } diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 303ac04..1990918 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -981,6 +981,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) { if (substream->runtime->trigger_master != substream) return 0; + /* some drivers might use hw_ptr to recover from the pause - + update the hw_ptr now */ + if (push) + snd_pcm_update_hw_ptr(substream); /* The jiffies check in snd_pcm_update_hw_ptr*() is done by * a delta betwen the current jiffies, this gives a large enough * delta, effectively to skip the check once. 
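The sound/core/oss/mixer_oss.c hunks above all repair one class of bug: an error path that returns while card->controls_rwsem is still held, or (in the recsrc2 helpers) one that drops a lock it never took. Below is a minimal sketch of the corrected pattern; it is illustrative only, example_read_ctl is a made-up helper, and only down_read()/up_read() and snd_ctl_find_numid() are taken from the hunks themselves.

#include <linux/errno.h>
#include <sound/core.h>     /* struct snd_card (in-kernel context assumed) */
#include <sound/control.h>  /* snd_ctl_find_numid(), struct snd_kcontrol */

/* Illustrative only: every exit taken after down_read() must pair with an
 * up_read(); exits taken before the lock (e.g. failed allocations) must not. */
static int example_read_ctl(struct snd_card *card, unsigned int numid)
{
	struct snd_kcontrol *kctl;
	int err = 0;

	down_read(&card->controls_rwsem);
	kctl = snd_ctl_find_numid(card, numid);
	if (kctl == NULL) {
		err = -ENOENT;
		goto unlock;	/* a bare "return" here was the original leak */
	}
	/* ... read from kctl while the rwsem is held ... */
unlock:
	up_read(&card->controls_rwsem);
	return err;
}

The same rule explains the __unlock/__free_only label split in snd_mixer_oss_get_recsrc2() and snd_mixer_oss_put_recsrc2(): when kzalloc() fails before the rwsem is taken, the error path may free the buffers but must not call up_read().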
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index eb68326..75490c6 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -535,13 +535,15 @@ static int snd_rawmidi_release(struct inode *inode, struct file *file) { struct snd_rawmidi_file *rfile; struct snd_rawmidi *rmidi; + struct module *module; rfile = file->private_data; rmidi = rfile->rmidi; rawmidi_release_priv(rfile); kfree(rfile); + module = rmidi->card->module; snd_card_file_remove(rmidi->card, file); - module_put(rmidi->card->module); + module_put(module); return 0; } diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c index 6857122..69cd7b3 100644 --- a/sound/core/seq/oss/seq_oss_init.c +++ b/sound/core/seq/oss/seq_oss_init.c @@ -281,13 +281,10 @@ snd_seq_oss_open(struct file *file, int level) return 0; _error: - snd_seq_oss_writeq_delete(dp->writeq); - snd_seq_oss_readq_delete(dp->readq); snd_seq_oss_synth_cleanup(dp); snd_seq_oss_midi_cleanup(dp); - delete_port(dp); delete_seq_queue(dp->queue); - kfree(dp); + delete_port(dp); return rc; } @@ -350,8 +347,10 @@ create_port(struct seq_oss_devinfo *dp) static int delete_port(struct seq_oss_devinfo *dp) { - if (dp->port < 0) + if (dp->port < 0) { + kfree(dp); return 0; + } debug_printk(("delete_port %i\n", dp->port)); return snd_seq_event_port_detach(dp->cseq, dp->port); diff --git a/sound/oss/dev_table.h b/sound/oss/dev_table.h index b7617be..0199a31 100644 --- a/sound/oss/dev_table.h +++ b/sound/oss/dev_table.h @@ -271,7 +271,7 @@ struct synth_operations void (*reset) (int dev); void (*hw_control) (int dev, unsigned char *event); int (*load_patch) (int dev, int format, const char __user *addr, - int offs, int count, int pmgr_flag); + int count, int pmgr_flag); void (*aftertouch) (int dev, int voice, int pressure); void (*controller) (int dev, int voice, int ctrl_num, int value); void (*panning) (int dev, int voice, int value); diff --git a/sound/oss/midi_synth.c b/sound/oss/midi_synth.c index 3bc7104..2292c23 100644 --- a/sound/oss/midi_synth.c +++ b/sound/oss/midi_synth.c @@ -476,7 +476,7 @@ EXPORT_SYMBOL(midi_synth_hw_control); int midi_synth_load_patch(int dev, int format, const char __user *addr, - int offs, int count, int pmgr_flag) + int count, int pmgr_flag) { int orig_dev = synth_devs[dev]->midi_dev; @@ -491,39 +491,37 @@ midi_synth_load_patch(int dev, int format, const char __user *addr, if (!prefix_cmd(orig_dev, 0xf0)) return 0; + /* Invalid patch format */ if (format != SYSEX_PATCH) - { -/* printk("MIDI Error: Invalid patch format (key) 0x%x\n", format);*/ return -EINVAL; - } + + /* Patch header too short */ if (count < hdr_size) - { -/* printk("MIDI Error: Patch header too short\n");*/ return -EINVAL; - } + count -= hdr_size; /* - * Copy the header from user space but ignore the first bytes which have - * been transferred already. 
+ * Copy the header from user space */ - if(copy_from_user(&((char *) &sysex)[offs], &(addr)[offs], hdr_size - offs)) + if (copy_from_user(&sysex, addr, hdr_size)) return -EFAULT; - - if (count < sysex.len) - { -/* printk(KERN_WARNING "MIDI Warning: Sysex record too short (%d<%d)\n", count, (int) sysex.len);*/ + + /* Sysex record too short */ + if ((unsigned)count < (unsigned)sysex.len) sysex.len = count; - } - left = sysex.len; - src_offs = 0; + + left = sysex.len; + src_offs = 0; for (i = 0; i < left && !signal_pending(current); i++) { unsigned char data; - get_user(*(unsigned char *) &data, (unsigned char __user *) &((addr)[hdr_size + i])); + if (get_user(data, + (unsigned char __user *)(addr + hdr_size + i))) + return -EFAULT; eox_seen = (i > 0 && data & 0x80); /* End of sysex */ diff --git a/sound/oss/midi_synth.h b/sound/oss/midi_synth.h index 6bc9d00..b64ddd6 100644 --- a/sound/oss/midi_synth.h +++ b/sound/oss/midi_synth.h @@ -8,7 +8,7 @@ int midi_synth_open (int dev, int mode); void midi_synth_close (int dev); void midi_synth_hw_control (int dev, unsigned char *event); int midi_synth_load_patch (int dev, int format, const char __user * addr, - int offs, int count, int pmgr_flag); + int count, int pmgr_flag); void midi_synth_panning (int dev, int channel, int pressure); void midi_synth_aftertouch (int dev, int channel, int pressure); void midi_synth_controller (int dev, int channel, int ctrl_num, int value); diff --git a/sound/oss/opl3.c b/sound/oss/opl3.c index 938c48c..407cd67 100644 --- a/sound/oss/opl3.c +++ b/sound/oss/opl3.c @@ -820,7 +820,7 @@ static void opl3_hw_control(int dev, unsigned char *event) } static int opl3_load_patch(int dev, int format, const char __user *addr, - int offs, int count, int pmgr_flag) + int count, int pmgr_flag) { struct sbi_instrument ins; @@ -830,11 +830,7 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, return -EINVAL; } - /* - * What the fuck is going on here? We leave junk in the beginning - * of ins and then check the field pretty close to that beginning? 
- */ - if(copy_from_user(&((char *) &ins)[offs], addr + offs, sizeof(ins) - offs)) + if (copy_from_user(&ins, addr, sizeof(ins))) return -EFAULT; if (ins.channel < 0 || ins.channel >= SBFM_MAXINSTR) @@ -849,6 +845,10 @@ static int opl3_load_patch(int dev, int format, const char __user *addr, static void opl3_panning(int dev, int voice, int value) { + + if (voice < 0 || voice >= devc->nr_voice) + return; + devc->voc[voice].panning = value; } @@ -1066,8 +1066,15 @@ static int opl3_alloc_voice(int dev, int chn, int note, struct voice_alloc_info static void opl3_setup_voice(int dev, int voice, int chn) { - struct channel_info *info = - &synth_devs[dev]->chn_info[chn]; + struct channel_info *info; + + if (voice < 0 || voice >= devc->nr_voice) + return; + + if (chn < 0 || chn > 15) + return; + + info = &synth_devs[dev]->chn_info[chn]; opl3_set_instr(dev, voice, info->pgm_num); diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c index e85789e..a0072a9 100644 --- a/sound/oss/sequencer.c +++ b/sound/oss/sequencer.c @@ -241,7 +241,7 @@ int sequencer_write(int dev, struct file *file, const char __user *buf, int coun return -ENXIO; fmt = (*(short *) &event_rec[0]) & 0xffff; - err = synth_devs[dev]->load_patch(dev, fmt, buf, p + 4, c, 0); + err = synth_devs[dev]->load_patch(dev, fmt, buf + p, c, 0); if (err < 0) return err; diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c index 2d9c513..aa26fd4 100644 --- a/sound/oss/soundcard.c +++ b/sound/oss/soundcard.c @@ -86,7 +86,7 @@ int *load_mixer_volumes(char *name, int *levels, int present) int i, n; for (i = 0; i < num_mixer_volumes; i++) { - if (strcmp(name, mixer_vols[i].name) == 0) { + if (strncmp(name, mixer_vols[i].name, 32) == 0) { if (present) mixer_vols[i].num = i; return mixer_vols[i].levels; @@ -98,7 +98,7 @@ int *load_mixer_volumes(char *name, int *levels, int present) } n = num_mixer_volumes++; - strcpy(mixer_vols[n].name, name); + strncpy(mixer_vols[n].name, name, 32); if (present) mixer_vols[n].num = n; @@ -389,11 +389,11 @@ static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case SND_DEV_DSP: case SND_DEV_DSP16: case SND_DEV_AUDIO: - return audio_ioctl(dev, file, cmd, p); + ret = audio_ioctl(dev, file, cmd, p); break; case SND_DEV_MIDIN: - return MIDIbuf_ioctl(dev, file, cmd, p); + ret = MIDIbuf_ioctl(dev, file, cmd, p); break; } diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c index b9d2f20..5439d66 100644 --- a/sound/pci/au88x0/au88x0_pcm.c +++ b/sound/pci/au88x0/au88x0_pcm.c @@ -42,11 +42,7 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_adb = { .rate_min = 5000, .rate_max = 48000, .channels_min = 1, -#ifdef CHIP_AU8830 - .channels_max = 4, -#else .channels_max = 2, -#endif .buffer_bytes_max = 0x10000, .period_bytes_min = 0x1, .period_bytes_max = 0x1000, @@ -115,6 +111,17 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_wt = { .periods_max = 64, }; #endif +#ifdef CHIP_AU8830 +static unsigned int au8830_channels[3] = { + 1, 2, 4, +}; + +static struct snd_pcm_hw_constraint_list hw_constraints_au8830_channels = { + .count = ARRAY_SIZE(au8830_channels), + .list = au8830_channels, + .mask = 0, +}; +#endif /* open callback */ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream) { @@ -156,6 +163,15 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream) if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB || VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_I2S) runtime->hw = snd_vortex_playback_hw_adb; +#ifdef CHIP_AU8830 + if 
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK && + VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) { + runtime->hw.channels_max = 4; + snd_pcm_hw_constraint_list(runtime, 0, + SNDRV_PCM_HW_PARAM_CHANNELS, + &hw_constraints_au8830_channels); + } +#endif substream->runtime->private_data = NULL; } #ifndef CHIP_AU8810 diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c index 1bff80c..b932154 100644 --- a/sound/pci/ctxfi/ctatc.c +++ b/sound/pci/ctxfi/ctatc.c @@ -869,7 +869,7 @@ spdif_passthru_playback_setup(struct ct_atc *atc, struct ct_atc_pcm *apcm) mutex_lock(&atc->atc_mutex); dao->ops->get_spos(dao, &status); if (((status >> 24) & IEC958_AES3_CON_FS) != iec958_con_fs) { - status &= ((~IEC958_AES3_CON_FS) << 24); + status &= ~(IEC958_AES3_CON_FS << 24); status |= (iec958_con_fs << 24); dao->ops->set_spos(dao, status); dao->ops->commit_write(dao); diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c index af56eb9..47d9ea9 100644 --- a/sound/pci/ctxfi/ctdaio.c +++ b/sound/pci/ctxfi/ctdaio.c @@ -176,6 +176,7 @@ static int dao_set_left_input(struct dao *dao, struct rsc *input) if (!entry) return -ENOMEM; + dao->ops->clear_left_input(dao); /* Program master and conjugate resources */ input->ops->master(input); daio->rscl.ops->master(&daio->rscl); @@ -204,6 +205,7 @@ static int dao_set_right_input(struct dao *dao, struct rsc *input) if (!entry) return -ENOMEM; + dao->ops->clear_right_input(dao); /* Program master and conjugate resources */ input->ops->master(input); daio->rscr.ops->master(&daio->rscr); diff --git a/sound/pci/ctxfi/ctmixer.c b/sound/pci/ctxfi/ctmixer.c index 15c1e72..c3519ff 100644 --- a/sound/pci/ctxfi/ctmixer.c +++ b/sound/pci/ctxfi/ctmixer.c @@ -566,19 +566,6 @@ static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol, return 0; } -static int ct_spdif_default_get(struct snd_kcontrol *kcontrol, - struct snd_ctl_elem_value *ucontrol) -{ - unsigned int status = SNDRV_PCM_DEFAULT_CON_SPDIF; - - ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; - ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; - ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; - ucontrol->value.iec958.status[3] = (status >> 24) & 0xff; - - return 0; -} - static int ct_spdif_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -586,6 +573,10 @@ static int ct_spdif_get(struct snd_kcontrol *kcontrol, unsigned int status; atc->spdif_out_get_status(atc, &status); + + if (status == 0) + status = SNDRV_PCM_DEFAULT_CON_SPDIF; + ucontrol->value.iec958.status[0] = (status >> 0) & 0xff; ucontrol->value.iec958.status[1] = (status >> 8) & 0xff; ucontrol->value.iec958.status[2] = (status >> 16) & 0xff; @@ -629,7 +620,7 @@ static struct snd_kcontrol_new iec958_default_ctl = { .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT), .count = 1, .info = ct_spdif_info, - .get = ct_spdif_default_get, + .get = ct_spdif_get, .put = ct_spdif_put, .private_value = MIXER_IEC958_DEFAULT }; diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index 4203782..aff8387 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c @@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64}; static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128}; static int enable_ir[SNDRV_CARDS]; static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */ +static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 2}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard."); @@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444); MODULE_PARM_DESC(enable_ir, "Enable IR."); module_param_array(subsystem, uint, NULL, 0444); MODULE_PARM_DESC(subsystem, "Force card subsystem model."); +module_param_array(delay_pcm_irq, uint, NULL, 0444); +MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0)."); /* * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 */ @@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci, &emu)) < 0) goto error; card->private_data = emu; + emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f; if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0) goto error; if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0) diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index 55b83ef..622bace 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c @@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, evoice->epcm->ccca_start_addr = start_addr + ccis; if (extra) { start_addr += ccis; - end_addr += ccis; + end_addr += ccis + emu->delay_pcm_irq; } if (stereo && !extra) { snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK); @@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, /* Assumption that PT is already 0 so no harm overwriting */ snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]); snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24)); - snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24)); + snd_emu10k1_ptr_write(emu, PSST, voice, + (start_addr + (extra ? 
emu->delay_pcm_irq : 0)) | + (send_amount[2] << 24)); if (emu->card_capabilities->emu_model) pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */ else @@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_ snd_emu10k1_ptr_write(emu, IP, voice, 0); } +static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu, + struct snd_emu10k1_pcm *epcm, + struct snd_pcm_substream *substream, + struct snd_pcm_runtime *runtime) +{ + unsigned int ptr, period_pos; + + /* try to synchronize the current position for the interrupt + source voice */ + period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt; + period_pos %= runtime->period_size; + ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number); + ptr &= ~0x00ffffff; + ptr |= epcm->ccca_start_addr + period_pos; + snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr); +} + static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, int cmd) { @@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, /* follow thru */ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: case SNDRV_PCM_TRIGGER_RESUME: + if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE) + snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime); mix = &emu->pcm_mixer[substream->number]; snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix); snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix); @@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream * #endif /* printk(KERN_DEBUG - "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n", - ptr, runtime->buffer_size, runtime->period_size); + "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n", + (long)ptr, (long)runtime->buffer_size, + (long)runtime->period_size); */ return ptr; } diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index ffb1ddb..957a311 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c @@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst if (snd_BUG_ON(!hdr)) return NULL; + idx = runtime->period_size >= runtime->buffer_size ? 
+ (emu->delay_pcm_irq * 2) : 0; mutex_lock(&hdr->block_mutex); - blk = search_empty(emu, runtime->dma_bytes); + blk = search_empty(emu, runtime->dma_bytes + idx); if (blk == NULL) { mutex_unlock(&hdr->block_mutex); return NULL; diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c index c7fba53..d6a40e2 100644 --- a/sound/pci/ens1370.c +++ b/sound/pci/ens1370.c @@ -229,6 +229,7 @@ MODULE_PARM_DESC(lineio, "Line In to Rear Out (0 = auto, 1 = force)."); #define ES_REG_1371_CODEC 0x14 /* W/R: Codec Read/Write register address */ #define ES_1371_CODEC_RDY (1<<31) /* codec ready */ #define ES_1371_CODEC_WIP (1<<30) /* codec register access in progress */ +#define EV_1938_CODEC_MAGIC (1<<26) #define ES_1371_CODEC_PIRD (1<<23) /* codec read/write select register */ #define ES_1371_CODEC_WRITE(a,d) ((((a)&0x7f)<<16)|(((d)&0xffff)<<0)) #define ES_1371_CODEC_READS(a) ((((a)&0x7f)<<16)|ES_1371_CODEC_PIRD) @@ -603,12 +604,18 @@ static void snd_es1370_codec_write(struct snd_ak4531 *ak4531, #ifdef CHIP1371 +static inline bool is_ev1938(struct ensoniq *ensoniq) +{ + return ensoniq->pci->device == 0x8938; +} + static void snd_es1371_codec_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct ensoniq *ensoniq = ac97->private_data; - unsigned int t, x; + unsigned int t, x, flag; + flag = is_ev1938(ensoniq) ? EV_1938_CODEC_MAGIC : 0; mutex_lock(&ensoniq->src_mutex); for (t = 0; t < POLL_COUNT; t++) { if (!(inl(ES_REG(ensoniq, 1371_CODEC)) & ES_1371_CODEC_WIP)) { @@ -630,7 +637,8 @@ static void snd_es1371_codec_write(struct snd_ac97 *ac97, 0x00010000) break; } - outl(ES_1371_CODEC_WRITE(reg, val), ES_REG(ensoniq, 1371_CODEC)); + outl(ES_1371_CODEC_WRITE(reg, val) | flag, + ES_REG(ensoniq, 1371_CODEC)); /* restore SRC reg */ snd_es1371_wait_src_ready(ensoniq); outl(x, ES_REG(ensoniq, 1371_SMPRATE)); @@ -647,8 +655,9 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, unsigned short reg) { struct ensoniq *ensoniq = ac97->private_data; - unsigned int t, x, fail = 0; + unsigned int t, x, flag, fail = 0; + flag = is_ev1938(ensoniq) ? 
EV_1938_CODEC_MAGIC : 0; __again: mutex_lock(&ensoniq->src_mutex); for (t = 0; t < POLL_COUNT; t++) { @@ -671,7 +680,8 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, 0x00010000) break; } - outl(ES_1371_CODEC_READS(reg), ES_REG(ensoniq, 1371_CODEC)); + outl(ES_1371_CODEC_READS(reg) | flag, + ES_REG(ensoniq, 1371_CODEC)); /* restore SRC reg */ snd_es1371_wait_src_ready(ensoniq); outl(x, ES_REG(ensoniq, 1371_SMPRATE)); @@ -683,6 +693,11 @@ static unsigned short snd_es1371_codec_read(struct snd_ac97 *ac97, /* now wait for the stinkin' data (RDY) */ for (t = 0; t < POLL_COUNT; t++) { if ((x = inl(ES_REG(ensoniq, 1371_CODEC))) & ES_1371_CODEC_RDY) { + if (is_ev1938(ensoniq)) { + for (t = 0; t < 100; t++) + inl(ES_REG(ensoniq, CONTROL)); + x = inl(ES_REG(ensoniq, 1371_CODEC)); + } mutex_unlock(&ensoniq->src_mutex); return ES_1371_CODEC_READ(x); } diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index ba2098d..00515c7 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -4360,7 +4360,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, cfg->hp_outs--; memmove(cfg->hp_pins + i, cfg->hp_pins + i + 1, sizeof(cfg->hp_pins[0]) * (cfg->hp_outs - i)); - memmove(sequences_hp + i - 1, sequences_hp + i, + memmove(sequences_hp + i, sequences_hp + i + 1, sizeof(sequences_hp[0]) * (cfg->hp_outs - i)); } } diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index d8da18a..f585200 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c @@ -381,7 +381,7 @@ static void hdmi_show_short_audio_desc(struct cea_sad *a) snd_print_pcm_rates(a->rates, buf, sizeof(buf)); if (a->format == AUDIO_CODING_TYPE_LPCM) - snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8)); + snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8); else if (a->max_bitrate) snprintf(buf2, sizeof(buf2), ", max bitrate = %d", a->max_bitrate); diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 1df25cf..824e220 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -457,6 +457,7 @@ enum { AZX_DRIVER_ULI, AZX_DRIVER_NVIDIA, AZX_DRIVER_TERA, + AZX_DRIVER_CTX, AZX_DRIVER_GENERIC, AZX_NUM_DRIVERS, /* keep this as last entry */ }; @@ -472,6 +473,7 @@ static char *driver_short_names[] __devinitdata = { [AZX_DRIVER_ULI] = "HDA ULI M5461", [AZX_DRIVER_NVIDIA] = "HDA NVidia", [AZX_DRIVER_TERA] = "HDA Teradici", + [AZX_DRIVER_CTX] = "HDA Creative", [AZX_DRIVER_GENERIC] = "HD-Audio Generic", }; @@ -562,7 +564,10 @@ static void azx_init_cmd_io(struct azx *chip) /* reset the rirb hw write pointer */ azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST); /* set N=1, get RIRB response interrupt for new entry */ - azx_writew(chip, RINTCNT, 1); + if (chip->driver_type == AZX_DRIVER_CTX) + azx_writew(chip, RINTCNT, 0xc0); + else + azx_writew(chip, RINTCNT, 1); /* enable rirb dma and response irq */ azx_writeb(chip, RIRBCTL, ICH6_RBCTL_DMA_EN | ICH6_RBCTL_IRQ_EN); spin_unlock_irq(&chip->reg_lock); @@ -1135,8 +1140,11 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id) /* clear rirb int */ status = azx_readb(chip, RIRBSTS); if (status & RIRB_INT_MASK) { - if (status & RIRB_INT_RESPONSE) + if (status & RIRB_INT_RESPONSE) { + if (chip->driver_type == AZX_DRIVER_CTX) + udelay(80); azx_update_rirb(chip); + } azx_writeb(chip, RIRBSTS, RIRB_INT_MASK); } @@ -2283,13 +2291,16 @@ static int azx_dev_free(struct snd_device *device) */ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1025, 0x009f, "Acer 
Aspire 5110", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1025, 0x026f, "Acer Aspire 5538", POS_FIX_LPIB), SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1028, 0x0470, "Dell Inspiron 1120", POS_FIX_LPIB), SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), @@ -2790,10 +2801,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID), .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, .class_mask = 0xffffff, - .driver_data = AZX_DRIVER_GENERIC }, + .driver_data = AZX_DRIVER_CTX }, #else /* this entry seems still valid -- i.e. without emu20kx chip */ - { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_GENERIC }, + { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX }, #endif /* AMD/ATI Generic, PCI class code and Vendor ID for HD Audio */ { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID), diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index afbe314..6258a05 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c @@ -3641,6 +3641,7 @@ static struct snd_pci_quirk ad1984_cfg_tbl[] = { /* Lenovo Thinkpad T61/X61 */ SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD), SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP), + SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP), {} }; diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c index af47801..9544463 100644 --- a/sound/pci/hda/patch_ca0110.c +++ b/sound/pci/hda/patch_ca0110.c @@ -489,7 +489,7 @@ static void parse_digital(struct hda_codec *codec) if (cfg->dig_outs && snd_hda_get_connections(codec, cfg->dig_out_pins[0], &spec->dig_out, 1) == 1) - spec->multiout.dig_out_nid = cfg->dig_out_pins[0]; + spec->multiout.dig_out_nid = spec->dig_out; } static int ca0110_parse_auto_config(struct hda_codec *codec) diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 350ee8a..059565f 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -972,6 +972,53 @@ static struct hda_verb cs_coef_init_verbs[] = { {} /* terminator */ }; +/* Errata: CS4207 rev C0/C1/C2 Silicon + * + * http://www.cirrus.com/en/pubs/errata/ER880C3.pdf + * + * 6. At high temperature (TA > +85°C), the digital supply current (IVD) + * may be excessive (up to an additional 200 μA), which is most easily + * observed while the part is being held in reset (RESET# active low). + * + * Root Cause: At initial powerup of the device, the logic that drives + * the clock and write enable to the S/PDIF SRC RAMs is not properly + * initialized. + * Certain random patterns will cause a steady leakage current in those + * RAM cells. The issue will resolve once the SRCs are used (turned on). + * + * Workaround: The following verb sequence briefly turns on the S/PDIF SRC + * blocks, which will alleviate the issue. 
+ */ + +static struct hda_verb cs_errata_init_verbs[] = { + {0x01, AC_VERB_SET_POWER_STATE, 0x00}, /* AFG: D0 */ + {0x11, AC_VERB_SET_PROC_STATE, 0x01}, /* VPW: processing on */ + + {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, + {0x11, AC_VERB_SET_PROC_COEF, 0x9999}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, + {0x11, AC_VERB_SET_PROC_COEF, 0xa412}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0009}, + + {0x07, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Rx: D0 */ + {0x08, AC_VERB_SET_POWER_STATE, 0x00}, /* S/PDIF Tx: D0 */ + + {0x11, AC_VERB_SET_COEF_INDEX, 0x0017}, + {0x11, AC_VERB_SET_PROC_COEF, 0x2412}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0008}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0000}, + {0x11, AC_VERB_SET_COEF_INDEX, 0x0001}, + {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, + {0x11, AC_VERB_SET_PROC_STATE, 0x00}, + + {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ + {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ + /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ + + {} /* terminator */ +}; + /* SPDIF setup */ static void init_digital(struct hda_codec *codec) { @@ -991,6 +1038,9 @@ static int cs_init(struct hda_codec *codec) { struct cs_spec *spec = codec->spec; + /* init_verb sequence for C0/C1/C2 errata*/ + snd_hda_sequence_write(codec, cs_errata_init_verbs); + snd_hda_sequence_write(codec, cs_coef_init_verbs); if (spec->gpio_mask) { diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 2bf2cb5..ffba0a1 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -116,6 +116,7 @@ struct conexant_spec { unsigned int dell_vostro:1; unsigned int ideapad:1; unsigned int thinkpad:1; + unsigned int hp_laptop:1; unsigned int ext_mic_present; unsigned int recording; @@ -390,10 +391,16 @@ static int conexant_add_jack(struct hda_codec *codec, struct conexant_spec *spec; struct conexant_jack *jack; const char *name; - int err; + int i, err; spec = codec->spec; snd_array_init(&spec->jacks, sizeof(*jack), 32); + + jack = spec->jacks.list; + for (i = 0; i < spec->jacks.used; i++, jack++) + if (jack->nid == nid) + return 0 ; /* already present */ + jack = snd_array_new(&spec->jacks); name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ; @@ -1632,6 +1639,11 @@ static void cxt5051_update_speaker(struct hda_codec *codec) pinctl = (!spec->hp_present && spec->cur_eapd) ? 
PIN_OUT : 0; snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl); + /* on ideapad there is an additional speaker (subwoofer) to mute */ + if (spec->ideapad) + snd_hda_codec_write(codec, 0x1b, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, + pinctl); } /* turn on/off EAPD (+ mute HP) as a master switch */ @@ -1888,6 +1900,13 @@ static void cxt5051_init_mic_port(struct hda_codec *codec, hda_nid_t nid, #endif } +static struct hda_verb cxt5051_ideapad_init_verbs[] = { + /* Subwoofer */ + {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, + {0x1b, AC_VERB_SET_CONNECT_SEL, 0x00}, + { } /* end */ +}; + /* initialize jack-sensing, too */ static int cxt5051_init(struct hda_codec *codec) { @@ -1917,6 +1936,7 @@ enum { CXT5051_LENOVO_X200, /* Lenovo X200 laptop, also used for Advanced Mini Dock 250410 */ CXT5051_F700, /* HP Compaq Presario F700 */ CXT5051_TOSHIBA, /* Toshiba M300 & co */ + CXT5051_IDEAPAD, /* Lenovo IdeaPad Y430 */ CXT5051_MODELS }; @@ -1927,6 +1947,7 @@ static const char *cxt5051_models[CXT5051_MODELS] = { [CXT5051_LENOVO_X200] = "lenovo-x200", [CXT5051_F700] = "hp-700", [CXT5051_TOSHIBA] = "toshiba", + [CXT5051_IDEAPAD] = "ideapad", }; static struct snd_pci_quirk cxt5051_cfg_tbl[] = { @@ -1938,6 +1959,7 @@ static struct snd_pci_quirk cxt5051_cfg_tbl[] = { CXT5051_LAPTOP), SND_PCI_QUIRK(0x14f1, 0x5051, "HP Spartan 1.1", CXT5051_HP), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT5051_LENOVO_X200), + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo IdeaPad", CXT5051_IDEAPAD), {} }; @@ -1999,6 +2021,11 @@ static int patch_cxt5051(struct hda_codec *codec) spec->mixers[0] = cxt5051_toshiba_mixers; spec->auto_mic = AUTO_MIC_PORTB; break; + case CXT5051_IDEAPAD: + spec->init_verbs[spec->num_init_verbs++] = + cxt5051_ideapad_init_verbs; + spec->ideapad = 1; + break; } return 0; @@ -2219,6 +2246,18 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec) } } +/* toggle input of built-in digital mic and mic jack appropriately */ +static void cxt5066_hp_laptop_automic(struct hda_codec *codec) +{ + unsigned int present; + + present = snd_hda_jack_detect(codec, 0x1b); + snd_printdd("CXT5066: external microphone present=%d\n", present); + snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL, + present ? 
1 : 3); +} + + /* toggle input of built-in digital mic and mic jack appropriately order is: external mic -> dock mic -> interal mic */ static void cxt5066_thinkpad_automic(struct hda_codec *codec) @@ -2328,6 +2367,20 @@ static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res) } /* unsolicited event for jack sensing */ +static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res) +{ + snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26); + switch (res >> 26) { + case CONEXANT_HP_EVENT: + cxt5066_hp_automute(codec); + break; + case CONEXANT_MIC_EVENT: + cxt5066_hp_laptop_automic(codec); + break; + } +} + +/* unsolicited event for jack sensing */ static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res) { snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26); @@ -2910,6 +2963,14 @@ static struct hda_verb cxt5066_init_verbs_portd_lo[] = { { } /* end */ }; + +static struct hda_verb cxt5066_init_verbs_hp_laptop[] = { + {0x14, AC_VERB_SET_CONNECT_SEL, 0x0}, + {0x19, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_HP_EVENT}, + {0x1b, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | CONEXANT_MIC_EVENT}, + { } /* end */ +}; + /* initialize jack-sensing, too */ static int cxt5066_init(struct hda_codec *codec) { @@ -2925,6 +2986,8 @@ static int cxt5066_init(struct hda_codec *codec) cxt5066_ideapad_automic(codec); else if (spec->thinkpad) cxt5066_thinkpad_automic(codec); + else if (spec->hp_laptop) + cxt5066_hp_laptop_automic(codec); } cxt5066_set_mic_boost(codec); return 0; @@ -2952,6 +3015,7 @@ enum { CXT5066_DELL_VOSTO, /* Dell Vostro 1015i */ CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */ CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */ + CXT5066_HP_LAPTOP, /* HP Laptop */ CXT5066_MODELS }; @@ -2962,21 +3026,26 @@ static const char *cxt5066_models[CXT5066_MODELS] = { [CXT5066_DELL_VOSTO] = "dell-vostro", [CXT5066_IDEAPAD] = "ideapad", [CXT5066_THINKPAD] = "thinkpad", + [CXT5066_HP_LAPTOP] = "hp-laptop", }; static struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x14f1, 0x0101, "Conexant Reference board", CXT5066_LAPTOP), - SND_PCI_QUIRK(0x1028, 0x02f5, "Dell", - CXT5066_DELL_LAPTOP), + SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), + SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), + SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP), SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), + SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), {} @@ -3031,6 +3100,23 @@ static int patch_cxt5066(struct hda_codec *codec) spec->num_init_verbs++; spec->dell_automute = 1; break; + case CXT5066_HP_LAPTOP: + codec->patch_ops.init = cxt5066_init; + 
+		codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+		spec->init_verbs[spec->num_init_verbs] =
+			cxt5066_init_verbs_hp_laptop;
+		spec->num_init_verbs++;
+		spec->hp_laptop = 1;
+		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
+		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
+		/* no S/PDIF out */
+		spec->multiout.dig_out_nid = 0;
+		/* input source automatically selected */
+		spec->input_mux = NULL;
+		spec->port_d_mode = 0;
+		spec->mic_boost = 3; /* default 30dB gain */
+		break;
+
 	case CXT5066_OLPC_XO_1_5:
 		codec->patch_ops.init = cxt5066_olpc_init;
 		codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
diff --git a/sound/pci/hda/patch_nvhdmi.c b/sound/pci/hda/patch_nvhdmi.c
index b0652ac..88ae0f2 100644
--- a/sound/pci/hda/patch_nvhdmi.c
+++ b/sound/pci/hda/patch_nvhdmi.c
@@ -541,26 +541,32 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
  * patch entries
  */
 static struct hda_codec_preset snd_hda_preset_nvhdmi[] = {
-	{ .id = 0x10de0002, .name = "MCP77/78 HDMI",
-	  .patch = patch_nvhdmi_8ch_7x },
-	{ .id = 0x10de0003, .name = "MCP77/78 HDMI",
-	  .patch = patch_nvhdmi_8ch_7x },
-	{ .id = 0x10de0005, .name = "MCP77/78 HDMI",
-	  .patch = patch_nvhdmi_8ch_7x },
-	{ .id = 0x10de0006, .name = "MCP77/78 HDMI",
-	  .patch = patch_nvhdmi_8ch_7x },
-	{ .id = 0x10de0007, .name = "MCP79/7A HDMI",
-	  .patch = patch_nvhdmi_8ch_7x },
-	{ .id = 0x10de000a, .name = "GT220 HDMI",
-	  .patch = patch_nvhdmi_8ch_89 },
-	{ .id = 0x10de000b, .name = "GT21x HDMI",
-	  .patch = patch_nvhdmi_8ch_89 },
-	{ .id = 0x10de000c, .name = "MCP89 HDMI",
-	  .patch = patch_nvhdmi_8ch_89 },
-	{ .id = 0x10de000d, .name = "GT240 HDMI",
-	  .patch = patch_nvhdmi_8ch_89 },
-	{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
-	{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
+	{ .id = 0x10de0002, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
+	{ .id = 0x10de0003, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
+	{ .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
+	{ .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x },
+	{ .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x },
+	{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
+	{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
+	{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
 	{} /* terminator */
 };
@@ -573,6 +579,21 @@ MODULE_ALIAS("snd-hda-codec-id:10de000a");
 MODULE_ALIAS("snd-hda-codec-id:10de000b");
 MODULE_ALIAS("snd-hda-codec-id:10de000c");
 MODULE_ALIAS("snd-hda-codec-id:10de000d");
+MODULE_ALIAS("snd-hda-codec-id:10de0010");
+MODULE_ALIAS("snd-hda-codec-id:10de0011");
+MODULE_ALIAS("snd-hda-codec-id:10de0012");
+MODULE_ALIAS("snd-hda-codec-id:10de0013");
+MODULE_ALIAS("snd-hda-codec-id:10de0014");
+MODULE_ALIAS("snd-hda-codec-id:10de0018");
+MODULE_ALIAS("snd-hda-codec-id:10de0019");
+MODULE_ALIAS("snd-hda-codec-id:10de001a");
+MODULE_ALIAS("snd-hda-codec-id:10de001b");
+MODULE_ALIAS("snd-hda-codec-id:10de001c");
+MODULE_ALIAS("snd-hda-codec-id:10de0040");
+MODULE_ALIAS("snd-hda-codec-id:10de0041");
+MODULE_ALIAS("snd-hda-codec-id:10de0042");
+MODULE_ALIAS("snd-hda-codec-id:10de0043");
+MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 596ea2f..2f06425 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1161,7 +1161,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
 		case 0x10ec0883:
 		case 0x10ec0885:
 		case 0x10ec0887:
-		case 0x10ec0889:
+		/*case 0x10ec0889:*/ /* this causes an SPDIF problem */
 			alc889_coef_init(codec);
 			break;
 		case 0x10ec0888:
@@ -1391,6 +1391,7 @@ do_sku:
 		spec->init_amp = ALC_INIT_GPIO3;
 		break;
 	case 5:
+	default:
 		spec->init_amp = ALC_INIT_DEFAULT;
 		break;
 	}
@@ -4239,6 +4240,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x1734, 0x10b0, "Fujitsu", ALC880_FUJITSU),
 	SND_PCI_QUIRK(0x1854, 0x0018, "LG LW20", ALC880_LG_LW),
 	SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_LG),
+	SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_LG),
 	SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_LG),
 	SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_LG_LW),
 	SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_TCL_S700),
@@ -5183,6 +5185,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 static struct snd_pci_quirk beep_white_list[] = {
 	SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
+	SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
 	{}
 };
@@ -6863,6 +6866,7 @@ static int patch_alc260(struct hda_codec *codec)
 	spec->stream_analog_playback = &alc260_pcm_analog_playback;
 	spec->stream_analog_capture = &alc260_pcm_analog_capture;
+	spec->stream_analog_alt_capture = &alc260_pcm_analog_capture;
 	spec->stream_digital_playback = &alc260_pcm_digital_playback;
 	spec->stream_digital_capture = &alc260_pcm_digital_capture;
@@ -7003,7 +7007,7 @@ static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
 	.num_items = 4,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "iMic", 0x1 },
+		{ "Int Mic", 0x1 },
 		{ "Line", 0x2 },
 		{ "CD", 0x4 },
 	},
@@ -8573,8 +8577,8 @@ static struct snd_kcontrol_new alc883_lenovo_nb0763_mixer[] = {
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("iMic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("iMic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -9470,7 +9474,6 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763),
 	SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763),
 	SND_PCI_QUIRK(0x17aa, 0x101d, "Lenovo Sky", ALC888_LENOVO_SKY),
-	SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2),
 	SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
 	SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
 	SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
@@ -13024,6 +13027,8 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
 		dac = 0x02;
 		break;
 	case 0x15:
+	case 0x1a: /* ALC259/269 only */
+	case 0x1b: /* ALC259/269 only */
 	case 0x21: /* ALC269vb has this pin, too */
 		dac = 0x03;
 		break;
@@ -13303,7 +13308,6 @@ static struct snd_pci_quirk alc268_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x14c0, 0x0025, "COMPAL IFL90/JFL-92", ALC268_TOSHIBA),
 	SND_PCI_QUIRK(0x152d, 0x0763, "Diverse (CPR2000)", ALC268_ACER),
 	SND_PCI_QUIRK(0x152d, 0x0771, "Quanta IL1", ALC267_QUANTA_IL1),
-	SND_PCI_QUIRK(0x1854, 0x1775, "LG R510", ALC268_DELL),
 	{}
 };
@@ -14227,6 +14231,7 @@ static void alc269_auto_init(struct hda_codec *codec)
 enum {
 	ALC269_FIXUP_SONY_VAIO,
+	ALC269_FIXUP_DELL_M101Z,
 };
 
 static const struct hda_verb alc269_sony_vaio_fixup_verbs[] = {
@@ -14238,10 +14243,20 @@ static const struct alc_fixup alc269_fixups[] = {
 	[ALC269_FIXUP_SONY_VAIO] = {
 		.verbs = alc269_sony_vaio_fixup_verbs
 	},
+	[ALC269_FIXUP_DELL_M101Z] = {
+		.verbs = (const struct hda_verb[]) {
+			/* Enables internal speaker */
+			{0x20, AC_VERB_SET_COEF_INDEX, 13},
+			{0x20, AC_VERB_SET_PROC_COEF, 0x4040},
+			{}
+		}
+	},
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x104d, 0x9071, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+	SND_PCI_QUIRK(0x104d, 0x9077, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	{}
 };
@@ -16288,7 +16303,7 @@ static struct alc_config_preset alc861vd_presets[] = {
 static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
 						const struct auto_pin_cfg *cfg)
 {
-	return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
+	return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x22, 0);
 }
@@ -18343,6 +18358,8 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
 		return 0x02;
 	else if (nid >= 0x0c && nid <= 0x0e)
 		return nid - 0x0c + 0x02;
+	else if (nid == 0x26) /* ALC887-VD has this DAC too */
+		return 0x25;
 	else
 		return 0;
 }
@@ -18351,7 +18368,7 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
 static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
 				   hda_nid_t dac)
 {
-	hda_nid_t mix[4];
+	hda_nid_t mix[5];
 	int i, num;
 
 	num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
@@ -18666,6 +18683,36 @@ static void alc662_auto_init(struct hda_codec *codec)
 	alc_inithook(codec);
 }
+enum {
+	ALC662_FIXUP_ASPIRE,
+	ALC662_FIXUP_IDEAPAD,
+};
+
+static const struct alc_fixup alc662_fixups[] = {
+	[ALC662_FIXUP_ASPIRE] = {
+		.pins = (const struct alc_pincfg[]) {
+			{ 0x15, 0x99130112 }, /* subwoofer */
+			{ }
+		}
+	},
+	[ALC662_FIXUP_IDEAPAD] = {
+		.pins = (const struct alc_pincfg[]) {
+			{ 0x17, 0x99130112 }, /* subwoofer */
+			{ }
+		}
+	},
+};
+
+static struct snd_pci_quirk alc662_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
+	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+	{}
+};
+
+
+
 static int patch_alc662(struct hda_codec *codec)
 {
 	struct alc_spec *spec;
@@ -18698,6 +18745,7 @@ static int patch_alc662(struct hda_codec *codec)
 	}
 
 	if (board_config == ALC662_AUTO) {
+		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 1);
 		/* automatic parse from the BIOS config */
 		err = alc662_parse_auto_config(codec);
 		if (err < 0) {
@@ -18756,8 +18804,11 @@ static int patch_alc662(struct hda_codec *codec)
 	spec->vmaster_nid = 0x02;
 
 	codec->patch_ops = alc_patch_ops;
-	if (board_config == ALC662_AUTO)
+	if (board_config == ALC662_AUTO) {
 		spec->init_hook = alc662_auto_init;
+		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 0);
+	}
+
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 	if (!spec->loopback.amplist)
 		spec->loopback.amplist = alc662_loopbacks;
@@ -18770,7 +18821,10 @@ static int patch_alc888(struct hda_codec *codec)
 {
 	if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){
 		kfree(codec->chip_name);
-		codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
+		if (codec->vendor_id == 0x10ec0887)
+			codec->chip_name = kstrdup("ALC887-VD", GFP_KERNEL);
+		else
+			codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
 		if (!codec->chip_name) {
 			alc_free(codec);
 			return -ENOMEM;
@@ -18812,7 +18866,7 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = {
 	{ .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A",
 	  .patch = patch_alc882 },
 	{ .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 },
-	{ .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 },
+	{ .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc888 },
 	{ .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200",
 	  .patch = patch_alc882 },
 	{ .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f1e7bab..3532d2c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -93,6 +93,7 @@ enum {
 	STAC_92HD83XXX_REF,
 	STAC_92HD83XXX_PWR_REF,
 	STAC_DELL_S14,
+	STAC_DELL_E6410,
 	STAC_92HD83XXX_HP,
 	STAC_92HD83XXX_MODELS
 };
@@ -202,6 +203,7 @@ struct sigmatel_spec {
 	unsigned int spdif_mute: 1;
 	unsigned int check_volume_offset:1;
 	unsigned int auto_mic:1;
+	unsigned int linear_tone_beep:1;
 
 	/* gpio lines */
 	unsigned int eapd_mask;
@@ -735,7 +737,7 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
 	struct sigmatel_spec *spec = codec->spec;
 	unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
 	const struct hda_input_mux *imux = spec->input_mux;
-	unsigned int idx, prev_idx;
+	unsigned int idx, prev_idx, didx;
 
 	idx = ucontrol->value.enumerated.item[0];
 	if (idx >= imux->num_items)
@@ -747,7 +749,8 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
 			snd_hda_codec_write_cache(codec, spec->mux_nids[adc_idx], 0,
 						  AC_VERB_SET_CONNECT_SEL,
 						  imux->items[idx].index);
-		if (prev_idx >= spec->num_analog_muxes) {
+		if (prev_idx >= spec->num_analog_muxes &&
+		    spec->mux_nids[adc_idx] != spec->dmux_nids[adc_idx]) {
 			imux = spec->dinput_mux;
 			/* 0 = analog */
 			snd_hda_codec_write_cache(codec,
@@ -757,9 +760,13 @@ static int stac92xx_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_e
 		}
 	} else {
 		imux = spec->dinput_mux;
+		/* first dimux item is hardcoded to select analog imux,
+		 * so lets skip it
+		 */
+		didx = idx - spec->num_analog_muxes + 1;
 		snd_hda_codec_write_cache(codec, spec->dmux_nids[adc_idx], 0,
 					  AC_VERB_SET_CONNECT_SEL,
-					  imux->items[idx - 1].index);
+					  imux->items[didx].index);
 	}
 	spec->cur_mux[adc_idx] = idx;
 	return 1;
@@ -1191,6 +1198,13 @@ static int stac92xx_build_controls(struct hda_codec *codec)
 	return 0;
 }
 
+/* Deliberately turn off 0x0f (Dock Mic) to make it choose Int Mic instead */
+static unsigned int dell_e6410_pin_configs[10] = {
+	0x04a11020, 0x0421101f, 0x400000f0, 0x90170110,
+	0x23011050, 0x40f000f0, 0x400000f0, 0x90a60130,
+	0x40f000f0, 0x40f000f0,
+};
+
 static unsigned int ref9200_pin_configs[8] = {
 	0x01c47010, 0x01447010, 0x0221401f, 0x01114010,
 	0x02a19020, 0x01a19021, 0x90100140, 0x01813122,
@@ -1616,6 +1630,8 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
 static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
 		      "Alienware M17x", STAC_ALIENWARE_M17X),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
+		      "Alienware M17x", STAC_ALIENWARE_M17X),
 	{} /* terminator */
 };
@@ -1635,6 +1651,7 @@ static unsigned int *stac92hd83xxx_brd_tbl[STAC_92HD83XXX_MODELS] = {
 	[STAC_92HD83XXX_REF] = ref92hd83xxx_pin_configs,
 	[STAC_92HD83XXX_PWR_REF] = ref92hd83xxx_pin_configs,
 	[STAC_DELL_S14] = dell_s14_pin_configs,
+	[STAC_DELL_E6410] = dell_e6410_pin_configs,
 };
 
 static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
@@ -1642,6 +1659,7 @@ static const char *stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
 	[STAC_92HD83XXX_REF] = "ref",
 	[STAC_92HD83XXX_PWR_REF] = "mic-ref",
 	[STAC_DELL_S14] = "dell-s14",
+	[STAC_DELL_E6410] = "dell-e6410",
 	[STAC_92HD83XXX_HP] = "hp",
 };
@@ -1653,6 +1671,10 @@ static struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
 		      "DFI LanParty", STAC_92HD83XXX_REF),
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02ba,
 		      "unknown Dell", STAC_DELL_S14),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x040a,
+		      "Dell E6410", STAC_DELL_E6410),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x040b,
+		      "Dell E6510", STAC_DELL_E6410),
 	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x3600,
 			   "HP", STAC_92HD83XXX_HP),
 	{} /* terminator */
@@ -3802,7 +3824,7 @@ static int stac92xx_parse_auto_config(struct hda_codec *codec, hda_nid_t dig_out
 		return err;
 	if (codec->beep) {
 		/* IDT/STAC codecs have linear beep tone parameter */
-		codec->beep->linear_tone = 1;
+		codec->beep->linear_tone = spec->linear_tone_beep;
 		/* if no beep switch is available, make its own one */
 		caps = query_amp_caps(codec, nid, HDA_OUTPUT);
 		if (!(caps & AC_AMPCAP_MUTE)) {
@@ -5005,6 +5027,7 @@ static int patch_stac9200(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 1;
 	spec->num_pins = ARRAY_SIZE(stac9200_pin_nids);
 	spec->pin_nids = stac9200_pin_nids;
 	spec->board_config = snd_hda_check_board_config(codec, STAC_9200_MODELS,
@@ -5068,6 +5091,7 @@ static int patch_stac925x(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 1;
 	spec->num_pins = ARRAY_SIZE(stac925x_pin_nids);
 	spec->pin_nids = stac925x_pin_nids;
@@ -5153,6 +5177,7 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 0;
 	codec->slave_dig_outs = stac92hd73xx_slave_dig_outs;
 	spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids);
 	spec->pin_nids = stac92hd73xx_pin_nids;
@@ -5300,6 +5325,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 1;
 	codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
 	spec->digbeep_nid = 0x21;
 	spec->mux_nids = stac92hd83xxx_mux_nids;
@@ -5522,6 +5548,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 0;
 	codec->patch_ops = stac92xx_patch_ops;
 	spec->num_pins = STAC92HD71BXX_NUM_PINS;
 	switch (codec->vendor_id) {
@@ -5779,6 +5806,7 @@ static int patch_stac922x(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 1;
 	spec->num_pins = ARRAY_SIZE(stac922x_pin_nids);
 	spec->pin_nids = stac922x_pin_nids;
 	spec->board_config = snd_hda_check_board_config(codec, STAC_922X_MODELS,
@@ -5883,6 +5911,7 @@ static int patch_stac927x(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 1;
 	codec->slave_dig_outs = stac927x_slave_dig_outs;
 	spec->num_pins = ARRAY_SIZE(stac927x_pin_nids);
 	spec->pin_nids = stac927x_pin_nids;
@@ -6018,6 +6047,7 @@ static int patch_stac9205(struct hda_codec *codec)
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 1;
 	spec->num_pins = ARRAY_SIZE(stac9205_pin_nids);
 	spec->pin_nids = stac9205_pin_nids;
 	spec->board_config = snd_hda_check_board_config(codec, STAC_9205_MODELS,
@@ -6174,6 +6204,7 @@ static int patch_stac9872(struct hda_codec *codec)
 		return -ENOMEM;
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
+	spec->linear_tone_beep = 1;
 	spec->num_pins = ARRAY_SIZE(stac9872_pin_nids);
 	spec->pin_nids = stac9872_pin_nids;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 7345381..92387d6 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1091,6 +1091,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct via_spec *spec = codec->spec;
 	unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+	int ret;
 
 	if (!spec->mux_nids[adc_idx])
 		return -EINVAL;
@@ -1099,12 +1100,14 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
 			       AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0)
 		snd_hda_codec_write(codec, spec->mux_nids[adc_idx], 0,
 				    AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
-	/* update jack power state */
-	set_jack_power_state(codec);
-	return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
+	ret = snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
 				     spec->mux_nids[adc_idx],
 				     &spec->cur_mux[adc_idx]);
+	/* update jack power state */
+	set_jack_power_state(codec);
+
+	return ret;
 }
 
 static int via_independent_hp_info(struct snd_kcontrol *kcontrol,
@@ -1300,6 +1303,11 @@ static void mute_aa_path(struct hda_codec *codec, int mute)
 		start_idx = 2;
 		end_idx = 4;
 		break;
+	case VT1718S:
+		nid_mixer = 0x21;
+		start_idx = 1;
+		end_idx = 3;
+		break;
 	default:
 		return;
 	}
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 6433e65..ebfa1f8 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
 	},
 	{
 		.subvendor = 0x1014,
+		.subdevice = 0x0534,
+		.name = "ThinkPad X31",
+		.type = AC97_TUNE_INV_EAPD
+	},
+	{
+		.subvendor = 0x1014,
 		.subdevice = 0x1f00,
 		.name = "MS-9128",
 		.type = AC97_TUNE_ALC_JACK
@@ -1860,6 +1866,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
 	},
 	{
 		.subvendor = 0x1028,
+		.subdevice = 0x0182,
+		.name = "Dell Latitude D610", /* STAC9750/51 */
+		.type = AC97_TUNE_HP_ONLY
+	},
+	{
+		.subvendor = 0x1028,
 		.subdevice = 0x0186,
 		.name = "Dell Latitude D810", /* cf. Malone #41015 */
 		.type = AC97_TUNE_HP_MUTE_LED
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 289cb4d..6c0a11a 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -543,6 +543,10 @@ static int __devinit get_oxygen_model(struct oxygen *chip,
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.device_config = PLAYBACK_0_TO_I2S |
+					    PLAYBACK_1_TO_SPDIF |
+					    CAPTURE_0_FROM_I2S_2 |
+					    CAPTURE_1_FROM_SPDIF;
 		break;
 	}
 	if (id->driver_data == MODEL_MERIDIAN ||
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 6147216..a3409ed 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -155,6 +155,7 @@ void oxygen_pci_remove(struct pci_dev *pci);
 int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
 int oxygen_pci_resume(struct pci_dev *pci);
 #endif
+void oxygen_pci_shutdown(struct pci_dev *pci);
 
 /* oxygen_mixer.c */
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index fad03d6..7e93cf8 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -519,16 +519,21 @@ static void oxygen_init(struct oxygen *chip)
 	}
 }
 
-static void oxygen_card_free(struct snd_card *card)
+static void oxygen_shutdown(struct oxygen *chip)
 {
-	struct oxygen *chip = card->private_data;
-
 	spin_lock_irq(&chip->reg_lock);
 	chip->interrupt_mask = 0;
 	chip->pcm_running = 0;
 	oxygen_write16(chip, OXYGEN_DMA_STATUS, 0);
 	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, 0);
 	spin_unlock_irq(&chip->reg_lock);
+}
+
+static void oxygen_card_free(struct snd_card *card)
+{
+	struct oxygen *chip = card->private_data;
+
+	oxygen_shutdown(chip);
 	if (chip->irq >= 0)
 		free_irq(chip->irq, chip);
 	flush_scheduled_work();
@@ -778,3 +783,13 @@ int oxygen_pci_resume(struct pci_dev *pci)
 }
 EXPORT_SYMBOL(oxygen_pci_resume);
 #endif /* CONFIG_PM */
+
+void oxygen_pci_shutdown(struct pci_dev *pci)
+{
+	struct snd_card *card = pci_get_drvdata(pci);
+	struct oxygen *chip = card->private_data;
+
+	oxygen_shutdown(chip);
+	chip->model.cleanup(chip);
+}
+EXPORT_SYMBOL(oxygen_pci_shutdown);
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index f03a2f2..06c863e 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -95,6 +95,7 @@ static struct pci_driver xonar_driver = {
 	.suspend = oxygen_pci_suspend,
 	.resume = oxygen_pci_resume,
 #endif
+	.shutdown = oxygen_pci_shutdown,
 };
 
 static int __init alsa_card_xonar_init(void)
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index dbc4b89..b82c1cf 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -53,6 +53,8 @@ struct xonar_wm87x6 {
 	struct xonar_generic generic;
 	u16 wm8776_regs[0x17];
 	u16 wm8766_regs[0x10];
+	struct snd_kcontrol *line_adcmux_control;
+	struct snd_kcontrol *mic_adcmux_control;
 	struct snd_kcontrol *lc_controls[13];
 };
 
@@ -193,6 +195,7 @@ static void xonar_ds_init(struct oxygen *chip)
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
 	xonar_disable_output(chip);
+	wm8776_write(chip, WM8776_RESET, 0);
 }
 
 static void xonar_ds_suspend(struct oxygen *chip)
@@ -603,6 +606,7 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
 {
 	struct oxygen *chip = ctl->private_data;
 	struct xonar_wm87x6 *data = chip->model_data;
+	struct snd_kcontrol *other_ctl;
 	unsigned int mux_bit = ctl->private_value;
 	u16 reg;
 	int changed;
@@ -610,8 +614,18 @@ static int wm8776_input_mux_put(struct snd_kcontrol *ctl,
 	mutex_lock(&chip->mutex);
 	reg = data->wm8776_regs[WM8776_ADCMUX];
 	if (value->value.integer.value[0]) {
-		reg &= ~0x003;
 		reg |= mux_bit;
+		/* line-in and mic-in are exclusive */
+		mux_bit ^= 3;
+		if (reg & mux_bit) {
+			reg &= ~mux_bit;
+			if (mux_bit == 1)
+				other_ctl = data->line_adcmux_control;
+			else
+				other_ctl = data->mic_adcmux_control;
+			snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
+				       &other_ctl->id);
+		}
 	} else
 		reg &= ~mux_bit;
 	changed = reg != data->wm8776_regs[WM8776_ADCMUX];
@@ -963,7 +977,13 @@ static int xonar_ds_mixer_init(struct oxygen *chip)
 		err = snd_ctl_add(chip->card, ctl);
 		if (err < 0)
 			return err;
+		if (!strcmp(ctl->id.name, "Line Capture Switch"))
+			data->line_adcmux_control = ctl;
+		else if (!strcmp(ctl->id.name, "Mic Capture Switch"))
+			data->mic_adcmux_control = ctl;
 	}
+	if (!data->line_adcmux_control || !data->mic_adcmux_control)
+		return -ENXIO;
 	BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
 	for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
 		ctl = snd_ctl_new1(&lc_controls[i], chip);
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index ad44626..c737287 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
 		    firmware.firmware.ASIC, firmware.firmware.CODEC,
 		    firmware.firmware.AUXDSP, firmware.firmware.PROG);
 
+	if (!chip)
+		return 1;
+
 	for (i = 0; i < FIRMWARE_VERSIONS; i++) {
 		if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware)))
-			break;
-	}
-	if (i >= FIRMWARE_VERSIONS)
-		return 0; /* no match */
+			return 1; /* OK */
-	if (!chip)
-		return 1; /* OK */
+	}
 
 	snd_printdd("Writing Firmware\n");
 	if (!chip->fw_entry) {
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index b92adef..d6fa7bf 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -4609,6 +4609,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
 		if (err < 0)
 			return err;
 
+		memset(&info, 0, sizeof(info));
 		spin_lock_irqsave(&hdsp->lock, flags);
 		info.pref_sync_ref = (unsigned char)hdsp_pref_sync_ref(hdsp);
 		info.wordclock_sync_check = (unsigned char)hdsp_wc_sync_check(hdsp);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 547b713..0c98ef9 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -4127,6 +4127,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep * hw, struct file *file,
 	case SNDRV_HDSPM_IOCTL_GET_CONFIG_INFO:
 
+		memset(&info, 0, sizeof(info));
 		spin_lock_irq(&hdspm->lock);
 		info.pref_sync_ref = hdspm_pref_sync_ref(hdspm);
 		info.wordclock_sync_check = hdspm_wc_sync_check(hdspm);
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index 523b7fc..b69cd4b 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -261,9 +261,9 @@ static int bf5xx_ac97_suspend(struct snd_soc_dai *dai)
 	pr_debug("%s : sport %d\n", __func__, dai->id);
 	if (!dai->active)
 		return 0;
-	if (dai->capture.active)
+	if (dai->capture_active)
 		sport_rx_stop(sport);
-	if (dai->playback.active)
+	if (dai->playback_active)
 		sport_tx_stop(sport);
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index c3571ee..a961fc6 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -171,7 +171,7 @@ static const u16 wm8580_reg[] = {
 	0x0121, 0x017e, 0x007d, 0x0014, /*R3*/
 	0x0121, 0x017e, 0x007d, 0x0194, /*R7*/
-	0x001c, 0x0002, 0x0002, 0x00c2, /*R11*/
+	0x0010, 0x0002, 0x0002, 0x00c2, /*R11*/
 	0x0182, 0x0082, 0x000a, 0x0024, /*R15*/
 	0x0009, 0x0000, 0x00ff, 0x0000, /*R19*/
 	0x00ff, 0x00ff, 0x00ff, 0x00ff, /*R23*/
@@ -269,9 +269,9 @@ SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4, 2, 3, 1, 0),
 SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4, 4, 5, 1, 0),
 
 SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
-SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 0),
-SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 0),
-SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 0),
+SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
+SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
+SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
 
 SOC_DOUBLE("ADC Mute Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 0),
 SOC_SINGLE("ADC High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 4e212ed..f8154e6 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 	case SND_SOC_DAIFMT_LEFT_J:
 		iface |= 0x0001;
 		break;
-		/* FIXME: CHECK A/B */
-	case SND_SOC_DAIFMT_DSP_A:
-		iface |= 0x0003;
-		break;
-	case SND_SOC_DAIFMT_DSP_B:
-		iface |= 0x0007;
-		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index 5da17a7..4b8ffc2 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -188,7 +188,6 @@ static int wm8900_volatile_register(unsigned int reg)
 {
 	switch (reg) {
 	case WM8900_REG_ID:
-	case WM8900_REG_POWER1:
 		return 1;
 	default:
 		return 0;
@@ -1236,11 +1235,6 @@ static __devinit int wm8900_i2c_probe(struct i2c_client *i2c,
 		goto err;
 	}
 
-	/* Read back from the chip */
-	reg = snd_soc_read(codec, WM8900_REG_POWER1);
-	reg = (reg >> 12) & 0xf;
-	dev_info(&i2c->dev, "WM8900 revision %d\n", reg);
-
 	wm8900_reset(codec);
 
 	/* Turn the chip on */
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 87f14f8..e21b80a 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -820,7 +820,8 @@ static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
 
-	return wm8904->deemph;
+	ucontrol->value.enumerated.item[0] = wm8904->deemph;
+	return 0;
 }
 
 static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index fedb764..12d04d7 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -384,7 +384,8 @@ static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 	struct wm8955_priv *wm8955 = snd_soc_codec_get_drvdata(codec);
 
-	return wm8955->deemph;
+	ucontrol->value.enumerated.item[0] = wm8955->deemph;
+	return 0;
 }
 
 static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 5b9a756..3499c78 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -711,7 +711,7 @@ static int wm8961_hw_params(struct snd_pcm_substream *substream,
 	if (fs <= 24000)
 		reg |= WM8961_DACSLOPE;
 	else
-		reg &= WM8961_DACSLOPE;
+		reg &= ~WM8961_DACSLOPE;
 	snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
 
 	return 0;
@@ -736,7 +736,7 @@ static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
 		freq /= 2;
 	} else {
 		dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
-		reg &= WM8961_MCLKDIV;
+		reg &= ~WM8961_MCLKDIV;
 	}
 
 	snd_soc_write(codec, WM8961_CLOCKING1, reg);
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index c018772..bdbfac7 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1185,7 +1185,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
 			WM8990_VMIDTOG);
 
 		/* Delay to allow output caps to discharge */
-		msleep(msecs_to_jiffies(300));
+		msleep(300);
 
 		/* Disable VMIDTOG */
 		snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
@@ -1197,17 +1197,17 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
 		/* Enable outputs */
 		snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1b00);
-		msleep(msecs_to_jiffies(50));
+		msleep(50);
 
 		/* Enable VMID at 2x50k */
 		snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f02);
-		msleep(msecs_to_jiffies(100));
+		msleep(100);
 
 		/* Enable VREF */
 		snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f03);
-		msleep(msecs_to_jiffies(600));
+		msleep(600);
 
 		/* Enable BUFIOEN */
 		snd_soc_write(codec, WM8990_ANTIPOP2, WM8990_SOFTST |
@@ -1252,7 +1252,7 @@ static int wm8990_set_bias_level(struct snd_soc_codec *codec,
 		/* Disable VMID */
 		snd_soc_write(codec, WM8990_POWER_MANAGEMENT_1, 0x1f01);
-		msleep(msecs_to_jiffies(300));
+		msleep(300);
 
 		/* Enable all output discharge bits */
 		snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 16f1a57..5093a76 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -293,7 +293,7 @@ SOC_DOUBLE_R("Speaker Switch",
 SOC_DOUBLE_R("Speaker ZC Switch",
 	     WM8993_SPEAKER_VOLUME_LEFT, WM8993_SPEAKER_VOLUME_RIGHT,
 	     7, 1, 0),
-SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 0, 3, 7, 0,
+SOC_DOUBLE_TLV("Speaker Boost Volume", WM8993_SPKOUT_BOOST, 3, 0, 7, 0,
 	       spkboost_tlv),
 SOC_ENUM("Speaker Reference", speaker_ref),
 SOC_ENUM("Speaker Mode", speaker_mode),
@@ -705,12 +705,12 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 	{ "SPKL", "Input Switch", "MIXINL" },
 	{ "SPKL", "IN1LP Switch", "IN1LP" },
-	{ "SPKL", "Output Switch", "Left Output Mixer" },
+	{ "SPKL", "Output Switch", "Left Output PGA" },
 	{ "SPKL", NULL, "TOCLK" },
 
 	{ "SPKR", "Input Switch", "MIXINR" },
 	{ "SPKR", "IN1RP Switch", "IN1RP" },
-	{ "SPKR", "Output Switch", "Right Output Mixer" },
+	{ "SPKR", "Output Switch", "Right Output PGA" },
 	{ "SPKR", NULL, "TOCLK" },
 
 	{ "SPKL Boost", "Direct Voice Switch", "Direct Voice" },
@@ -732,8 +732,8 @@ static const struct snd_soc_dapm_route analogue_routes[] = {
 	{ "SPKOUTRP", NULL, "SPKR Driver" },
 	{ "SPKOUTRN", NULL, "SPKR Driver" },
 
-	{ "Left Headphone Mux", "Mixer", "Left Output Mixer" },
-	{ "Right Headphone Mux", "Mixer", "Right Output Mixer" },
+	{ "Left Headphone Mux", "Mixer", "Left Output PGA" },
+	{ "Right Headphone Mux", "Mixer", "Right Output PGA" },
 	{ "Headphone PGA", NULL, "Left Headphone Mux" },
 	{ "Headphone PGA", NULL, "Right Headphone Mux" },
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 472af38..adbc68c 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -340,7 +340,7 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec,
 static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
 				      unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	u8 *cache = codec->reg_cache;
 
 	reg &= 0xff;
 	if (reg >= codec->reg_cache_size)
@@ -351,7 +351,7 @@ static int snd_soc_16_8_write(struct snd_soc_codec *codec,
 			      unsigned int reg, unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
+	u8 *cache = codec->reg_cache;
 	u8 data[3];
 	int ret;
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 4328cad..a184e91 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -640,7 +640,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
 	}
 
 	dev->pcm->private_data = dev;
-	strcpy(dev->pcm->name, dev->product_name);
+	strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
 
 	memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
 	memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c
index 2f218c7..a1a4708 100644
--- a/sound/usb/caiaq/midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device)
 	if (ret < 0)
 		return ret;
 
-	strcpy(rmidi->name, device->product_name);
+	strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
 
 	rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
 	rmidi->private_data = device;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 7a8ac1d..ceeba89 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -126,7 +126,7 @@ static void snd_usb_stream_disconnect(struct list_head *head)
 	for (idx = 0; idx < 2; idx++) {
 		subs = &as->substream[idx];
 		if (!subs->num_formats)
-			return;
+			continue;
 		snd_usb_release_substream_urbs(subs, 1);
 		subs->interface = -1;
 	}
@@ -216,6 +216,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 	}
 
 	switch (protocol) {
+	default:
+		snd_printdd(KERN_WARNING "unknown interface protocol %#02x, assuming v1\n",
+			    protocol);
+		/* fall through */
+
 	case UAC_VERSION_1: {
 		struct uac_ac_header_descriptor_v1 *h1 = control_header;
@@ -253,10 +258,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 		break;
 	}
-
-	default:
-		snd_printk(KERN_ERR "unknown protocol version 0x%02x\n", protocol);
-		return -EINVAL;
 	}
 
 	return 0;
@@ -465,7 +466,13 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
 		goto __error;
 	}
 
-	chip->ctrl_intf = alts;
+	/*
+	 * For devices with more than one control interface, we assume the
+	 * first contains the audio controls. We might need a more specific
+	 * check here in the future.
+	 */
+	if (!chip->ctrl_intf)
+		chip->ctrl_intf = alts;
 
 	if (err > 0) {
 		/* create normal USB audio interfaces */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index b585511..aeda4c7 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -304,12 +304,11 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
 	switch (altsd->bInterfaceProtocol) {
 	case UAC_VERSION_1:
+	default:
 		return set_sample_rate_v1(chip, iface, alts, fmt, rate);
 
 	case UAC_VERSION_2:
 		return set_sample_rate_v2(chip, iface, alts, fmt, rate);
 	}
-
-	return -EINVAL;
 }
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 6f6596c..0aac628 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -274,6 +274,12 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
 		/* get audio formats */
 		switch (protocol) {
+		default:
+			snd_printdd(KERN_WARNING "%d:%u:%d: unknown interface protocol %#02x, assuming v1\n",
+				    dev->devnum, iface_no, altno, protocol);
+			protocol = UAC_VERSION_1;
+			/* fall through */
+
 		case UAC_VERSION_1: {
 			struct uac_as_header_descriptor_v1 *as =
 				snd_usb_find_csint_desc(alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);
@@ -335,11 +341,6 @@ int snd_usb_parse_audio_endpoints(struct snd_usb_audio *chip, int iface_no)
 					dev->devnum, iface_no, altno, as->bTerminalLink);
 				continue;
 			}
-
-		default:
-			snd_printk(KERN_ERR "%d:%u:%d : unknown interface protocol %04x\n",
-				   dev->devnum, iface_no, altno, protocol);
-			continue;
 		}
 
 		/* get format type */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 30364ab..440d7c3 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -49,7 +49,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
 	u64 pcm_formats;
 
 	switch (protocol) {
-	case UAC_VERSION_1: {
+	case UAC_VERSION_1:
+	default: {
 		struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
 		sample_width = fmt->bBitResolution;
 		sample_bytes = fmt->bSubframeSize;
@@ -64,9 +65,6 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
 		format <<= 1;
 		break;
 	}
-
-	default:
-		return -EINVAL;
 	}
 
 	pcm_formats = 0;
@@ -385,6 +383,10 @@ static int parse_audio_format_i(struct snd_usb_audio *chip,
 	 * audio class v2 uses class specific EP0 range requests for that.
 	 */
 	switch (protocol) {
+	default:
+		snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+			   chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+		/* fall through */
 	case UAC_VERSION_1:
 		fp->channels = fmt->bNrChannels;
 		ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
@@ -435,6 +437,10 @@ static int parse_audio_format_ii(struct snd_usb_audio *chip,
 	fp->channels = 1;
 
 	switch (protocol) {
+	default:
+		snd_printdd(KERN_WARNING "%d:%u:%d : invalid protocol version %d, assuming v1\n",
+			   chip->dev->devnum, fp->iface, fp->altsetting, protocol);
+		/* fall through */
 	case UAC_VERSION_1: {
 		struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;
 		brate = le16_to_cpu(fmt->wMaxBitRate);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 736d134..2a4377f 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2168,7 +2168,15 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
 	}
 
 	host_iface = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
-	mixer->protocol = get_iface_desc(host_iface)->bInterfaceProtocol;
+	switch (get_iface_desc(host_iface)->bInterfaceProtocol) {
+	case UAC_VERSION_1:
+	default:
+		mixer->protocol = UAC_VERSION_1;
+		break;
+	case UAC_VERSION_2:
+		mixer->protocol = UAC_VERSION_2;
+		break;
+	}
 
 	if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
 	    (err = snd_usb_mixer_status_create(mixer)) < 0)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4568298..b40a248 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -173,13 +173,12 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
 	switch (altsd->bInterfaceProtocol) {
 	case UAC_VERSION_1:
+	default:
 		return init_pitch_v1(chip, iface, alts, fmt);
 
 	case UAC_VERSION_2:
 		return init_pitch_v2(chip, iface, alts, fmt);
 	}
-
-	return -EINVAL;
 }
 
 /*
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 6ef68e4..084e6fc 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -273,29 +273,26 @@ static unsigned int usb_stream_hwdep_poll(struct snd_hwdep *hw,
 					  struct file *file, poll_table *wait)
 {
 	struct us122l	*us122l = hw->private_data;
-	struct usb_stream *s = us122l->sk.s;
 	unsigned	*polled;
 	unsigned int	mask;
 
 	poll_wait(file, &us122l->sk.sleep, wait);
 
-	switch (s->state) {
-	case usb_stream_ready:
-		if (us122l->first == file)
-			polled = &s->periods_polled;
-		else
-			polled = &us122l->second_periods_polled;
-		if (*polled != s->periods_done) {
-			*polled = s->periods_done;
-			mask = POLLIN | POLLOUT | POLLWRNORM;
-			break;
+	mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
+	if (mutex_trylock(&us122l->mutex)) {
+		struct usb_stream *s = us122l->sk.s;
+		if (s && s->state == usb_stream_ready) {
+			if (us122l->first == file)
+				polled = &s->periods_polled;
+			else
+				polled = &us122l->second_periods_polled;
+			if (*polled != s->periods_done) {
+				*polled = s->periods_done;
+				mask = POLLIN | POLLOUT | POLLWRNORM;
+			} else
+				mask = 0;
 		}
-		/* Fall through */
-		mask = 0;
-		break;
-	default:
-		mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
-		break;
+		mutex_unlock(&us122l->mutex);
 	}
 	return mask;
 }
@@ -381,6 +378,7 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
 {
 	struct usb_stream_config *cfg;
 	struct us122l *us122l = hw->private_data;
+	struct usb_stream *s;
 	unsigned min_period_frames;
 	int err = 0;
 	bool high_speed;
@@ -426,18 +424,18 @@ static int usb_stream_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
 	snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
 
 	mutex_lock(&us122l->mutex);
+	s = us122l->sk.s;
 	if (!us122l->master)
 		us122l->master = file;
 	else if (us122l->master != file) {
-		if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
+		if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
 			err = -EIO;
 			goto unlock;
 		}
 		us122l->slave = file;
 	}
-	if (!us122l->sk.s ||
-	    memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
-	    us122l->sk.s->state == usb_stream_xrun) {
+	if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+	    s->state == usb_stream_xrun) {
 		us122l_stop(us122l);
 		if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
 			err = -EIO;
@@ -448,6 +446,7 @@ unlock:
 	mutex_unlock(&us122l->mutex);
 free:
 	kfree(cfg);
+	wake_up_all(&us122l->sk.sleep);
 	return err;
 }
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index f2e9ee1..ecb9732 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -50,6 +50,7 @@ static inline void callchain_init(struct callchain_node *node)
 	INIT_LIST_HEAD(&node->children);
 	INIT_LIST_HEAD(&node->val);
 
+	node->children_hit = 0;
 	node->parent = NULL;
 	node->hit = 0;
 }
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c422cd6..af094a2 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -134,6 +134,7 @@ void perf_session__delete(struct perf_session *self)
 void perf_session__remove_thread(struct perf_session *self, struct thread *th)
 {
+	self->last_match = NULL;
 	rb_erase(&th->rb_node, &self->threads);
 	/*
 	 * We may have references to this thread, for instance in some hist_entry
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b81f0eb..33993e3 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -217,7 +217,6 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 	events = file->f_op->poll(file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
-	spin_unlock_irq(&kvm->irqfds.lock);
 
 	/*
 	 * Check if there was an event already pending on the eventfd
@@ -226,6 +225,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 	if (events & POLLIN)
 		schedule_work(&irqfd->inject);
 
+	spin_unlock_irq(&kvm->irqfds.lock);
+
 	/*
 	 * do not drop the file until the irqfd is fully initialized, otherwise
 	 * we might race against the POLLHUP
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f032806..dd85e29 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -566,6 +566,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	new = old = *memslot;
 
+	new.id = mem->slot;
 	new.base_gfn = base_gfn;
 	new.npages = npages;
 	new.flags = mem->flags;
@@ -1941,10 +1942,12 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 asmlinkage void kvm_handle_fault_on_reboot(void)
 {
-	if (kvm_rebooting)
+	if (kvm_rebooting) {
 		/* spin while reset goes on */
+		local_irq_enable();
 		while (true)
 			;
+	}
 	/* Fault while not rebooting.  We want the trace. */
 	BUG();
 }