dect / linux-2.6

Merge commit 'v2.6.35-rc4' into perf/core

Merge reason: Pick up the latest perf fixes

Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 08f8ba0799
Ingo Molnar, 2010-07-05 08:30:55 +02:00
179 changed files with 1876 additions and 1461 deletions


@ -125,6 +125,11 @@ ibmasr:
nowayout: Watchdog cannot be stopped once started
(default=kernel config parameter)
-------------------------------------------------
imx2_wdt:
timeout: Watchdog timeout in seconds (default 60 s)
nowayout: Watchdog cannot be stopped once started
(default=kernel config parameter)
-------------------------------------------------
indydog:
nowayout: Watchdog cannot be stopped once started
(default=kernel config parameter)


@ -896,11 +896,13 @@ S: Maintained
ARM/SAMSUNG ARM ARCHITECTURES
M: Ben Dooks <ben-linux@fluff.org>
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-s3c/
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
ARM/S3C2410 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
@ -1148,7 +1150,7 @@ F: drivers/mmc/host/atmel-mci.c
F: drivers/mmc/host/atmel-mci-regs.h
ATMEL AT91 / AT32 SERIAL DRIVER
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
M: Nicolas Ferre <nicolas.ferre@atmel.com>
S: Supported
F: drivers/serial/atmel_serial.c
@ -1160,18 +1162,18 @@ F: drivers/video/atmel_lcdfb.c
F: include/video/atmel_lcdc.h
ATMEL MACB ETHERNET DRIVER
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
M: Nicolas Ferre <nicolas.ferre@atmel.com>
S: Supported
F: drivers/net/macb.*
ATMEL SPI DRIVER
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
M: Nicolas Ferre <nicolas.ferre@atmel.com>
S: Supported
F: drivers/spi/atmel_spi.*
ATMEL USBA UDC DRIVER
M: Haavard Skinnemoen <hskinnemoen@atmel.com>
L: kernel@avr32linux.org
M: Nicolas Ferre <nicolas.ferre@atmel.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://avr32linux.org/twiki/bin/view/Main/AtmelUsbDeviceDriver
S: Supported
F: drivers/usb/gadget/atmel_usba_udc.*
@ -2109,11 +2111,18 @@ F: drivers/edac/i5000_edac.c
EDAC-I5400
M: Mauro Carvalho Chehab <mchehab@redhat.com>
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i5400_edac.c
EDAC-I7CORE
M: Mauro Carvalho Chehab <mchehab@redhat.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i7core_edac.c linux/edac_mce.h drivers/edac/edac_mce.c
EDAC-I82975X
M: Ranganathan Desikan <ravi@jetztechnologies.com>
M: "Arvind R." <arvind@jetztechnologies.com>
@ -3373,7 +3382,7 @@ KPROBES
M: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@redhat.com>
M: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
S: Maintained
F: Documentation/kprobes.txt
F: include/linux/kprobes.h
@ -4621,6 +4630,12 @@ M: Robert Jarzmik <robert.jarzmik@free.fr>
L: rtc-linux@googlegroups.com
S: Maintained
QLOGIC QLA1280 SCSI DRIVER
M: Michael Reed <mdr@sgi.com>
L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/qla1280.[ch]
QLOGIC QLA2XXX FC-SCSI DRIVER
M: Andrew Vasquez <andrew.vasquez@qlogic.com>
M: linux-driver@qlogic.com


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 35
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Sheep on Meth
# *DOCUMENTATION*
@ -883,80 +883,10 @@ PHONY += $(vmlinux-dirs)
$(vmlinux-dirs): prepare scripts
$(Q)$(MAKE) $(build)=$@
# Build the kernel release string
#
# The KERNELRELEASE value built here is stored in the file
# include/config/kernel.release, and is used when executing several
# make targets, such as "make install" or "make modules_install."
#
# The eventual kernel release string consists of the following fields,
# shown in a hierarchical format to show how smaller parts are concatenated
# to form the larger and final value, with values coming from places like
# the Makefile, kernel config options, make command line options and/or
# SCM tag information.
#
# $(KERNELVERSION)
# $(VERSION) eg, 2
# $(PATCHLEVEL) eg, 6
# $(SUBLEVEL) eg, 18
# $(EXTRAVERSION) eg, -rc6
# $(localver-full)
# $(localver)
# localversion* (files without backups, containing '~')
# $(CONFIG_LOCALVERSION) (from kernel config setting)
# $(LOCALVERSION) (from make command line, if provided)
# $(localver-extra)
# $(scm-identifier) (unique SCM tag, if one exists)
# ./scripts/setlocalversion (only with CONFIG_LOCALVERSION_AUTO)
# .scmversion (only with CONFIG_LOCALVERSION_AUTO)
# + (only without CONFIG_LOCALVERSION_AUTO
# and without LOCALVERSION= and
# repository is at non-tagged commit)
#
# For kernels without CONFIG_LOCALVERSION_AUTO compiled from an SCM that has
# been revised beyond a tagged commit, `+' is appended to the version string
# when not overridden by using "make LOCALVERSION=". This indicates that the
# kernel is not a vanilla release version and has been modified.
pattern = ".*/localversion[^~]*"
string = $(shell cat /dev/null \
`find $(objtree) $(srctree) -maxdepth 1 -regex $(pattern) | sort -u`)
localver = $(subst $(space),, $(string) \
$(patsubst "%",%,$(CONFIG_LOCALVERSION)))
# scripts/setlocalversion is called to create a unique identifier if the source
# is managed by a known SCM and the repository has been revised since the last
# tagged (release) commit. The format of the identifier is determined by the
# SCM's implementation.
#
# .scmversion is used when generating rpm packages so we do not loose
# the version information from the SCM when we do the build of the kernel
# from the copied source
ifeq ($(wildcard .scmversion),)
scm-identifier = $(shell $(CONFIG_SHELL) \
$(srctree)/scripts/setlocalversion $(srctree))
else
scm-identifier = $(shell cat .scmversion 2> /dev/null)
endif
ifdef CONFIG_LOCALVERSION_AUTO
localver-extra = $(scm-identifier)
else
ifneq ($(scm-identifier),)
ifeq ("$(origin LOCALVERSION)", "undefined")
localver-extra = +
endif
endif
endif
localver-full = $(localver)$(LOCALVERSION)$(localver-extra)
# Store (new) KERNELRELASE string in include/config/kernel.release
kernelrelease = $(KERNELVERSION)$(localver-full)
include/config/kernel.release: include/config/auto.conf FORCE
$(Q)rm -f $@
$(Q)echo $(kernelrelease) > $@
$(Q)echo "$(KERNELVERSION)$$($(CONFIG_SHELL) scripts/setlocalversion $(srctree))" > $@
# Things we need to do before we recursively start building the kernel


@ -955,7 +955,8 @@ config XSCALE_PMU
default y
config CPU_HAS_PMU
depends on CPU_V6 || CPU_V7 || XSCALE_PMU
depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
(!ARCH_OMAP3 || OMAP3_EMU)
default y
bool


@ -21,8 +21,8 @@ struct pxa2xx_udc_mach_info {
* here. Note that sometimes the signals go through inverters...
*/
bool gpio_vbus_inverted;
u16 gpio_vbus; /* high == vbus present */
int gpio_vbus; /* high == vbus present */
bool gpio_pullup_inverted;
u16 gpio_pullup; /* high == pullup activated */
int gpio_pullup; /* high == pullup activated */
};
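Note: the point of widening u16 to int here is that GPIO numbers use negative values to mean "not wired up", and a u16 would silently truncate -1 to 65535. A minimal sketch of the check this enables (illustrative, not from this commit):

#include <linux/gpio.h>

/* sketch: boards without VBUS sensing can now pass gpio_vbus = -1 */
static int vbus_gpio_setup(struct pxa2xx_udc_mach_info *info)
{
	if (!gpio_is_valid(info->gpio_vbus))
		return 0;			/* no VBUS GPIO on this board */
	return gpio_request(info->gpio_vbus, "udc vbus");
}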


@ -91,7 +91,11 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
#if __LINUX_ARM_ARCH__ == 6
#define cpu_relax() smp_mb()
#else
#define cpu_relax() barrier()
#endif
/*
* Create a new kernel thread
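Note: strengthening cpu_relax() to smp_mb() on ARMv6 keeps polling loops live on ARM11MPCore, where a loop with no barrier between reads may fail to observe another core's store in good time. A hedged illustration of the loop shape this protects (hypothetical flag):

/* illustration only: the barrier inside cpu_relax() is what lets the
 * store from the other CPU become visible here.
 */
static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		cpu_relax();
}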


@ -201,7 +201,7 @@ armpmu_event_update(struct perf_event *event,
{
int shift = 64 - 32;
s64 prev_raw_count, new_raw_count;
s64 delta;
u64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
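Note: the delta here is the difference of a 32-bit hardware counter that can wrap, so it must be accumulated unsigned; a signed delta would go negative across a wrap and drive the running count backwards. A minimal sketch of the arithmetic (not the kernel code itself):

/* counter wrapped from 0xffffff00 to 0x00000100: exactly 0x200 events */
static u64 accumulate(u64 count, u32 prev, u32 now)
{
	u64 delta = (u32)(now - prev);	/* modular 32-bit difference */
	return count + delta;
}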


@ -115,6 +115,8 @@ static struct platform_device physmap_flash_device = {
/* USB */
#if defined(CONFIG_USB_ULPI)
#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
@ -244,10 +246,20 @@ static struct mxc_usbh_platform_data usbh2_pdata = {
.flags = MXC_EHCI_POWER_PINS_ENABLED,
};
static struct platform_device *devices[] __initdata = {
&smsc91x_device,
&physmap_flash_device,
};
static void lilly1131_usb_init(void)
{
usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_usbh1, &usbh1_pdata);
mxc_register_device(&mxc_usbh2, &usbh2_pdata);
}
#else
static inline void lilly1131_usb_init(void) {}
#endif /* CONFIG_USB_ULPI */
/* SPI */
@ -279,6 +291,11 @@ static struct spi_board_info mc13783_dev __initdata = {
.platform_data = &mc13783_pdata,
};
static struct platform_device *devices[] __initdata = {
&smsc91x_device,
&physmap_flash_device,
};
static int mx31lilly_baseboard;
core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444);
@ -321,13 +338,7 @@ static void __init mx31lilly_board_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
/* USB */
usbotg_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_usbh1, &usbh1_pdata);
mxc_register_device(&mxc_usbh2, &usbh2_pdata);
lilly1131_usb_init();
}
static void __init mx31lilly_timer_init(void)


@ -538,9 +538,7 @@ static void ads7846_dev_init(void)
printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
gpio_direction_input(OMAP3_STALKER_TS_GPIO);
omap_set_gpio_debounce(OMAP3_STALKER_TS_GPIO, 1);
omap_set_gpio_debounce_time(OMAP3_STALKER_TS_GPIO, 0xa);
gpio_set_debounce(OMAP3_STALKER_TS_GPIO, 310);
}
static int ads7846_get_pendown_state(void)
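Note: gpio_set_debounce() takes microseconds, while the old OMAP-specific call took a tick count; assuming the roughly 31 µs OMAP debounce clock period, the old value of 0xa works out to 10 × 31 = 310 µs. A sketch of the conversion (assumed units, for illustration only):

static void ts_debounce_setup(void)
{
	unsigned int ticks = 0xa;		/* old omap_set_gpio_debounce_time value */
	unsigned int usecs = ticks * 31;	/* ~31 us per debounce tick => 310 */

	gpio_set_debounce(OMAP3_STALKER_TS_GPIO, usecs);
}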


@ -1369,6 +1369,7 @@ static struct clk emif1_ick = {
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.flags = ENABLE_ON_INIT,
.clkdm_name = "l3_emif_clkdm",
.parent = &ddrphy_ck,
.recalc = &followparent_recalc,
@ -1379,6 +1380,7 @@ static struct clk emif2_ick = {
.ops = &clkops_omap2_dflt,
.enable_reg = OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL,
.enable_bit = OMAP4430_MODULEMODE_HWCTRL,
.flags = ENABLE_ON_INIT,
.clkdm_name = "l3_emif_clkdm",
.parent = &ddrphy_ck,
.recalc = &followparent_recalc,


@ -409,10 +409,11 @@ static int _init_main_clk(struct omap_hwmod *oh)
return 0;
oh->_clk = omap_clk_get_by_name(oh->main_clk);
if (!oh->_clk)
if (!oh->_clk) {
pr_warning("omap_hwmod: %s: cannot clk_get main_clk %s\n",
oh->name, oh->main_clk);
return -EINVAL;
}
if (!oh->_clk->clkdm)
pr_warning("omap_hwmod: %s: missing clockdomain for %s.\n",
@ -444,10 +445,11 @@ static int _init_interface_clks(struct omap_hwmod *oh)
continue;
c = omap_clk_get_by_name(os->clk);
if (!c)
if (!c) {
pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
oh->name, os->clk);
ret = -EINVAL;
}
os->_clk = c;
}
@ -470,10 +472,11 @@ static int _init_opt_clks(struct omap_hwmod *oh)
for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++) {
c = omap_clk_get_by_name(oc->clk);
if (!c)
if (!c) {
pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
oh->name, oc->clk);
ret = -EINVAL;
}
oc->_clk = c;
}


@ -99,7 +99,7 @@ static void omap3_enable_io_chain(void)
/* Do a readback to assure write has been done */
prm_read_mod_reg(WKUP_MOD, PM_WKEN);
while (!(prm_read_mod_reg(WKUP_MOD, PM_WKST) &
while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
OMAP3430_ST_IO_CHAIN_MASK)) {
timeout++;
if (timeout > 1000) {
@ -108,7 +108,7 @@ static void omap3_enable_io_chain(void)
return;
}
prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
WKUP_MOD, PM_WKST);
WKUP_MOD, PM_WKEN);
}
}
}


@ -20,6 +20,8 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <plat/mux.h>


@ -697,7 +697,7 @@ static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
};
/* Board I2C devices. */
static struct i2c_board_info __initdata mioa701_i2c_devices[] = {
static struct i2c_board_info mioa701_i2c_devices[] = {
{
I2C_BOARD_INFO("mt9m111", 0x5d),
},


@ -3,8 +3,9 @@
*
* Support for the Zipit Z2 Handheld device.
*
* Author: Ken McGuire
* Created: Jan 25, 2009
* Copyright (C) 2009-2010 Marek Vasut <marek.vasut@gmail.com>
*
* Based on research and code by: Ken McGuire
* Based on mainstone.c as modified for the Zipit Z2.
*
* This program is free software; you can redistribute it and/or modify
@ -157,21 +158,14 @@ static struct mtd_partition z2_flash_parts[] = {
{
.name = "U-Boot Bootloader",
.offset = 0x0,
.size = 0x20000,
},
{
.name = "Linux Kernel",
.offset = 0x20000,
.size = 0x220000,
},
{
.name = "Filesystem",
.offset = 0x240000,
.size = 0x5b0000,
},
{
.size = 0x40000,
}, {
.name = "U-Boot Environment",
.offset = 0x7f0000,
.offset = 0x40000,
.size = 0x60000,
}, {
.name = "Flash",
.offset = 0x60000,
.size = MTDPART_SIZ_FULL,
},
};


@ -18,6 +18,7 @@ config REALVIEW_EB_ARM11MP
bool "Support ARM11MPCore tile"
depends on MACH_REALVIEW_EB
select CPU_V6
select ARCH_HAS_BARRIERS if SMP
help
Enable support for the ARM11MPCore tile on the Realview platform.
@ -35,6 +36,7 @@ config MACH_REALVIEW_PB11MP
select CPU_V6
select ARM_GIC
select HAVE_PATA_PLATFORM
select ARCH_HAS_BARRIERS if SMP
help
Include support for the ARM(R) RealView MPCore Platform Baseboard.
PB11MPCore is a platform with an on-board ARM11MPCore and has


@ -0,0 +1,8 @@
/*
* Barriers redefined for RealView ARM11MPCore platforms with L220 cache
* controller to work around hardware errata causing the outer_sync()
* operation to deadlock the system.
*/
#define mb() dsb()
#define rmb() dmb()
#define wmb() mb()


@ -32,6 +32,7 @@
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>
@ -457,7 +458,7 @@ static void __init realview_eb_init(void)
MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.phys_io = REALVIEW_EB_UART0_BASE,
.phys_io = REALVIEW_EB_UART0_BASE & SECTION_MASK,
.io_pg_offst = (IO_ADDRESS(REALVIEW_EB_UART0_BASE) >> 18) & 0xfffc,
.boot_params = PHYS_OFFSET + 0x00000100,
.fixup = realview_fixup,


@ -32,6 +32,7 @@
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
@ -351,7 +352,7 @@ static void __init realview_pb1176_init(void)
MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.phys_io = REALVIEW_PB1176_UART0_BASE,
.phys_io = REALVIEW_PB1176_UART0_BASE & SECTION_MASK,
.io_pg_offst = (IO_ADDRESS(REALVIEW_PB1176_UART0_BASE) >> 18) & 0xfffc,
.boot_params = PHYS_OFFSET + 0x00000100,
.fixup = realview_pb1176_fixup,


@ -32,6 +32,7 @@
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/localtimer.h>
@ -373,7 +374,7 @@ static void __init realview_pb11mp_init(void)
MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.phys_io = REALVIEW_PB11MP_UART0_BASE,
.phys_io = REALVIEW_PB11MP_UART0_BASE & SECTION_MASK,
.io_pg_offst = (IO_ADDRESS(REALVIEW_PB11MP_UART0_BASE) >> 18) & 0xfffc,
.boot_params = PHYS_OFFSET + 0x00000100,
.fixup = realview_fixup,


@ -31,6 +31,7 @@
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/pmu.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
@ -323,7 +324,7 @@ static void __init realview_pba8_init(void)
MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.phys_io = REALVIEW_PBA8_UART0_BASE,
.phys_io = REALVIEW_PBA8_UART0_BASE & SECTION_MASK,
.io_pg_offst = (IO_ADDRESS(REALVIEW_PBA8_UART0_BASE) >> 18) & 0xfffc,
.boot_params = PHYS_OFFSET + 0x00000100,
.fixup = realview_fixup,


@ -31,6 +31,7 @@
#include <asm/mach-types.h>
#include <asm/pmu.h>
#include <asm/smp_twd.h>
#include <asm/pgtable.h>
#include <asm/hardware/gic.h>
#include <asm/hardware/cache-l2x0.h>
@ -409,7 +410,7 @@ static void __init realview_pbx_init(void)
MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
/* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
.phys_io = REALVIEW_PBX_UART0_BASE,
.phys_io = REALVIEW_PBX_UART0_BASE & SECTION_MASK,
.io_pg_offst = (IO_ADDRESS(REALVIEW_PBX_UART0_BASE) >> 18) & 0xfffc,
.boot_params = PHYS_OFFSET + 0x00000100,
.fixup = realview_pbx_fixup,


@ -10,6 +10,7 @@
#include <linux/amba/clcd.h>
#include <asm/clkdev.h>
#include <asm/pgtable.h>
#include <asm/hardware/arm_timer.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
@ -236,7 +237,7 @@ static void ct_ca9x4_init(void)
}
MACHINE_START(VEXPRESS, "ARM-Versatile Express CA9x4")
.phys_io = V2M_UART0,
.phys_io = V2M_UART0 & SECTION_MASK,
.io_pg_offst = (__MMIO_P2V(V2M_UART0) >> 18) & 0xfffc,
.boot_params = PHYS_OFFSET + 0x00000100,
.map_io = ct_ca9x4_map_io,


@ -735,6 +735,25 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
Forget about fast user space cmpxchg support.
It is just not possible.
config DMA_CACHE_RWFO
bool "Enable read/write for ownership DMA cache maintenance"
depends on CPU_V6 && SMP
default y
help
The Snoop Control Unit on ARM11MPCore does not detect the
cache maintenance operations and the dma_{map,unmap}_area()
functions may leave stale cache entries on other CPUs. By
enabling this option, Read or Write For Ownership in the ARMv6
DMA cache maintenance functions is performed. These LDR/STR
instructions change the cache line state to shared or modified
so that the cache operation has the desired effect.
Note that the workaround is only valid on processors that do
not perform speculative loads into the D-cache. For such
processors, if cache maintenance operations are not broadcast
in hardware, other workarounds are needed (e.g. cache
maintenance broadcasting in software via FIQ).
config OUTER_CACHE
bool
@ -794,6 +813,8 @@ config ARM_L1_CACHE_SHIFT
config ARM_DMA_MEM_BUFFERABLE
bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
MACH_REALVIEW_PB11MP)
default y if CPU_V6 || CPU_V7
help
Historically, the kernel has used strongly ordered mappings to


@ -211,8 +211,9 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
#ifdef CONFIG_SMP
str r0, [r0] @ write for ownership
#ifdef CONFIG_DMA_CACHE_RWFO
ldr r2, [r0] @ read for ownership
str r2, [r0] @ write for ownership
#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
@ -234,7 +235,7 @@ v6_dma_inv_range:
v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_SMP
#ifdef CONFIG_DMA_CACHE_RWFO
ldr r2, [r0] @ read for ownership
#endif
#ifdef HARVARD_CACHE
@ -257,7 +258,7 @@ v6_dma_clean_range:
ENTRY(v6_dma_flush_range)
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef CONFIG_SMP
#ifdef CONFIG_DMA_CACHE_RWFO
ldr r2, [r0] @ read for ownership
str r2, [r0] @ write for ownership
#endif
@ -283,9 +284,13 @@ ENTRY(v6_dma_map_area)
add r1, r1, r0
teq r2, #DMA_FROM_DEVICE
beq v6_dma_inv_range
#ifndef CONFIG_DMA_CACHE_RWFO
b v6_dma_clean_range
#else
teq r2, #DMA_TO_DEVICE
beq v6_dma_clean_range
b v6_dma_flush_range
#endif
ENDPROC(v6_dma_map_area)
/*
@ -295,6 +300,11 @@ ENDPROC(v6_dma_map_area)
* - dir - DMA direction
*/
ENTRY(v6_dma_unmap_area)
#ifndef CONFIG_DMA_CACHE_RWFO
add r1, r1, r0
teq r2, #DMA_TO_DEVICE
bne v6_dma_inv_range
#endif
mov pc, lr
ENDPROC(v6_dma_unmap_area)
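Note: "read/write for ownership" means touching each line with an LDR or STR first, so it migrates into this CPU's cache (shared or modified state) and the following, non-broadcast maintenance instruction acts on the right copy. A rough C rendering of what the assembly above does per line (clean_dcache_line() is a made-up stand-in for the mcr):

static void rwfo_clean_range(volatile char *p, volatile char *end)
{
	for (; p < end; p += D_CACHE_LINE_SIZE) {
		(void)*p;		/* read for ownership: line becomes shared */
		clean_dcache_line(p);	/* hypothetical per-line clean op */
	}
}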


@ -24,15 +24,6 @@
#include <asm/tlbflush.h>
#include <asm/sizes.h>
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = ISA_DMA_THRESHOLD;
@ -123,6 +114,15 @@ static void __dma_free_buffer(struct page *page, size_t size)
}
#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
*/


@ -541,11 +541,11 @@ void omap_dm_timer_stop(struct omap_dm_timer *timer)
* timer is stopped
*/
udelay(3500000 / clk_get_rate(timer->fclk) + 1);
/* Ack possibly pending interrupt */
omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
OMAP_TIMER_INT_OVERFLOW);
#endif
}
/* Ack possibly pending interrupt */
omap_dm_timer_write_reg(timer, OMAP_TIMER_STAT_REG,
OMAP_TIMER_INT_OVERFLOW);
}
EXPORT_SYMBOL_GPL(omap_dm_timer_stop);


@ -673,6 +673,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
if (cpu_is_omap34xx() || cpu_is_omap44xx())
clk_disable(bank->dbck);
}
bank->dbck_enable_mask = val;
__raw_writel(val, reg);
}


@ -140,8 +140,10 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
return ERR_PTR(-ENOMEM);
err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
if (err)
if (err) {
kfree(sgt);
return ERR_PTR(err);
}
pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);


@ -2,8 +2,9 @@
# Makefile for code common across different PXA processor families
#
obj-y := dma.o pmu.o
obj-y := dma.o
obj-$(CONFIG_ARCH_PXA) += pmu.o
obj-$(CONFIG_GENERIC_GPIO) += gpio.o
obj-$(CONFIG_PXA3xx) += mfp.o
obj-$(CONFIG_ARCH_MMP) += mfp.o


@ -121,7 +121,7 @@ static inline void down_spin(struct spinaphore *ss)
ia64_invala();
for (;;) {
asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
if (time_before(t, serve))
return;
cpu_relax();


@ -10,6 +10,7 @@
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/param.h>
#include "init.h"


@ -102,8 +102,8 @@ static const u64 amd_perfmon_event_map[] =
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
[PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
};
static u64 amd_pmu_event_map(int hw_event)


@ -526,6 +526,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
{
struct task_struct *tsk = current;
int user_icebp = 0;
unsigned long dr6;
int si_code;
@ -534,6 +535,14 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
/* Filter out all the reserved bits which are preset to 1 */
dr6 &= ~DR6_RESERVED;
/*
* If dr6 has no reason to give us about the origin of this trap,
* then it's very likely the result of an icebp/int01 trap.
* User wants a sigtrap for that.
*/
if (!dr6 && user_mode(regs))
user_icebp = 1;
/* Catch kmemcheck conditions first of all! */
if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
return;
@ -575,7 +584,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
regs->flags &= ~X86_EFLAGS_TF;
}
si_code = get_si_code(tsk->thread.debugreg6);
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS))
if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
send_sigtrap(tsk, regs, error_code, si_code);
preempt_conditional_cli(regs);
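Note: the user-visible effect is that the undocumented icebp opcode (0xf1), which raises #DB with no DR6 cause bits set, now delivers the SIGTRAP userspace expects. A small standalone check (illustrative, not from the kernel tree):

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_trap(int sig)
{
	puts("SIGTRAP delivered");	/* behaviour with this fix */
	exit(0);
}

int main(void)
{
	signal(SIGTRAP, on_trap);
	asm volatile(".byte 0xf1");	/* icebp / int01 */
	puts("no SIGTRAP");		/* behaviour before the fix */
	return 1;
}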


@ -1149,13 +1149,10 @@ void init_request_from_bio(struct request *req, struct bio *bio)
else
req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
if (bio_rw_flagged(bio, BIO_RW_DISCARD))
req->cmd_flags |= REQ_DISCARD;
if (bio_rw_flagged(bio, BIO_RW_BARRIER))
req->cmd_flags |= REQ_SOFTBARRIER;
} else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
if (bio_rw_flagged(bio, BIO_RW_BARRIER))
req->cmd_flags |= REQ_HARDBARRIER;
if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
req->cmd_flags |= REQ_RW_SYNC;
if (bio_rw_flagged(bio, BIO_RW_META))
@ -1586,7 +1583,7 @@ void submit_bio(int rw, struct bio *bio)
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
if (bio_has_data(bio)) {
if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) {
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {


@ -14,7 +14,7 @@
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
#include "cfq.h"
/*
* tunables
@ -879,7 +879,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
if (!RB_EMPTY_NODE(&cfqg->rb_node))
cfq_rb_erase(&cfqg->rb_node, st);
cfqg->saved_workload_slice = 0;
blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
@ -939,8 +939,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
st->min_vdisktime);
blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
blkiocg_set_start_empty_time(&cfqg->blkg);
cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
@ -995,7 +995,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
/* Add group onto cgroup list */
sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
MKDEV(major, minor));
cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
@ -1079,7 +1079,7 @@ static void cfq_release_cfq_groups(struct cfq_data *cfqd)
* it from cgroup list, then it will take care of destroying
* cfqg also.
*/
if (!blkiocg_del_blkio_group(&cfqg->blkg))
if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
cfq_destroy_cfqg(cfqd, cfqg);
}
}
@ -1421,10 +1421,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
elv_rb_del(&cfqq->sort_list, rq);
cfqq->queued[rq_is_sync(rq)]--;
blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
rq_is_sync(rq));
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
cfq_add_rq_rb(rq);
blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
&cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
rq_is_sync(rq));
}
@ -1482,8 +1482,8 @@ static void cfq_remove_request(struct request *rq)
cfq_del_rq_rb(rq);
cfqq->cfqd->rq_queued--;
blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(rq),
rq_is_sync(rq));
cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(rq), rq_is_sync(rq));
if (rq_is_meta(rq)) {
WARN_ON(!cfqq->meta_pending);
cfqq->meta_pending--;
@ -1518,8 +1518,8 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
static void cfq_bio_merged(struct request_queue *q, struct request *req,
struct bio *bio)
{
blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, bio_data_dir(bio),
cfq_bio_sync(bio));
cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
bio_data_dir(bio), cfq_bio_sync(bio));
}
static void
@ -1539,8 +1539,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
if (cfqq->next_rq == next)
cfqq->next_rq = rq;
cfq_remove_request(next);
blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, rq_data_dir(next),
rq_is_sync(next));
cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
rq_data_dir(next), rq_is_sync(next));
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@ -1571,7 +1571,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
del_timer(&cfqd->idle_slice_timer);
blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}
static void __cfq_set_active_queue(struct cfq_data *cfqd,
@ -1580,7 +1580,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
if (cfqq) {
cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
cfqd->serving_prio, cfqd->serving_type);
blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
cfqq->slice_start = 0;
cfqq->dispatch_start = jiffies;
cfqq->allocated_slice = 0;
@ -1911,7 +1911,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
sl = cfqd->cfq_slice_idle;
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
}
@ -1931,7 +1931,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
elv_dispatch_sort(q, rq);
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
rq_data_dir(rq), rq_is_sync(rq));
}
@ -1986,6 +1986,15 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
int process_refs, new_process_refs;
struct cfq_queue *__cfqq;
/*
* If there are no process references on the new_cfqq, then it is
* unsafe to follow the ->new_cfqq chain as other cfqq's in the
* chain may have dropped their last reference (not just their
* last process reference).
*/
if (!cfqq_process_refs(new_cfqq))
return;
/* Avoid a circular list and skip interim queue merges */
while ((__cfqq = new_cfqq->new_cfqq)) {
if (__cfqq == cfqq)
@ -1994,17 +2003,17 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
}
process_refs = cfqq_process_refs(cfqq);
new_process_refs = cfqq_process_refs(new_cfqq);
/*
* If the process for the cfqq has gone away, there is no
* sense in merging the queues.
*/
if (process_refs == 0)
if (process_refs == 0 || new_process_refs == 0)
return;
/*
* Merge in the direction of the lesser amount of work.
*/
new_process_refs = cfqq_process_refs(new_cfqq);
if (new_process_refs >= process_refs) {
cfqq->new_cfqq = new_cfqq;
atomic_add(process_refs, &new_cfqq->ref);
@ -3248,7 +3257,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_clear_cfqq_wait_request(cfqq);
__blk_run_queue(cfqd->queue);
} else {
blkiocg_update_idle_time_stats(
cfq_blkiocg_update_idle_time_stats(
&cfqq->cfqg->blkg);
cfq_mark_cfqq_must_dispatch(cfqq);
}
@ -3276,7 +3285,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);
cfq_add_rq_rb(rq);
blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
&cfqd->serving_group->blkg, rq_data_dir(rq),
rq_is_sync(rq));
cfq_rq_enqueued(cfqd, cfqq, rq);
@ -3364,9 +3373,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
WARN_ON(!cfqq->dispatched);
cfqd->rq_in_driver--;
cfqq->dispatched--;
blkiocg_update_completion_stats(&cfqq->cfqg->blkg, rq_start_time_ns(rq),
rq_io_start_time_ns(rq), rq_data_dir(rq),
rq_is_sync(rq));
cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
rq_start_time_ns(rq), rq_io_start_time_ns(rq),
rq_data_dir(rq), rq_is_sync(rq));
cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
@ -3730,7 +3739,7 @@ static void cfq_exit_queue(struct elevator_queue *e)
cfq_put_async_queues(cfqd);
cfq_release_cfq_groups(cfqd);
blkiocg_del_blkio_group(&cfqd->root_group.blkg);
cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
spin_unlock_irq(q->queue_lock);
@ -3798,8 +3807,8 @@ static void *cfq_init_queue(struct request_queue *q)
*/
atomic_set(&cfqg->ref, 1);
rcu_read_lock();
blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
0);
cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
(void *)cfqd, 0);
rcu_read_unlock();
#endif
/*

block/cfq.h (new file, 115 lines)

@ -0,0 +1,115 @@
#ifndef _CFQ_H
#define _CFQ_H
#include "blk-cgroup.h"
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync)
{
blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync);
}
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue)
{
blkiocg_update_dequeue_stats(blkg, dequeue);
}
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time)
{
blkiocg_update_timeslice_used(blkg, time);
}
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
blkiocg_set_start_empty_time(blkg);
}
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_remove_stats(blkg, direction, sync);
}
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync)
{
blkiocg_update_io_merged_stats(blkg, direction, sync);
}
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_idle_time_stats(blkg);
}
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
blkiocg_update_avg_queue_size_stats(blkg);
}
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
blkiocg_update_set_idle_time_stats(blkg);
}
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync)
{
blkiocg_update_dispatch_stats(blkg, bytes, direction, sync);
}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
blkiocg_update_completion_stats(blkg, start_time, io_start_time,
direction, sync);
}
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {
blkiocg_add_blkio_group(blkcg, blkg, key, dev);
}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return blkiocg_del_blkio_group(blkg);
}
#else /* CFQ_GROUP_IOSCHED */
static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
unsigned long dequeue) {}
static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
unsigned long time) {}
static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
bool direction, bool sync) {}
static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
}
static inline void
cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {}
static inline void
cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {}
static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
uint64_t bytes, bool direction, bool sync) {}
static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {}
static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev) {}
static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg)
{
return 0;
}
#endif /* CFQ_GROUP_IOSCHED */
#endif
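Note: the new header is the usual kernel compile-out pattern: static inline wrappers that forward to the blkio-cgroup helpers when CONFIG_CFQ_GROUP_IOSCHED is set, and empty stubs otherwise, so call sites in cfq-iosched.c need no #ifdefs. The same shape in miniature, with hypothetical names:

#ifdef CONFIG_FOO_STATS
static inline void foo_update_stats(struct foo_group *fg)
{
	foo_stats_update(fg);			/* real helper, config on */
}
#else
static inline void foo_update_stats(struct foo_group *fg) {}	/* compiles away */
#endif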


@ -781,7 +781,7 @@ static int __init erst_init(void)
status = acpi_get_table(ACPI_SIG_ERST, 0,
(struct acpi_table_header **)&erst_tab);
if (status == AE_NOT_FOUND) {
pr_err(ERST_PFX "Table is not found!\n");
pr_info(ERST_PFX "Table is not found!\n");
goto err;
} else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);


@ -1053,6 +1053,16 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
return -ENODEV;
/*
* For some reason, MCP89 on MacBook 7,1 doesn't work with
* ahci, use ata_generic instead.
*/
if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
pdev->subsystem_device == 0xcb89)
return -ENODEV;
/* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
* At the moment, we can only use the AHCI mode. Let the users know
* that for SAS drives they're out of luck.


@ -32,6 +32,11 @@
* A generic parallel ATA driver using libata
*/
enum {
ATA_GEN_CLASS_MATCH = (1 << 0),
ATA_GEN_FORCE_DMA = (1 << 1),
};
/**
* generic_set_mode - mode setting
* @link: link to set up
@ -46,13 +51,17 @@
static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
{
struct ata_port *ap = link->ap;
const struct pci_device_id *id = ap->host->private_data;
int dma_enabled = 0;
struct ata_device *dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
/* Bits 5 and 6 indicate if DMA is active on master/slave */
if (ap->ioaddr.bmdma_addr)
if (id->driver_data & ATA_GEN_FORCE_DMA) {
dma_enabled = 0xff;
} else if (ap->ioaddr.bmdma_addr) {
/* Bits 5 and 6 indicate if DMA is active on master/slave */
dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
if (pdev->vendor == PCI_VENDOR_ID_CENATEK)
dma_enabled = 0xFF;
@ -126,7 +135,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
const struct ata_port_info *ppi[] = { &info, NULL };
/* Don't use the generic entry unless instructed to do so */
if (id->driver_data == 1 && all_generic_ide == 0)
if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0)
return -ENODEV;
/* Devices that need care */
@ -155,7 +164,7 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id
return rc;
pcim_pin_device(dev);
}
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, NULL, 0);
return ata_pci_bmdma_init_one(dev, ppi, &generic_sht, (void *)id, 0);
}
static struct pci_device_id ata_generic[] = {
@ -167,7 +176,15 @@ static struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
{ PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_CENATEK,PCI_DEVICE_ID_CENATEK_IDE),
.driver_data = ATA_GEN_FORCE_DMA },
/*
* For some reason, MCP89 on MacBook 7,1 doesn't work with
* ahci, use ata_generic instead.
*/
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA,
PCI_VENDOR_ID_APPLE, 0xcb89,
.driver_data = ATA_GEN_FORCE_DMA },
#if !defined(CONFIG_PATA_TOSHIBA) && !defined(CONFIG_PATA_TOSHIBA_MODULE)
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
@ -175,7 +192,8 @@ static struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), },
#endif
/* Must come last. If you add entries adjust this table appropriately */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL),
.driver_data = ATA_GEN_CLASS_MATCH },
{ 0, },
};


@ -324,6 +324,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = hpriv->mmio;
void __iomem *em_mmio = mmio + hpriv->em_loc;
const unsigned char *msg_buf = buf;
u32 em_ctl, msg;
unsigned long flags;
int i;
@ -343,8 +344,8 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
}
for (i = 0; i < size; i += 4) {
msg = buf[i] | buf[i + 1] << 8 |
buf[i + 2] << 16 | buf[i + 3] << 24;
msg = msg_buf[i] | msg_buf[i + 1] << 8 |
msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
writel(msg, em_mmio + i);
}
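Note: the msg_buf alias avoids a promotion hazard: buf has type const char *, and on targets where plain char is signed, a byte of 0x80 or above sign-extends when widened to int, smearing ones across the assembled word. A standalone illustration:

#include <stdio.h>

int main(void)
{
	const char sc = (char)0x90;	/* plain char, often signed */
	const unsigned char uc = 0x90;

	printf("%08x\n", (unsigned)sc);	/* ffffff90 where char is signed */
	printf("%08x\n", (unsigned)uc);	/* 00000090 as intended */
	return 0;
}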


@ -861,6 +861,7 @@ cciss_scsi_detect(int ctlr)
sh->n_io_port = 0; // I don't think we use these two...
sh->this_id = SELF_SCSI_ID;
sh->sg_tablesize = hba[ctlr]->maxsgentries;
sh->max_cmd_len = MAX_COMMAND_SIZE;
((struct cciss_scsi_adapter_data_t *)
hba[ctlr]->scsi_ctlr)->scsi_host = sh;


@ -386,7 +386,7 @@ static void __devexit cpqarray_remove_one_eisa (int i)
}
/* pdev is NULL for eisa */
static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
struct request_queue *q;
int j;
@ -503,7 +503,7 @@ Enomem4:
return -1;
}
static int __init cpqarray_init_one( struct pci_dev *pdev,
static int __devinit cpqarray_init_one( struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int i;
@ -740,7 +740,7 @@ __setup("smart2=", cpqarray_setup);
/*
* Find an EISA controller's signature. Set up an hba if we find it.
*/
static int __init cpqarray_eisa_detect(void)
static int __devinit cpqarray_eisa_detect(void)
{
int i=0, j;
__u32 board_id;


@ -1236,8 +1236,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Last part of the attaching process ... */
if (ns.conn >= C_CONNECTED &&
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
drbd_send_state(mdev);


@ -1114,6 +1114,12 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
mdev->new_state_tmp.i = ns.i;
ns.i = os.i;
ns.disk = D_NEGOTIATING;
/* We expect to receive up-to-date UUIDs soon.
To avoid a race in receive_state, free p_uuid while
holding req_lock. I.e. atomic with the state change */
kfree(mdev->p_uuid);
mdev->p_uuid = NULL;
}
rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);


@ -97,20 +97,18 @@ EXPORT_SYMBOL(agp_flush_chipset);
void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
mem->pages = NULL;
mem->vmalloc_flag = false;
if (size <= 2*PAGE_SIZE)
mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (mem->pages == NULL) {
mem->pages = vmalloc(size);
mem->vmalloc_flag = true;
}
}
EXPORT_SYMBOL(agp_alloc_page_array);
void agp_free_page_array(struct agp_memory *mem)
{
if (mem->vmalloc_flag) {
if (is_vmalloc_addr(mem->pages)) {
vfree(mem->pages);
} else {
kfree(mem->pages);
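Note: dropping vmalloc_flag works because is_vmalloc_addr() can classify the pointer itself, so the allocation and free sides pair up without hand-kept bookkeeping. A sketch of the pattern (invented names):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *table_alloc(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	return p ? p : vmalloc(size);	/* fall back for large tables */
}

static void table_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}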


@ -302,6 +302,12 @@ struct smi_info {
static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;
#ifdef CONFIG_PCI
static int pci_registered;
#endif
#ifdef CONFIG_PPC_OF
static int of_registered;
#endif
static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
static int num_max_busy_us;
@ -1018,7 +1024,7 @@ static int ipmi_thread(void *data)
else if (smi_result == SI_SM_IDLE)
schedule_timeout_interruptible(100);
else
schedule_timeout_interruptible(0);
schedule_timeout_interruptible(1);
}
return 0;
}
@ -3314,6 +3320,8 @@ static __devinit int init_ipmi_si(void)
rv = pci_register_driver(&ipmi_pci_driver);
if (rv)
printk(KERN_ERR PFX "Unable to register PCI driver: %d\n", rv);
else
pci_registered = 1;
#endif
#ifdef CONFIG_ACPI
@ -3330,6 +3338,7 @@ static __devinit int init_ipmi_si(void)
#ifdef CONFIG_PPC_OF
of_register_platform_driver(&ipmi_of_platform_driver);
of_registered = 1;
#endif
/* We prefer devices with interrupts, but in the case of a machine
@ -3383,11 +3392,13 @@ static __devinit int init_ipmi_si(void)
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
if (pci_registered)
pci_unregister_driver(&ipmi_pci_driver);
#endif
#ifdef CONFIG_PPC_OF
of_unregister_platform_driver(&ipmi_of_platform_driver);
if (of_registered)
of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
driver_unregister(&ipmi_driver.driver);
printk(KERN_WARNING PFX
@ -3478,14 +3489,16 @@ static __exit void cleanup_ipmi_si(void)
return;
#ifdef CONFIG_PCI
pci_unregister_driver(&ipmi_pci_driver);
if (pci_registered)
pci_unregister_driver(&ipmi_pci_driver);
#endif
#ifdef CONFIG_ACPI
pnp_unregister_driver(&ipmi_pnp_driver);
#endif
#ifdef CONFIG_PPC_OF
of_unregister_platform_driver(&ipmi_of_platform_driver);
if (of_registered)
of_unregister_platform_driver(&ipmi_of_platform_driver);
#endif
mutex_lock(&smi_infos_lock);


@ -143,7 +143,7 @@ static inline int which_bucket(unsigned int duration)
* This allows us to calculate
* E(duration)|iowait
*/
if (nr_iowait_cpu())
if (nr_iowait_cpu(smp_processor_id()))
bucket = BUCKETS/2;
if (duration < 10)
@ -175,7 +175,7 @@ static inline int performance_multiplier(void)
mult += 2 * get_loadavg();
/* for IO wait tasks (per cpu!) we add 5x each */
mult += 10 * nr_iowait_cpu();
mult += 10 * nr_iowait_cpu(smp_processor_id());
return mult;
}


@ -4257,10 +4257,12 @@ static int ppc440spe_adma_setup_irqs(struct ppc440spe_adma_device *adev,
struct ppc440spe_adma_chan *chan,
int *initcode)
{
struct of_device *ofdev;
struct device_node *np;
int ret;
np = container_of(adev->dev, struct of_device, dev)->node;
ofdev = container_of(adev->dev, struct of_device, dev);
np = ofdev->dev.of_node;
if (adev->id != PPC440SPE_XOR_ID) {
adev->err_irq = irq_of_parse_and_map(np, 1);
if (adev->err_irq == NO_IRQ) {


@ -1958,20 +1958,20 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
u32 value = 0;
int err_sym = 0;
amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
if (boot_cpu_data.x86 == 0x10) {
/* F3x180[EccSymbolSize]=1, x8 symbols */
if (boot_cpu_data.x86 == 0x10 &&
boot_cpu_data.x86_model > 7 &&
value & BIT(25)) {
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors), 8);
return map_err_sym_to_channel(err_sym, 8);
} else {
err_sym = decode_syndrome(syndrome, x4_vectors,
ARRAY_SIZE(x4_vectors), 4);
return map_err_sym_to_channel(err_sym, 4);
amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
/* F3x180[EccSymbolSize]=1 => x8 symbols */
if (boot_cpu_data.x86_model > 7 &&
value & BIT(25)) {
err_sym = decode_syndrome(syndrome, x8_vectors,
ARRAY_SIZE(x8_vectors), 8);
return map_err_sym_to_channel(err_sym, 8);
}
}
err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), 4);
return map_err_sym_to_channel(err_sym, 4);
}
/*


@ -1233,10 +1233,28 @@ static void __init i7core_xeon_pci_fixup(struct pci_id_table *table)
for (i = 0; i < MAX_SOCKET_BUSES; i++)
pcibios_scan_specific_bus(255-i);
}
pci_dev_put(pdev);
table++;
}
}
static unsigned i7core_pci_lastbus(void)
{
int last_bus = 0, bus;
struct pci_bus *b = NULL;
while ((b = pci_find_next_bus(b)) != NULL) {
bus = b->number;
debugf0("Found bus %d\n", bus);
if (bus > last_bus)
last_bus = bus;
}
debugf0("Last bus %d\n", last_bus);
return last_bus;
}
/*
* i7core_get_devices Find and perform 'get' operation on the MCH's
* device/functions we want to reference for this driver
@ -1244,7 +1262,8 @@ static void __init i7core_xeon_pci_fixup(struct pci_id_table *table)
* Need to 'get' device 16 func 1 and func 2
*/
int i7core_get_onedevice(struct pci_dev **prev, int devno,
struct pci_id_descr *dev_descr, unsigned n_devs)
struct pci_id_descr *dev_descr, unsigned n_devs,
unsigned last_bus)
{
struct i7core_dev *i7core_dev;
@ -1291,10 +1310,7 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
}
bus = pdev->bus->number;
if (bus == 0x3f)
socket = 0;
else
socket = 255 - bus;
socket = last_bus - bus;
i7core_dev = get_i7core_dev(socket);
if (!i7core_dev) {
@ -1358,17 +1374,21 @@ int i7core_get_onedevice(struct pci_dev **prev, int devno,
static int i7core_get_devices(struct pci_id_table *table)
{
int i, rc;
int i, rc, last_bus;
struct pci_dev *pdev = NULL;
struct pci_id_descr *dev_descr;
last_bus = i7core_pci_lastbus();
while (table && table->descr) {
dev_descr = table->descr;
for (i = 0; i < table->n_devs; i++) {
pdev = NULL;
do {
rc = i7core_get_onedevice(&pdev, i, &dev_descr[i],
table->n_devs);
rc = i7core_get_onedevice(&pdev, i,
&dev_descr[i],
table->n_devs,
last_bus);
if (rc < 0) {
if (i == 0) {
i = table->n_devs;
@ -1927,21 +1947,26 @@ fail:
* 0 for FOUND a device
* < 0 for error code
*/
static int probed = 0;
static int __devinit i7core_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int dev_idx = id->driver_data;
int rc;
struct i7core_dev *i7core_dev;
/* get the pci devices we want to reserve for our use */
mutex_lock(&i7core_edac_lock);
/*
* All memory controllers are allocated at the first pass.
*/
if (unlikely(dev_idx >= 1))
if (unlikely(probed >= 1)) {
mutex_unlock(&i7core_edac_lock);
return -EINVAL;
/* get the pci devices we want to reserve for our use */
mutex_lock(&i7core_edac_lock);
}
probed++;
rc = i7core_get_devices(pci_dev_table);
if (unlikely(rc < 0))
@ -2013,6 +2038,8 @@ static void __devexit i7core_remove(struct pci_dev *pdev)
i7core_dev->socket);
}
}
probed--;
mutex_unlock(&i7core_edac_lock);
}


@ -1,5 +1,5 @@
#
# GPIO infrastructure and expanders
# platform-neutral GPIO infrastructure and expanders
#
config ARCH_WANT_OPTIONAL_GPIOLIB


@ -1,4 +1,8 @@
# gpio support: dedicated expander chips, etc
# generic gpio support: dedicated expander chips, etc
#
# NOTE: platform-specific GPIO drivers don't belong in the
# drivers/gpio directory; put them with other platform setup
# code, IRQ controllers, board init, etc.
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG


@ -146,7 +146,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_fb_helper_conn
cvt = 1;
break;
case 'R':
if (!cvt)
if (cvt)
rb = 1;
break;
case 'm':
@ -1024,11 +1024,18 @@ static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_conne
}
create_mode:
mode = drm_cvt_mode(fb_helper_conn->connector->dev, cmdline_mode->xres,
cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
if (cmdline_mode->cvt)
mode = drm_cvt_mode(fb_helper_conn->connector->dev,
cmdline_mode->xres, cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
else
mode = drm_gtf_mode(fb_helper_conn->connector->dev,
cmdline_mode->xres, cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->interlace,
cmdline_mode->margins);
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
list_add(&mode->head, &fb_helper_conn->connector->modes);
return mode;


@ -208,7 +208,7 @@ static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo)
uint8_t ctl2;
if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) {
if (ctl2 & TFP410_CTL_2_HTPLG)
if (ctl2 & TFP410_CTL_2_RSEN)
ret = connector_status_connected;
else
ret = connector_status_disconnected;


@ -620,7 +620,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
else if (IS_I915GM(dev))
sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;


@ -128,9 +128,11 @@ static int i915_dma_cleanup(struct drm_device * dev)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
if (HAS_BSD(dev))
intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
@ -1229,7 +1231,7 @@ static void i915_warn_stolen(struct drm_device *dev)
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
@ -1410,6 +1412,10 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto cleanup_vga_client;
/* IIR "flip pending" bit means done if this bit is set */
if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
dev_priv->flip_pending_is_done = true;
intel_modeset_init(dev);
ret = drm_irq_install(dev);


@ -596,6 +596,7 @@ typedef struct drm_i915_private {
struct drm_crtc *plane_to_crtc_mapping[2];
struct drm_crtc *pipe_to_crtc_mapping[2];
wait_queue_head_t pending_flip_queue;
bool flip_pending_is_done;
/* Reclocking support */
bool render_reclock_avail;
@ -1076,7 +1077,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
drm_i915_private_t *dev_priv = dev->dev_private; \
if (I915_VERBOSE) \
DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
intel_ring_begin(dev, &dev_priv->render_ring, 4*(n)); \
intel_ring_begin(dev, &dev_priv->render_ring, (n)); \
} while (0)


@ -2239,7 +2239,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
page = read_cache_page_gfp(mapping, i,
mapping_gfp_mask (mapping) |
GFP_HIGHUSER |
__GFP_COLD |
gfpmask);
if (IS_ERR(page))


@ -940,22 +940,30 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
if (dev_priv->flip_pending_is_done)
intel_finish_page_flip_plane(dev, 0);
}
if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 1);
if (dev_priv->flip_pending_is_done)
intel_finish_page_flip_plane(dev, 1);
}
if (pipea_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 0);
intel_finish_page_flip(dev, 0);
if (!dev_priv->flip_pending_is_done)
intel_finish_page_flip(dev, 0);
}
if (pipeb_stats & vblank_status) {
vblank++;
drm_handle_vblank(dev, 1);
intel_finish_page_flip(dev, 1);
if (!dev_priv->flip_pending_is_done)
intel_finish_page_flip(dev, 1);
}
if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@ -1387,29 +1395,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
dev_priv->pipestat[1] = 0;
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
hotplug_en |= CRT_HOTPLUG_INT_EN;
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
i915_enable_irq(dev_priv, I915_DISPLAY_PORT_INTERRUPT);
dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
}
/*
@ -1427,16 +1416,41 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
/* Disable pipe interrupt enables, clear pending pipe status */
I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
/* Clear pending interrupt status */
I915_WRITE(IIR, I915_READ(IIR));
I915_WRITE(IER, enable_mask);
I915_WRITE(IMR, dev_priv->irq_mask_reg);
I915_WRITE(IER, enable_mask);
(void) I915_READ(IER);
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
/* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three
seconds later. So just do it once.
*/
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
/* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
opregion_enable_asle(dev);
return 0;
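Taken together, the interrupt-handler hunks above implement the following ordering. This is a simplified sketch, not the literal handler; FLIP_PENDING_BIT() is a stand-in for the per-plane I915_DISPLAY_PLANE_*_FLIP_PENDING_INTERRUPT tests:

/* On hardware where the flip-pending interrupt fires only once the
 * flip has actually completed (flip_pending_is_done), finish the flip
 * there; otherwise finish it on the next vblank. */
if (iir & FLIP_PENDING_BIT(plane)) {
    intel_prepare_page_flip(dev, plane);
    if (dev_priv->flip_pending_is_done)
        intel_finish_page_flip_plane(dev, plane);
}
if (pipe_stats & vblank_status) {
    drm_handle_vblank(dev, pipe);
    if (!dev_priv->flip_pending_is_done)
        intel_finish_page_flip(dev, pipe);
}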

View File

@ -178,6 +178,7 @@
#define MI_OVERLAY_OFF (0x2<<21)
#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
@ -368,6 +369,9 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
/* GEN6 interrupt control */
#define GEN6_RENDER_HWSTAM 0x2098
@ -1130,7 +1134,6 @@
#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)

View File

@ -234,14 +234,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
else
tries = 1;
hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= CRT_HOTPLUG_MASK;
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
for (i = 0; i < tries ; i++) {
unsigned long timeout;
/* turn on the FORCE_DETECT */

View File

@ -2970,11 +2970,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
if (srwm < 0)
srwm = 1;
srwm &= 0x3f;
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
if (IS_I965GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
if (IS_I965GM(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
@ -4483,6 +4485,7 @@ static void intel_idle_update(struct work_struct *work)
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
int enabled = 0;
if (!i915_powersave)
return;
@ -4491,21 +4494,22 @@ static void intel_idle_update(struct work_struct *work)
i915_update_gfx_val(dev_priv);
if (IS_I945G(dev) || IS_I945GM(dev)) {
DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
enabled++;
intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->busy)
intel_decrease_pllclock(crtc);
}
if ((enabled == 1) && (IS_I945G(dev) || IS_I945GM(dev))) {
DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
}
mutex_unlock(&dev->struct_mutex);
}
@ -4601,10 +4605,10 @@ static void intel_unpin_work_fn(struct work_struct *__work)
kfree(work);
}
void intel_finish_page_flip(struct drm_device *dev, int pipe)
static void do_intel_finish_page_flip(struct drm_device *dev,
struct drm_crtc *crtc)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj_priv;
@ -4648,6 +4652,22 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
schedule_work(&work->work);
}
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
do_intel_finish_page_flip(dev, crtc);
}
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
do_intel_finish_page_flip(dev, crtc);
}
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@ -4678,6 +4698,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
unsigned long flags;
int pipesrc_reg = (intel_crtc->pipe == 0) ? PIPEASRC : PIPEBSRC;
int ret, pipesrc;
u32 flip_mask;
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
@ -4731,15 +4752,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
if (intel_crtc->plane)
flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
else
flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
/* Wait for any previous flip to finish */
if (IS_GEN3(dev))
while (I915_READ(ISR) & flip_mask)
;
BEGIN_LP_RING(4);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
if (IS_I965G(dev)) {
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
pipesrc = I915_READ(pipesrc_reg);
OUT_RING(pipesrc & 0x0fff0fff);
} else {
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset);
OUT_RING(MI_NOOP);
}

View File

@ -135,6 +135,12 @@ intel_dp_link_required(struct drm_device *dev,
return pixel_clock * 3;
}
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
return (max_link_clock * max_lanes * 8) / 10;
}
static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@ -144,8 +150,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
> max_link_clock * max_lanes)
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
if (!IS_eDP(intel_encoder) &&
(intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes)))
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
@ -506,7 +515,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
<= link_avail) {
@ -521,6 +530,18 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
}
if (IS_eDP(intel_encoder)) {
/* okay we failed just pick the highest */
dp_priv->lane_count = max_lane_count;
dp_priv->link_bw = bws[max_clock];
adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
"count %d clock %d\n",
dp_priv->link_bw, dp_priv->lane_count,
adjusted_mode->clock);
return true;
}
return false;
}
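The new intel_dp_max_data_rate() helper accounts for 8b/10b channel coding: only 8 of every 10 link bits carry data, so usable bandwidth is 80% of the raw link rate. A standalone arithmetic check, mirroring the two helpers above in the driver's own units (link and mode values illustrative):

#include <stdio.h>

static int dp_link_required(int pixel_clock)
{
    return pixel_clock * 3;    /* 3 bytes per pixel */
}

static int dp_max_data_rate(int max_link_clock, int max_lanes)
{
    return (max_link_clock * max_lanes * 8) / 10;    /* 8b/10b */
}

int main(void)
{
    int link = 270000, lanes = 4, pixel = 200000;

    /* 600000 required vs 864000 available: the mode fits */
    printf("required %d, available %d\n",
           dp_link_required(pixel), dp_max_data_rate(link, lanes));
    return 0;
}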

View File

@ -224,6 +224,7 @@ extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
extern void intel_setup_overlay(struct drm_device *dev);
extern void intel_cleanup_overlay(struct drm_device *dev);

View File

@ -983,8 +983,8 @@ void intel_lvds_init(struct drm_device *dev)
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
DRM_MODE_SCALE_ASPECT);
lvds_priv->fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC

View File

@ -94,7 +94,7 @@ render_ring_flush(struct drm_device *dev,
#if WATCH_EXEC
DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
intel_ring_begin(dev, ring, 8);
intel_ring_begin(dev, ring, 2);
intel_ring_emit(dev, ring, cmd);
intel_ring_emit(dev, ring, MI_NOOP);
intel_ring_advance(dev, ring);
@ -358,7 +358,7 @@ bsd_ring_flush(struct drm_device *dev,
u32 invalidate_domains,
u32 flush_domains)
{
intel_ring_begin(dev, ring, 8);
intel_ring_begin(dev, ring, 2);
intel_ring_emit(dev, ring, MI_FLUSH);
intel_ring_emit(dev, ring, MI_NOOP);
intel_ring_advance(dev, ring);
@ -687,6 +687,7 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
*virt++ = MI_NOOP;
ring->tail = 0;
ring->space = ring->head - 8;
return 0;
}
@ -721,8 +722,9 @@ int intel_wait_ring_buffer(struct drm_device *dev,
}
void intel_ring_begin(struct drm_device *dev,
struct intel_ring_buffer *ring, int n)
struct intel_ring_buffer *ring, int num_dwords)
{
int n = 4*num_dwords;
if (unlikely(ring->tail + n > ring->size))
intel_wrap_ring_buffer(dev, ring);
if (unlikely(ring->space < n))
@ -752,7 +754,7 @@ void intel_fill_struct(struct drm_device *dev,
{
unsigned int *virt = ring->virtual_start + ring->tail;
BUG_ON((len&~(4-1)) != 0);
intel_ring_begin(dev, ring, len);
intel_ring_begin(dev, ring, len/4);
memcpy(virt, data, len);
ring->tail += len;
ring->tail &= ring->size - 1;
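After this change every caller of intel_ring_begin() reserves space in dwords, and the dword-to-byte conversion lives in exactly one place. A toy, compilable model of the convention (names and sizes illustrative, not driver code):

#include <stdio.h>

struct toy_ring {
    int size;     /* bytes */
    int space;    /* bytes free */
};

static void toy_ring_begin(struct toy_ring *r, int num_dwords)
{
    int n = 4 * num_dwords;    /* convert once, at the API boundary */

    if (r->space < n)
        printf("would wait for %d more bytes\n", n - r->space);
    r->space -= n;
}

int main(void)
{
    struct toy_ring r = { .size = 64, .space = 64 };

    toy_ring_begin(&r, 2);    /* e.g. MI_FLUSH + MI_NOOP */
    printf("space left: %d bytes\n", r.space);    /* 56 */
    return 0;
}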

View File

@ -498,7 +498,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
pll->flags |= (RADEON_PLL_USE_FRAC_FB_DIV |
pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
RADEON_PLL_PREFER_CLOSEST_LOWER);
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */

View File

@ -607,7 +607,7 @@ static void evergreen_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
@ -1222,11 +1222,11 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
ps_thread_count = 128;
sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
sq_thread_resource_mgmt |= NUM_VS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
sq_thread_resource_mgmt |= NUM_GS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
sq_thread_resource_mgmt |= NUM_ES_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
sq_thread_resource_mgmt_2 = NUM_HS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
sq_thread_resource_mgmt_2 |= NUM_LS_THREADS(((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8;
sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
@ -1260,6 +1260,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(VGT_GS_VERTEX_REUSE, 16);
WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
WREG32(VGT_OUT_DEALLOC_CNTL, 16);
WREG32(CB_PERF_CTR0_SEL_0, 0);
WREG32(CB_PERF_CTR0_SEL_1, 0);
WREG32(CB_PERF_CTR1_SEL_0, 0);
@ -1269,6 +1272,26 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
WREG32(CB_PERF_CTR3_SEL_0, 0);
WREG32(CB_PERF_CTR3_SEL_1, 0);
/* clear render buffer base addresses */
WREG32(CB_COLOR0_BASE, 0);
WREG32(CB_COLOR1_BASE, 0);
WREG32(CB_COLOR2_BASE, 0);
WREG32(CB_COLOR3_BASE, 0);
WREG32(CB_COLOR4_BASE, 0);
WREG32(CB_COLOR5_BASE, 0);
WREG32(CB_COLOR6_BASE, 0);
WREG32(CB_COLOR7_BASE, 0);
WREG32(CB_COLOR8_BASE, 0);
WREG32(CB_COLOR9_BASE, 0);
WREG32(CB_COLOR10_BASE, 0);
WREG32(CB_COLOR11_BASE, 0);
/* set the shader const cache sizes to 0 */
for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
WREG32(i, 0);
for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
WREG32(i, 0);
hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

View File

@ -1197,7 +1197,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
@ -1209,7 +1209,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
ib[idx+1+(i*8)+4] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
r = evergreen_check_texture_resource(p, idx+1+(i*8),
texture, mipmap);

View File

@ -713,6 +713,9 @@
#define SQ_GSVS_RING_OFFSET_2 0x28930
#define SQ_GSVS_RING_OFFSET_3 0x28934
#define SQ_ALU_CONST_BUFFER_SIZE_PS_0 0x28140
#define SQ_ALU_CONST_BUFFER_SIZE_HS_0 0x28f80
#define SQ_ALU_CONST_CACHE_PS_0 0x28940
#define SQ_ALU_CONST_CACHE_PS_1 0x28944
#define SQ_ALU_CONST_CACHE_PS_2 0x28948

View File

@ -1628,6 +1628,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_RGB332:
case RADEON_TXFORMAT_Y8:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_AI88:
case RADEON_TXFORMAT_ARGB1555:
@ -1639,12 +1640,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_TXFORMAT_LDUDV655:
case RADEON_TXFORMAT_DUDV88:
track->textures[i].cpp = 2;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_ARGB8888:
case RADEON_TXFORMAT_RGBA8888:
case RADEON_TXFORMAT_SHADOW32:
case RADEON_TXFORMAT_LDUDUV8888:
track->textures[i].cpp = 4;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case RADEON_TXFORMAT_DXT1:
track->textures[i].cpp = 1;
@ -2604,12 +2607,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
int surf_index = reg * 16;
int flags = 0;
/* r100/r200 divide by 16 */
if (rdev->family < CHIP_R300)
flags = pitch / 16;
else
flags = pitch / 8;
if (rdev->family <= CHIP_RS200) {
if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
== (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
@ -2633,6 +2630,20 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
if (tiling_flags & RADEON_TILING_SWAP_32BIT)
flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
if (ASIC_IS_RN50(rdev))
pitch /= 16;
}
/* r100/r200 divide by 16 */
if (rdev->family < CHIP_R300)
flags |= pitch / 16;
else
flags |= pitch / 8;
DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
@ -3147,33 +3158,6 @@ static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
DRM_ERROR("compress format %d\n", t->compress_format);
}
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
struct radeon_bo *cube_robj;
unsigned long size;
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
size = w * h;
size *= track->textures[idx].cpp;
size += track->textures[idx].cube_info[face].offset;
if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
}
return 0;
}
static int r100_track_compress_size(int compress_format, int w, int h)
{
int block_width, block_height, block_bytes;
@ -3204,6 +3188,37 @@ static int r100_track_compress_size(int compress_format, int w, int h)
return sz;
}
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
struct radeon_bo *cube_robj;
unsigned long size;
unsigned compress_format = track->textures[idx].compress_format;
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
if (compress_format) {
size = r100_track_compress_size(compress_format, w, h);
} else
size = w * h;
size *= track->textures[idx].cpp;
size += track->textures[idx].cube_info[face].offset;
if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
}
return 0;
}
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
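r100_track_compress_size(), whose body is not shown in this hunk, sizes block-compressed textures; with standard DXT geometry a 4x4 texel block takes 8 bytes for DXT1 and 16 bytes for DXT3/5. A standalone check of that arithmetic (rounding and minimum-width edge cases simplified):

#include <stdio.h>

/* Simplified DXT sizing: 4x4 texel blocks, rounded up. */
static long compress_size(int block_bytes, int w, int h)
{
    long wblocks = (w + 3) / 4;
    long hblocks = (h + 3) / 4;

    return wblocks * hblocks * block_bytes;
}

int main(void)
{
    /* 64x64 DXT1: 16*16 blocks * 8 bytes = 2048 bytes */
    printf("DXT1 64x64: %ld bytes\n", compress_size(8, 64, 64));
    /* 64x64 DXT3/5: 16*16 blocks * 16 bytes = 4096 bytes */
    printf("DXT5 64x64: %ld bytes\n", compress_size(16, 64, 64));
    return 0;
}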

View File

@ -415,6 +415,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
/* 2D, 3D, CUBE */
switch (tmp) {
case 0:
case 3:
case 4:
case 5:
case 6:
case 7:
@ -450,6 +452,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_RGB332:
case R200_TXFORMAT_Y8:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_AI88:
case R200_TXFORMAT_ARGB1555:
@ -461,6 +464,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_DVDU88:
case R200_TXFORMAT_AVYU4444:
track->textures[i].cpp = 2;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_ARGB8888:
case R200_TXFORMAT_RGBA8888:
@ -468,6 +472,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_TXFORMAT_BGR111110:
case R200_TXFORMAT_LDVDU8888:
track->textures[i].cpp = 4;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R200_TXFORMAT_DXT1:
track->textures[i].cpp = 1;

View File

@ -881,6 +881,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_Y4X4:
case R300_TX_FORMAT_Z3Y3X2:
track->textures[i].cpp = 1;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_X16:
case R300_TX_FORMAT_Y8X8:
@ -892,6 +893,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_B8G8_B8G8:
case R300_TX_FORMAT_G8R8_G8B8:
track->textures[i].cpp = 2;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_Y16X16:
case R300_TX_FORMAT_Z11Y11X10:
@ -902,14 +904,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_TX_FORMAT_FL_I32:
case 0x1e:
track->textures[i].cpp = 4;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_W16Z16Y16X16:
case R300_TX_FORMAT_FL_R16G16B16A16:
case R300_TX_FORMAT_FL_I32A32:
track->textures[i].cpp = 8;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_FL_R32G32B32A32:
track->textures[i].cpp = 16;
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
break;
case R300_TX_FORMAT_DXT1:
track->textures[i].cpp = 1;

View File

@ -130,9 +130,14 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
break;
}
}
} else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index - 1;
} else {
if (rdev->pm.current_power_state_index == 0)
rdev->pm.requested_power_state_index =
rdev->pm.num_power_states - 1;
else
rdev->pm.requested_power_state_index =
rdev->pm.current_power_state_index - 1;
}
}
rdev->pm.requested_clock_mode_index = 0;
/* don't use the power state if crtcs are active and no display flag is set */
@ -1097,7 +1102,7 @@ static void r600_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
@ -1219,8 +1224,10 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.visible_vram_size = rdev->mc.aper_size;
r600_vram_gtt_location(rdev, &rdev->mc);
if (rdev->flags & RADEON_IS_IGP)
if (rdev->flags & RADEON_IS_IGP) {
rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
}
radeon_update_bandwidth_info(rdev);
return 0;
}

View File

@ -177,6 +177,7 @@ void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
void rs690_pm_info(struct radeon_device *rdev);
/*
* Fences.
@ -619,7 +620,8 @@ enum radeon_dynpm_state {
DYNPM_STATE_DISABLED,
DYNPM_STATE_MINIMUM,
DYNPM_STATE_PAUSED,
DYNPM_STATE_ACTIVE
DYNPM_STATE_ACTIVE,
DYNPM_STATE_SUSPENDED,
};
enum radeon_dynpm_action {
DYNPM_ACTION_NONE,

View File

@ -780,6 +780,13 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_R423:
case CHIP_RV410:
rdev->asic = &r420_asic;
/* handle macs */
if (rdev->bios == NULL) {
rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->set_memory_clock = NULL;
}
break;
case CHIP_RS400:
case CHIP_RS480:

View File

@ -48,6 +48,10 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
if (!(rdev->flags & RADEON_IS_IGP))
if (!radeon_card_posted(rdev))
return false;
rdev->bios = NULL;
vram_base = drm_get_resource_start(rdev->ddev, 0);
bios = ioremap(vram_base, size);

View File

@ -1411,6 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
rdev->mode_info.connector_table = CT_IMAC_G5_ISIGHT;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
if (ASIC_IS_RN50(rdev))
rdev->mode_info.connector_table = CT_RN50_POWER;
else
#endif
rdev->mode_info.connector_table = CT_GENERIC;
}
@ -1853,6 +1858,33 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
case CT_RN50_POWER:
DRM_INFO("Connector Table: %d (rn50-power)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
@ -1906,15 +1938,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
return false;
}
/* Some RV100 cards with 2 VGA ports show up with DVI+VGA */
if (dev->pdev->device == 0x5159 &&
dev->pdev->subsystem_vendor == 0x1002 &&
dev->pdev->subsystem_device == 0x013a) {
if (*legacy_connector == CONNECTOR_DVI_I_LEGACY)
*legacy_connector = CONNECTOR_CRT_LEGACY;
}
/* X300 card with extra non-existent DVI port */
if (dev->pdev->device == 0x5B60 &&
dev->pdev->subsystem_vendor == 0x17af &&
@ -3019,6 +3042,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
combios_write_ram_size(dev);
}
/* quirk for rs4xx HP nx6125 laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS480 &&
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x308b)
return;
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)

View File

@ -194,7 +194,7 @@ unpin:
fail:
drm_gem_object_unreference_unlocked(obj);
return 0;
return ret;
}
int radeon_crtc_cursor_move(struct drm_crtc *crtc,

View File

@ -779,6 +779,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
int radeon_resume_kms(struct drm_device *dev)
{
struct drm_connector *connector;
struct radeon_device *rdev = dev->dev_private;
if (rdev->powered_down)
@ -797,6 +798,12 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_resume(rdev);
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
/* turn on display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();

View File

@ -1072,6 +1072,8 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
if (!ASIC_IS_DCE4(rdev))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
@ -1079,8 +1081,6 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
}
if (!ASIC_IS_DCE4(rdev))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:

View File

@ -928,16 +928,14 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
if (ASIC_IS_R300(rdev)) {
gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1;
disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
}
if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev))
disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
else
} else if (rdev->family != CHIP_R200)
disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
if (rdev->family == CHIP_R200)
else if (rdev->family == CHIP_R200)
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
if (rdev->family >= CHIP_R200)
disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL);
if (is_tv) {
uint32_t dac_cntl;
@ -1002,15 +1000,13 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
if (ASIC_IS_R300(rdev)) {
WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
}
} else if (rdev->family != CHIP_R200)
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
else if (rdev->family == CHIP_R200)
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
if (rdev->family >= CHIP_R200)
WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl);
else
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
if (rdev->family == CHIP_R200)
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
if (is_tv)
radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode);

View File

@ -206,6 +206,7 @@ enum radeon_connector_table {
CT_MINI_INTERNAL,
CT_IMAC_G5_ISIGHT,
CT_EMAC,
CT_RN50_POWER,
};
enum radeon_dvo_chip {

View File

@ -397,13 +397,20 @@ static ssize_t radeon_set_pm_method(struct device *dev,
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
mutex_unlock(&rdev->pm.mutex);
} else if (strncmp("profile", buf, strlen("profile")) == 0) {
bool flush_wq = false;
mutex_lock(&rdev->pm.mutex);
rdev->pm.pm_method = PM_METHOD_PROFILE;
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
flush_wq = true;
}
/* disable dynpm */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
rdev->pm.pm_method = PM_METHOD_PROFILE;
mutex_unlock(&rdev->pm.mutex);
if (flush_wq)
flush_workqueue(rdev->wq);
} else {
DRM_ERROR("invalid power method!\n");
goto fail;
@ -418,9 +425,18 @@ static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon
void radeon_pm_suspend(struct radeon_device *rdev)
{
bool flush_wq = false;
mutex_lock(&rdev->pm.mutex);
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
flush_wq = true;
}
mutex_unlock(&rdev->pm.mutex);
if (flush_wq)
flush_workqueue(rdev->wq);
}
void radeon_pm_resume(struct radeon_device *rdev)
@ -432,6 +448,12 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
radeon_pm_compute_clocks(rdev);
}
@ -486,6 +508,8 @@ int radeon_pm_init(struct radeon_device *rdev)
void radeon_pm_fini(struct radeon_device *rdev)
{
if (rdev->pm.num_power_states > 1) {
bool flush_wq = false;
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
rdev->pm.profile = PM_PROFILE_DEFAULT;
@ -493,13 +517,16 @@ void radeon_pm_fini(struct radeon_device *rdev)
radeon_pm_set_clocks(rdev);
} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
/* cancel work */
cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
cancel_delayed_work(&rdev->pm.dynpm_idle_work);
flush_wq = true;
/* reset default clocks */
rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
radeon_pm_set_clocks(rdev);
}
mutex_unlock(&rdev->pm.mutex);
if (flush_wq)
flush_workqueue(rdev->wq);
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
@ -720,12 +747,12 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
}
queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
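The pattern repeated in these hunks avoids waiting on the idle work while pm.mutex is held: the work handler itself takes pm.mutex, so cancel_delayed_work_sync() under the lock could deadlock. Condensed, the shape is:

bool flush_wq = false;

mutex_lock(&rdev->pm.mutex);
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
    cancel_delayed_work(&rdev->pm.dynpm_idle_work);    /* non-blocking */
    flush_wq = true;
}
mutex_unlock(&rdev->pm.mutex);

if (flush_wq)                      /* wait only after dropping the   */
    flush_workqueue(rdev->wq);     /* mutex the handler also takes   */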
/*

View File

@ -80,8 +80,8 @@ evergreen 0x9400
0x00028010 DB_RENDER_OVERRIDE2
0x00028028 DB_STENCIL_CLEAR
0x0002802C DB_DEPTH_CLEAR
0x00028034 PA_SC_SCREEN_SCISSOR_BR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
0x0002805C DB_DEPTH_SLICE
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
@ -460,8 +460,8 @@ evergreen 0x9400
0x00028844 SQ_PGM_RESOURCES_PS
0x00028848 SQ_PGM_RESOURCES_2_PS
0x0002884C SQ_PGM_EXPORTS_PS
0x0002885C SQ_PGM_RESOURCES_VS
0x00028860 SQ_PGM_RESOURCES_2_VS
0x00028860 SQ_PGM_RESOURCES_VS
0x00028864 SQ_PGM_RESOURCES_2_VS
0x00028878 SQ_PGM_RESOURCES_GS
0x0002887C SQ_PGM_RESOURCES_2_GS
0x00028890 SQ_PGM_RESOURCES_ES
@ -469,8 +469,8 @@ evergreen 0x9400
0x000288A8 SQ_PGM_RESOURCES_FS
0x000288BC SQ_PGM_RESOURCES_HS
0x000288C0 SQ_PGM_RESOURCES_2_HS
0x000288D0 SQ_PGM_RESOURCES_LS
0x000288D4 SQ_PGM_RESOURCES_2_LS
0x000288D4 SQ_PGM_RESOURCES_LS
0x000288D8 SQ_PGM_RESOURCES_2_LS
0x000288E8 SQ_LDS_ALLOC
0x000288EC SQ_LDS_ALLOC_PS
0x000288F0 SQ_VTX_SEMANTIC_CLEAR

View File

@ -79,7 +79,13 @@ void rs690_pm_info(struct radeon_device *rdev)
tmp.full = dfixed_const(100);
rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
if (info->info.usK8MemoryClock)
rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
else if (rdev->clock.default_mclk) {
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
} else
rdev->pm.igp_system_mclk.full = dfixed_const(400);
rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
break;
@ -87,34 +93,31 @@ void rs690_pm_info(struct radeon_device *rdev)
tmp.full = dfixed_const(100);
rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
if (info->info_v2.ulBootUpUMAClock)
rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
else if (rdev->clock.default_mclk)
rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
else
rdev->pm.igp_system_mclk.full = dfixed_const(66700);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
break;
default:
tmp.full = dfixed_const(100);
/* We assume the slowest possible clock, i.e. worst case */
/* DDR 333 MHz */
rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
rdev->pm.igp_system_mclk.full = dfixed_const(100);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
rdev->pm.igp_system_mclk.full = dfixed_const(200);
rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
break;
}
} else {
tmp.full = dfixed_const(100);
/* We assume the slowest possible clock, i.e. worst case */
/* DDR 333 MHz */
rdev->pm.igp_sideport_mclk.full = dfixed_const(333);
/* FIXME: system clock ? */
rdev->pm.igp_system_mclk.full = dfixed_const(100);
rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
rdev->pm.igp_ht_link_clk.full = dfixed_const(200);
rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
rdev->pm.igp_system_mclk.full = dfixed_const(200);
rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
rdev->pm.igp_ht_link_width.full = dfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
}
@ -228,10 +231,6 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
fixed20_12 a, b, c;
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
/* FIXME: detect IGP with sideport memory, I don't think there is any
* such product available
*/
bool sideport = false;
if (!crtc->base.enabled) {
/* FIXME: wouldn't it better to set priority mark to maximum */
@ -300,7 +299,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
/* Maximum bandwidth is the minimum bandwidth of all components */
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
if (sideport) {
if (rdev->mc.igp_sideport_enabled) {
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
rdev->pm.sideport_bandwidth.full)
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;

View File

@ -224,7 +224,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
WREG32(MC_VM_FB_LOCATION, tmp);
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7));
WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
if (rdev->flags & RADEON_IS_AGP) {
WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);

View File

@ -667,7 +667,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
struct page *p = NULL;
int gfp_flags = 0;
int gfp_flags = GFP_USER;
int r;
/* set zero flag for page allocation if required */
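GFP_USER gives the allocation a valid base policy (may sleep, may reclaim) where the literal 0 specified none. Modifier bits are then OR'd on top; a condensed sketch of how the mask is built in this function, assuming the TTM_PAGE_FLAG_* names from ttm_bo_driver.h of this era:

int gfp_flags = GFP_USER;           /* base policy, not 0 */

if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
    gfp_flags |= __GFP_ZERO;        /* modifier: return zeroed pages */
if (flags & TTM_PAGE_FLAG_DMA32)
    gfp_flags |= GFP_DMA32;         /* modifier: 32-bit addressable */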

View File

@ -9,19 +9,13 @@
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/i2c.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/z2_battery.h>
#define Z2_DEFAULT_NAME "Z2"

View File

@ -777,7 +777,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
read_rtc:
/* read RTC registers */
tmp = ds1307->read_block_data(ds1307->client, 0, 8, buf);
tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf);
if (tmp != 8) {
pr_debug("read error %d\n", tmp);
err = -EIO;
@ -862,7 +862,7 @@ read_rtc:
if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM)
tmp += 12;
i2c_smbus_write_byte_data(client,
DS1307_REG_HOUR,
ds1307->offset + DS1307_REG_HOUR,
bin2bcd(tmp));
}

View File

@ -930,6 +930,83 @@ static void cpm_uart_config_port(struct uart_port *port, int flags)
}
}
#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE)
/*
* Write a string to the serial port
* Note that this is called with interrupts already disabled
*/
static void cpm_uart_early_write(struct uart_cpm_port *pinfo,
const char *string, u_int count)
{
unsigned int i;
cbd_t __iomem *bdp, *bdbase;
unsigned char *cpm_outp_addr;
/* Get the address of the host memory buffer.
*/
bdp = pinfo->tx_cur;
bdbase = pinfo->tx_bd_base;
/*
* Now, do each character. This is not as bad as it looks
* since this is a holding FIFO and not a transmitting FIFO.
* We could add the complexity of filling the entire transmit
* buffer, but we would just wait longer between accesses......
*/
for (i = 0; i < count; i++, string++) {
/* Wait for transmitter fifo to empty.
* Ready indicates output is ready, and xmt is doing
* that, not that it is ready for us to send.
*/
while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
;
/* Send the character out.
* If the buffer address is in the CPM DPRAM, don't
* convert it.
*/
cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
pinfo);
*cpm_outp_addr = *string;
out_be16(&bdp->cbd_datlen, 1);
setbits16(&bdp->cbd_sc, BD_SC_READY);
if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
bdp = bdbase;
else
bdp++;
/* if a LF, also do CR... */
if (*string == 10) {
while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
;
cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr),
pinfo);
*cpm_outp_addr = 13;
out_be16(&bdp->cbd_datlen, 1);
setbits16(&bdp->cbd_sc, BD_SC_READY);
if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
bdp = bdbase;
else
bdp++;
}
}
/*
* Finally, Wait for transmitter & holding register to empty
* and restore the IER
*/
while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
;
pinfo->tx_cur = bdp;
}
#endif
#ifdef CONFIG_CONSOLE_POLL
/* Serial polling routines for writing and reading from the uart while
* in an interrupt or debug context.
@ -999,7 +1076,7 @@ static void cpm_put_poll_char(struct uart_port *port,
static char ch[2];
ch[0] = (char)c;
cpm_uart_early_write(pinfo->port.line, ch, 1);
cpm_uart_early_write(pinfo, ch, 1);
}
#endif /* CONFIG_CONSOLE_POLL */
@ -1130,9 +1207,6 @@ static void cpm_uart_console_write(struct console *co, const char *s,
u_int count)
{
struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
unsigned int i;
cbd_t __iomem *bdp, *bdbase;
unsigned char *cp;
unsigned long flags;
int nolock = oops_in_progress;
@ -1142,66 +1216,7 @@ static void cpm_uart_console_write(struct console *co, const char *s,
spin_lock_irqsave(&pinfo->port.lock, flags);
}
/* Get the address of the host memory buffer.
*/
bdp = pinfo->tx_cur;
bdbase = pinfo->tx_bd_base;
/*
* Now, do each character. This is not as bad as it looks
* since this is a holding FIFO and not a transmitting FIFO.
* We could add the complexity of filling the entire transmit
* buffer, but we would just wait longer between accesses......
*/
for (i = 0; i < count; i++, s++) {
/* Wait for transmitter fifo to empty.
* Ready indicates output is ready, and xmt is doing
* that, not that it is ready for us to send.
*/
while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
;
/* Send the character out.
* If the buffer address is in the CPM DPRAM, don't
* convert it.
*/
cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
*cp = *s;
out_be16(&bdp->cbd_datlen, 1);
setbits16(&bdp->cbd_sc, BD_SC_READY);
if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
bdp = bdbase;
else
bdp++;
/* if a LF, also do CR... */
if (*s == 10) {
while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
;
cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo);
*cp = 13;
out_be16(&bdp->cbd_datlen, 1);
setbits16(&bdp->cbd_sc, BD_SC_READY);
if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP)
bdp = bdbase;
else
bdp++;
}
}
/*
* Finally, Wait for transmitter & holding register to empty
* and restore the IER
*/
while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0)
;
pinfo->tx_cur = bdp;
cpm_uart_early_write(pinfo, s, count);
if (unlikely(nolock)) {
local_irq_restore(flags);

View File

@ -225,9 +225,9 @@ static struct bat_attribute *mesh_attrs[] = {
NULL,
};
static ssize_t transtable_local_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
static ssize_t transtable_local_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
{
struct device *dev = to_dev(kobj->parent);
struct net_device *net_dev = to_net_dev(dev);
@ -235,9 +235,9 @@ static ssize_t transtable_local_read(struct kobject *kobj,
return hna_local_fill_buffer_text(net_dev, buff, count, off);
}
static ssize_t transtable_global_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
static ssize_t transtable_global_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
{
struct device *dev = to_dev(kobj->parent);
struct net_device *net_dev = to_net_dev(dev);
@ -245,9 +245,9 @@ static ssize_t transtable_global_read(struct kobject *kobj,
return hna_global_fill_buffer_text(net_dev, buff, count, off);
}
static ssize_t originators_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
static ssize_t originators_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
{
struct device *dev = to_dev(kobj->parent);
struct net_device *net_dev = to_net_dev(dev);
@ -255,9 +255,9 @@ static ssize_t originators_read(struct kobject *kobj,
return orig_fill_buffer_text(net_dev, buff, count, off);
}
static ssize_t vis_data_read(struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
static ssize_t vis_data_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buff, loff_t off, size_t count)
{
struct device *dev = to_dev(kobj->parent);
struct net_device *net_dev = to_net_dev(dev);
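These hunks track the 2.6.35 sysfs API change: binary-attribute read/write callbacks now receive the opening struct file as their first argument. The expected prototype, as a minimal sketch:

static ssize_t example_read(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *bin_attr,
                            char *buff, loff_t off, size_t count)
{
    /* filp is unused here, but lets callbacks reach per-open state */
    return 0;
}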

View File

@ -196,7 +196,7 @@ ssize_t bat_device_read(struct file *file, char __user *buf, size_t count,
kfree(device_packet);
if (error)
return error;
return -EFAULT;
return sizeof(struct icmp_packet);
}

View File

@ -824,9 +824,12 @@ static int pci9111_ai_do_cmd(struct comedi_device *dev,
plx9050_interrupt_control(dev_private->lcr_io_base, true, true,
false, true, true);
dev_private->scan_delay =
(async_cmd->scan_begin_arg / (async_cmd->convert_arg *
async_cmd->chanlist_len)) - 1;
if (async_cmd->scan_begin_src == TRIG_TIMER) {
dev_private->scan_delay =
(async_cmd->scan_begin_arg /
(async_cmd->convert_arg *
async_cmd->chanlist_len)) - 1;
}
break;

View File

@ -52,7 +52,6 @@ Please report success/failure with other different cards to
#include "8255.h"
#define PCI_VENDOR_ID_CB 0x1307 /* PCI vendor number of ComputerBoards */
#define N_BOARDS 10 /* Number of boards in cb_pcidda_boards */
#define EEPROM_SIZE 128 /* number of entries in eeprom */
#define MAX_AO_CHANNELS 8 /* maximum number of ao channels for supported boards */
@ -307,7 +306,7 @@ static int cb_pcidda_attach(struct comedi_device *dev,
continue;
}
}
for (index = 0; index < N_BOARDS; index++) {
for (index = 0; index < ARRAY_SIZE(cb_pcidda_boards); index++) {
if (cb_pcidda_boards[index].device_id ==
pcidev->device) {
goto found;
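Bounding the loop with ARRAY_SIZE() instead of the hand-maintained N_BOARDS constant keeps the limit in sync with the table. A minimal standalone illustration (board IDs invented):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))    /* as in kernel.h */

struct board { unsigned short device_id; };

static const struct board boards[] = {
    { 0x0020 }, { 0x0021 }, { 0x0022 },
};

int main(void)
{
    size_t i;

    /* the bound grows and shrinks with the table; no stale count */
    for (i = 0; i < ARRAY_SIZE(boards); i++)
        printf("board %zu: 0x%04x\n", i, boards[i].device_id);
    return 0;
}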

View File

@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include "osd.h"
#include "logging.h"
#include "vmbus_private.h"
@ -293,6 +294,25 @@ void FreeVmbusChannel(struct vmbus_channel *Channel)
Channel);
}
DECLARE_COMPLETION(hv_channel_ready);
/*
* Count initialized channels, and ensure all channels are ready when hv_vmbus
* module loading completes.
*/
static void count_hv_channel(void)
{
static int counter;
unsigned long flags;
spin_lock_irqsave(&gVmbusConnection.channel_lock, flags);
if (++counter == MAX_MSG_TYPES)
complete(&hv_channel_ready);
spin_unlock_irqrestore(&gVmbusConnection.channel_lock, flags);
}
/*
* VmbusChannelProcessOffer - Process the offer by creating a channel/device
* associated with this offer
@ -373,22 +393,21 @@ static void VmbusChannelProcessOffer(void *context)
* can cleanup properly
*/
newChannel->State = CHANNEL_OPEN_STATE;
cnt = 0;
while (cnt != MAX_MSG_TYPES) {
/* Open IC channels */
for (cnt = 0; cnt < MAX_MSG_TYPES; cnt++) {
if (memcmp(&newChannel->OfferMsg.Offer.InterfaceType,
&hv_cb_utils[cnt].data,
sizeof(struct hv_guid)) == 0) {
sizeof(struct hv_guid)) == 0 &&
VmbusChannelOpen(newChannel, 2 * PAGE_SIZE,
2 * PAGE_SIZE, NULL, 0,
hv_cb_utils[cnt].callback,
newChannel) == 0) {
hv_cb_utils[cnt].channel = newChannel;
DPRINT_INFO(VMBUS, "%s",
hv_cb_utils[cnt].log_msg);
if (VmbusChannelOpen(newChannel, 2 * PAGE_SIZE,
2 * PAGE_SIZE, NULL, 0,
hv_cb_utils[cnt].callback,
newChannel) == 0)
hv_cb_utils[cnt].channel = newChannel;
hv_cb_utils[cnt].log_msg);
count_hv_channel();
}
cnt++;
}
}
DPRINT_EXIT(VMBUS);
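Pulling the two sides of this change together (the counter above plus the wait_for_completion() added in vmbus_drv.c further below), the rendezvous reads roughly as follows; a sketch with the spinlock elided:

DECLARE_COMPLETION(hv_channel_ready);

static void count_hv_channel(void)         /* once per IC channel */
{
    static int counter;

    if (++counter == MAX_MSG_TYPES)
        complete(&hv_channel_ready);       /* last channel is up */
}

/* module init, after requesting channel offers: */
wait_for_completion(&hv_channel_ready);    /* block until all are ready */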

View File

@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include "logging.h"
#include "osd.h"
@ -251,10 +253,36 @@ static void heartbeat_onchannelcallback(void *context)
DPRINT_EXIT(VMBUS);
}
static const struct pci_device_id __initconst
hv_utils_pci_table[] __maybe_unused = {
{ PCI_DEVICE(0x1414, 0x5353) }, /* Hyper-V emulated VGA controller */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, hv_utils_pci_table);
static const struct dmi_system_id __initconst
hv_utils_dmi_table[] __maybe_unused = {
{
.ident = "Hyper-V",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
},
},
{ },
};
MODULE_DEVICE_TABLE(dmi, hv_utils_dmi_table);
static int __init init_hyperv_utils(void)
{
printk(KERN_INFO "Registering HyperV Utility Driver\n");
if (!dmi_check_system(hv_utils_dmi_table))
return -ENODEV;
hv_cb_utils[HV_SHUTDOWN_MSG].channel->OnChannelCallback =
&shutdown_onchannelcallback;
hv_cb_utils[HV_SHUTDOWN_MSG].callback = &shutdown_onchannelcallback;

View File

@ -74,4 +74,6 @@ int vmbus_child_driver_register(struct driver_context *driver_ctx);
void vmbus_child_driver_unregister(struct driver_context *driver_ctx);
void vmbus_get_interface(struct vmbus_channel_interface *interface);
extern struct completion hv_channel_ready;
#endif /* _VMBUS_H_ */

View File

@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include "version_info.h"
#include "osd.h"
#include "logging.h"
@ -356,6 +357,8 @@ static int vmbus_bus_init(int (*drv_init)(struct hv_driver *drv))
vmbus_drv_obj->GetChannelOffers();
wait_for_completion(&hv_channel_ready);
cleanup:
DPRINT_EXIT(VMBUS_DRV);

Some files were not shown because too many files have changed in this diff.