dect / linux-2.6

Merge branch 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
Highlights of this -next round:
- ivb fdi B/C fixes
- hsw sprite/plane offset fixes from Damien
- unified dp/hdmi encoder for hsw, finally external dp support on hsw
  (Paulo)
- kill-agp and some other prep work in the gtt code from Ben
- some fb handling fixes from Ville
- massive pile of patches to align hsw VGA with the spec and make it
  actually work (Paulo)
- pile of workarounds from Jesse, mostly for vlv, but also some other
  related platforms
- start of a dev_priv reorg, that thing grew out of bounds and chaotic
- small bits&pieces all over the place, down to better error handling for
  load-detect on gen2 (Chris, Jani, Mika, Zhenyu, ...)

On top of the previous pile (just copypasta):
- tons of hsw dp prep patches from Paulo
- round scheduled work items and timers to nearest second (Chris)
- some hw workarounds (Jesse&Damien)
- vlv dp support and related fixups (Vijay et al.)
- basic haswell dp support, not yet wired up for external ports (Paulo)
- edp support (Paulo)
- tons of refactorings to prepare for the above (Paulo)
- panel rework, unifying code between lvds and edp panels (Jani)
- panel fitter scaling modes (Jani + Yuly Novikov)
- panel power improvements, should now work without the BIOS setting it up
- extracting some dp helpers from radeon/i915 and move them to
  drm_dp_helper.c
- random pile of workarounds (Damien, Ben, ...)
- some cleanups for the register restore code for suspend/resume
- secure batchbuffer support, should enable tear-free blits on gen6+
  (Chris; a userspace probe sketch follows this list)
- random smaller fixlets and cleanups.
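
That secure-batch bit can be probed from userspace roughly as follows. A sketch only, assuming libdrm's drmIoctl() and i915_drm.h; the parameter and flag values come from the i915_dma.c and i915_gem_execbuffer.c hunks further down.

#include <xf86drm.h>
#include <i915_drm.h>

/* Returns nonzero when the kernel accepts I915_EXEC_SECURE batches;
 * the new getparam reports capable(CAP_SYS_ADMIN), and execbuffer
 * additionally insists the caller is the drm master. */
static int has_secure_batches(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_SECURE_BATCHES,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;	/* older kernel: parameter unknown */
	return value;
}

A caller that passes the probe would then OR I915_EXEC_SECURE into its drm_i915_gem_execbuffer2 flags.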

* 'for-airlied' of git://people.freedesktop.org/~danvet/drm-intel: (231 commits)
  drm/i915: Restore physical HWS_PGA after resume
  drm/i915: Report amount of usable graphics memory in MiB
  drm/i915/i2c: Track users of GMBUS force-bit
  drm/i915: Allocate the proper size for contexts.
  drm/i915: Update load-detect failure paths for modeset-rework
  drm/i915: Clear unused fields of mode for framebuffer creation
  drm/i915: Always calculate 8xx WM values based on a 32-bpp framebuffer
  drm/i915: Fix sparse warnings in from AGP kill code
  drm/i915: Missed lock change with rps lock
  drm/i915: Move the remaining gtt code
  drm/i915: flush system agent TLBs on SNB
  drm/i915: Kill off now unused gen6+ AGP code
  drm/i915: Calculate correct stolen size for GEN7+
  drm/i915: Stop using AGP layer for GEN6+
  drm/i915: drop the double-OP_STOREDW usage in blt_ring_flush
  drm/i915: don't rewrite the GTT on resume v4
  drm/i915: protect RPS/RC6 related accesses (including PCU) with a new mutex
  drm/i915: put ring frequency and turbo setup into a work queue v5
  drm/i915: don't block resume on fb console resume v2
  drm/i915: extract l3_parity substruct from dev_priv
  ...
Dave Airlie committed 2012-11-20 09:22:35 +10:00
commit 9fabd4eede
41 changed files with 5073 additions and 2922 deletions

drivers/char/agp/intel-agp.h

@@ -62,12 +62,6 @@
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
/* GT PTE cache control fields */
#define GEN6_PTE_UNCACHED 0x00000002
#define HSW_PTE_UNCACHED 0x00000000
#define GEN6_PTE_LLC 0x00000004
#define GEN6_PTE_LLC_MLC 0x00000006
#define GEN6_PTE_GFDT 0x00000008
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
@@ -97,7 +91,6 @@
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
#define GFX_FLSH_CNTL_VLV 0x101008
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
@@ -148,29 +141,6 @@
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
#define SNB_GMCH_CTRL 0x50
#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
#define SNB_GTT_SIZE_0M (0 << 8)
#define SNB_GTT_SIZE_1M (1 << 8)
#define SNB_GTT_SIZE_2M (2 << 8)
#define SNB_GTT_SIZE_MASK (3 << 8)
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
@@ -219,66 +189,5 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
#endif

drivers/char/agp/intel-gtt.c

@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
stolen_size = 0;
break;
}
} else if (INTEL_GTT_GEN == 6) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
u16 snb_gmch_ctl;
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
case SNB_GMCH_GMS_STOLEN_32M:
stolen_size = MB(32);
break;
case SNB_GMCH_GMS_STOLEN_64M:
stolen_size = MB(64);
break;
case SNB_GMCH_GMS_STOLEN_96M:
stolen_size = MB(96);
break;
case SNB_GMCH_GMS_STOLEN_128M:
stolen_size = MB(128);
break;
case SNB_GMCH_GMS_STOLEN_160M:
stolen_size = MB(160);
break;
case SNB_GMCH_GMS_STOLEN_192M:
stolen_size = MB(192);
break;
case SNB_GMCH_GMS_STOLEN_224M:
stolen_size = MB(224);
break;
case SNB_GMCH_GMS_STOLEN_256M:
stolen_size = MB(256);
break;
case SNB_GMCH_GMS_STOLEN_288M:
stolen_size = MB(288);
break;
case SNB_GMCH_GMS_STOLEN_320M:
stolen_size = MB(320);
break;
case SNB_GMCH_GMS_STOLEN_352M:
stolen_size = MB(352);
break;
case SNB_GMCH_GMS_STOLEN_384M:
stolen_size = MB(384);
break;
case SNB_GMCH_GMS_STOLEN_416M:
stolen_size = MB(416);
break;
case SNB_GMCH_GMS_STOLEN_448M:
stolen_size = MB(448);
break;
case SNB_GMCH_GMS_STOLEN_480M:
stolen_size = MB(480);
break;
case SNB_GMCH_GMS_STOLEN_512M:
stolen_size = MB(512);
break;
}
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
static unsigned int intel_gtt_total_entries(void)
{
int size;
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
else if (INTEL_GTT_GEN == 6) {
u16 snb_gmch_ctl;
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
default:
case SNB_GTT_SIZE_0M:
printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
size = MB(0);
break;
case SNB_GTT_SIZE_1M:
size = MB(1);
break;
case SNB_GTT_SIZE_2M:
size = MB(2);
break;
}
return size/4;
} else {
else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
{
u8 __iomem *reg;
if (INTEL_GTT_GEN >= 6)
return true;
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
static bool gen6_check_flags(unsigned int flags)
{
return true;
}
static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
if (type_mask == AGP_USER_MEMORY)
pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else {
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
}
static void gen6_cleanup(void)
{
}
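
The address packing in the write_entry helpers above is terse, so here is a worked case (illustrative values, not part of the diff; lower_32_bits() is the stock kernel helper). The PTE is a 32-bit word, so physical address bits 39:32 are folded into the otherwise-unused PTE bits 11:4:

/* e.g. a bus address with bit 32 set */
dma_addr_t addr = 0x123456000ULL;
u32 hi  = (addr >> 28) & 0xff0;		/* 0x010: phys bit 32 -> PTE bit 4 */
u32 pte = lower_32_bits(addr) | hi;	/* 0x23456010, before the flag bits */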
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
static int i9xx_setup(void)
{
u32 reg_addr;
u32 reg_addr, gtt_addr;
int size = KB(512);
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
reg_addr &= 0xfff80000;
if (INTEL_GTT_GEN >= 7)
size = MB(2);
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
if (INTEL_GTT_GEN == 3) {
u32 gtt_addr;
switch (INTEL_GTT_GEN) {
case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
} else {
u32 gtt_offset;
switch (INTEL_GTT_GEN) {
case 5:
case 6:
case 7:
gtt_offset = MB(2);
break;
case 4:
default:
gtt_offset = KB(512);
break;
}
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
break;
case 5:
intel_private.gtt_bus_addr = reg_addr + MB(2);
break;
default:
intel_private.gtt_bus_addr = reg_addr + KB(512);
break;
}
if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = gen6_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver haswell_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = haswell_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver valleyview_gtt_driver = {
.gen = 7,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = valleyview_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
};
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
"ValleyView", &valleyview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
const struct intel_gtt *intel_gtt_get(void)
struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}

drivers/gpu/drm/Makefile

@@ -19,7 +19,7 @@ drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-usb-y := drm_usb.o
drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o

drivers/gpu/drm/drm_crtc_helper.c

@@ -39,6 +39,24 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
{
struct drm_connector *connector, *tmp;
struct list_head panel_list;
INIT_LIST_HEAD(&panel_list);
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
list_move_tail(&connector->head, &panel_list);
}
list_splice(&panel_list, &dev->mode_config.connector_list);
}
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);

drivers/gpu/drm/drm_dp_helper.c

@@ -37,7 +37,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
{
struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
int ret;
ret = (*algo_data->aux_ch)(adapter, mode,
write_byte, read_byte);
return ret;
@@ -182,7 +182,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
{
(void) i2c_algo_dp_aux_address(adapter, 0, false);
(void) i2c_algo_dp_aux_stop(adapter, false);
}
static int
@@ -198,7 +197,7 @@ int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
{
int error;
error = i2c_dp_aux_prepare_bus(adapter);
if (error)
return error;
@@ -206,3 +205,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
return error;
}
EXPORT_SYMBOL(i2c_dp_aux_add_bus);
/* Helpers for DP link training */
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
u8 l = dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
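
Each status byte packs two lanes, one nibble per lane, so lane 3 for instance reads the high nibble of the DP_LANE2_3_STATUS byte. A quick illustrative trace, not part of the diff:

u8 link_status[DP_LINK_STATUS_SIZE] = {
	[DP_LANE2_3_STATUS - DP_LANE0_1_STATUS] = 0x77,	/* lanes 2+3 trained */
};
/* lane 3: i = DP_LANE0_1_STATUS + 1, s = 4, so this yields
 * (0x77 >> 4) & 0xf = 0x7, i.e. CR done | EQ done | symbol locked */
u8 lane3 = dp_get_lane_status(link_status, 3);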
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
u8 lane_status;
int lane;
lane_align = dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_channel_eq_ok);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
u8 lane_status;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
}
return true;
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(100);
else
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
udelay(400);
else
mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
switch (link_rate) {
case 162000:
default:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
case 540000:
return DP_LINK_BW_5_4;
}
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
switch (link_bw) {
case DP_LINK_BW_1_62:
default:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
case DP_LINK_BW_5_4:
return 540000;
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
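
Taken together, a driver's clock-recovery phase built on these helpers looks roughly like the sketch below; the AUX transfer that fills link_status, the dpcd cache, and the drive-setting writeback are driver-specific and named here only for illustration.

u8 dpcd[DP_RECEIVER_CAP_SIZE];		/* cached sink capabilities */
u8 link_status[DP_LINK_STATUS_SIZE];
u8 voltage, preemph;
int lane_count = 4;			/* negotiated lane count */

for (;;) {
	drm_dp_link_train_clock_recovery_delay(dpcd);
	/* driver-specific: AUX-read DP_LANE0_1_STATUS onward into
	 * link_status here */
	if (drm_dp_clock_recovery_ok(link_status, lane_count))
		break;	/* proceed to channel equalization */
	voltage = drm_dp_get_adjust_request_voltage(link_status, 0);
	preemph = drm_dp_get_adjust_request_pre_emphasis(link_status, 0);
	/* driver-specific: write the new settings to
	 * DP_TRAINING_LANE0_SET..., giving up after the usual 5 tries */
}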

drivers/gpu/drm/i915/i915_debugfs.c

@@ -1068,7 +1068,7 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
int count=0, ret;
@@ -1097,6 +1097,9 @@ static int gen6_drpc_info(struct seq_file *m)
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
mutex_unlock(&dev_priv->rps.hw_lock);
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1151,12 @@ static int gen6_drpc_info(struct seq_file *m)
seq_printf(m, "RC6++ residency since boot: %u\n",
I915_READ(GEN6_GT_GFX_RC6pp));
seq_printf(m, "RC6 voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
seq_printf(m, "RC6+ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
seq_printf(m, "RC6++ voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
return 0;
}
@@ -1273,7 +1282,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1282,19 +1291,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
gpu_freq++) {
I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
GEN6_PCODE_READ_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
GEN6_PCODE_READY) == 0, 10)) {
DRM_ERROR("pcode read of freq table timed out\n");
continue;
}
ia_freq = I915_READ(GEN6_PCODE_DATA);
ia_freq = gpu_freq;
sandybridge_pcode_read(dev_priv,
GEN6_PCODE_READ_MIN_FREQ_TABLE,
&ia_freq);
seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
}
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return 0;
}
@@ -1398,15 +1402,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
if (dev_priv->pwrctx) {
if (dev_priv->ips.pwrctx) {
seq_printf(m, "power context ");
describe_obj(m, dev_priv->pwrctx);
describe_obj(m, dev_priv->ips.pwrctx);
seq_printf(m, "\n");
}
if (dev_priv->renderctx) {
if (dev_priv->ips.renderctx) {
seq_printf(m, "render context ");
describe_obj(m, dev_priv->renderctx);
describe_obj(m, dev_priv->ips.renderctx);
seq_printf(m, "\n");
}
@@ -1711,13 +1715,13 @@ i915_max_freq_read(struct file *filp,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1752,7 +1756,7 @@ i915_max_freq_write(struct file *filp,
DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
ret = mutex_lock_interruptible(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1762,7 +1766,7 @@ i915_max_freq_write(struct file *filp,
dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}
@@ -1787,13 +1791,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
len = snprintf(buf, sizeof(buf),
"min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
if (len > sizeof(buf))
len = sizeof(buf);
@@ -1826,7 +1830,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
ret = mutex_lock_interruptible(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
if (ret)
return ret;
@@ -1836,7 +1840,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return cnt;
}

drivers/gpu/drm/i915/i915_dma.c

@@ -103,32 +103,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
I915_WRITE(HWS_PGA, addr);
}
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
*/
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
0, PAGE_SIZE);
i915_write_hws_pga(dev);
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 0;
dev_priv->dri1.counter++;
if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
ADVANCE_LP_RING();
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
@@ -775,21 +749,21 @@ static int i915_emit_irq(struct drm_device * dev)
DRM_DEBUG_DRIVER("\n");
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 1;
dev_priv->dri1.counter++;
if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
return dev_priv->counter;
return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +794,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
return ret;
@@ -1014,6 +988,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -1326,6 +1303,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1491,19 +1470,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
ret = -EIO;
ret = i915_gem_gtt_init(dev);
if (ret)
goto put_bridge;
}
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto put_gmch;
}
if (drm_core_check_feature(dev, DRIVER_MODESET))
i915_kick_out_firmware_fb(dev_priv);
@@ -1590,18 +1559,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
/* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
i915_gem_load(dev);
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
if (ret)
goto out_gem_unload;
}
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1621,6 +1582,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1641,7 @@ out_mtrrfree:
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
intel_gmch_remove();
i915_gem_gtt_fini(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -1721,6 +1684,7 @@ int i915_driver_unload(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
cancel_work_sync(&dev_priv->console_resume_work);
/*
* free the memory space allocated for the child device

drivers/gpu/drm/i915/i915_drv.c

@@ -423,19 +423,23 @@ void intel_detect_pch(struct drm_device *dev)
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
return error;
}
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
intel_modeset_disable(dev);
drm_irq_uninstall(dev);
@@ -526,17 +532,23 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
}
static int i915_drm_thaw(struct drm_device *dev)
void intel_console_resume(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
console_resume_work);
struct drm_device *dev = dev_priv->dev;
console_lock();
intel_fbdev_set_suspend(dev, 0);
console_unlock();
}
static int __i915_drm_thaw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -553,7 +565,6 @@ static int i915_drm_thaw(struct drm_device *dev)
intel_modeset_init_hw(dev);
intel_modeset_setup_hw_state(dev);
drm_mode_config_reset(dev);
drm_irq_install(dev);
}
@@ -561,14 +572,41 @@ static int i915_drm_thaw(struct drm_device *dev)
dev_priv->modeset_on_lid = 0;
console_lock();
intel_fbdev_set_suspend(dev, 0);
console_unlock();
/*
* The console lock can be pretty contended on resume due
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
if (console_trylock()) {
intel_fbdev_set_suspend(dev, 0);
console_unlock();
} else {
schedule_work(&dev_priv->console_resume_work);
}
return error;
}
static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
intel_gt_reset(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
__i915_drm_thaw(dev);
return error;
}
int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +617,20 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
ret = i915_drm_thaw(dev);
intel_gt_reset(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
* earlier) need this since the BIOS might clear all our scratch PTEs.
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) &&
!dev_priv->opregion.header) {
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
}
ret = __i915_drm_thaw(dev);
if (ret)
return ret;
@@ -1140,12 +1191,40 @@ static bool IS_DISPLAYREG(u32 reg)
if (reg == GEN6_GDRST)
return false;
switch (reg) {
case _3D_CHICKEN3:
case IVB_CHICKEN3:
case GEN7_COMMON_SLICE_CHICKEN1:
case GEN7_L3CNTLREG1:
case GEN7_L3_CHICKEN_MODE_REGISTER:
case GEN7_ROW_CHICKEN2:
case GEN7_L3SQCREG4:
case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
case GEN7_HALF_SLICE_CHICKEN1:
case GEN6_MBCTL:
case GEN6_UCGCTL2:
return false;
default:
break;
}
return true;
}
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
* chip from rc6 before touching it for real. MI_MODE is masked, hence
* harmless to write 0 into. */
I915_WRITE_NOTRACE(MI_MODE, 0);
}
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1256,8 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \

drivers/gpu/drm/i915/i915_drv.h

@@ -58,6 +58,14 @@ enum pipe {
};
#define pipe_name(p) ((p) + 'A')
enum transcoder {
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')
enum plane {
PLANE_A = 0,
PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
};
#define I915_NUM_PLLS 2
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
int wrpll2_refcount;
};
/* Interface history:
*
* 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
struct drm_i915_gem_object *cur_obj;
};
struct mem_block {
struct mem_block *next;
struct mem_block *prev;
int start;
int size;
struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
@@ -251,6 +257,7 @@ struct drm_i915_display_funcs {
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -263,7 +270,6 @@
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
@@ -338,6 +344,7 @@ struct intel_device_info {
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
struct drm_device *dev;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
@@ -383,149 +390,14 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit;
u32 force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
typedef struct drm_i915_private {
struct drm_device *dev;
const struct intel_device_info *info;
int relative_constants_mode;
void __iomem *regs;
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
struct resource mch_res;
atomic_t irq_received;
/* protects the irq masks */
spinlock_t irq_lock;
/* DPIO indirect register protection */
spinlock_t dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
struct notifier_block lid_notifier;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
unsigned long quirks;
/* Register state */
bool modeset_on_lid;
struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
@@ -676,10 +548,205 @@ typedef struct drm_i915_private {
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
};
struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
spinlock_t lock;
/* The below variables and all the rps hw state are protected by
 * dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
struct delayed_work delayed_resume_work;
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
*/
struct mutex hw_lock;
};
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
int c_m;
int r_t;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
};
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
uint32_t counter;
};
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
};
typedef struct drm_i915_private {
struct drm_device *dev;
const struct intel_device_info *info;
int relative_constants_mode;
void __iomem *regs;
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
atomic_t irq_received;
/* protects the irq masks */
spinlock_t irq_lock;
/* DPIO indirect register protection */
spinlock_t dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
unsigned long quirks;
/* Register state */
bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
const struct intel_gtt *gtt;
struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@@ -706,8 +773,6 @@ typedef struct drm_i915_private {
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
u32 *l3_remap_info;
struct shrinker inactive_shrinker;
/**
@@ -785,19 +850,6 @@ typedef struct drm_i915_private {
u32 object_count;
} mm;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
} dri1;
/* Kernel Modesetting */
struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +863,7 @@ typedef struct drm_i915_private {
wait_queue_head_t pending_flip_queue;
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
/* Reclocking support */
bool render_reclock_avail;
@@ -820,46 +873,17 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
struct drm_connector *int_edp_connector;
bool mchbar_need_disable;
/* gen6+ rps state */
struct {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
spinlock_t lock;
struct intel_l3_parity l3_parity;
/* The below variables and all the rps hw state are protected by
 * dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
} rps;
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
struct {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
int c_m;
int r_t;
} ips;
struct intel_ilk_power_mgmt ips;
enum no_fbc_reason no_fbc_reason;
@@ -871,14 +895,25 @@ typedef struct drm_i915_private {
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
/*
* The console may be contended at resume, but we don't
* want it to block on it.
*/
struct work_struct console_resume_work;
struct backlight_device *backlight;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
struct work_struct parity_error_work;
bool hw_contexts_disabled;
uint32_t hw_context_size;
struct i915_suspend_saved_registers regfile;
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -1120,6 +1155,9 @@ struct drm_i915_file_private {
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
(dev)->pci_device == 0x0152 || \
(dev)->pci_device == 0x015a)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
@@ -1250,6 +1288,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1296,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
void i915_error_state_free(struct kref *error_ref);
@@ -1499,6 +1539,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_gtt_fini(struct drm_device *dev);
extern inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1628,6 +1676,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

drivers/gpu/drm/i915/i915_gem.c

@@ -845,12 +845,12 @@ out:
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
}
}
if (needs_clflush_after)
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
return ret;
}
@@ -2022,12 +2022,12 @@ i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
mod_timer(&dev_priv->hangcheck_timer,
jiffies +
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
intel_mark_busy(dev_priv->dev);
}
}
@@ -2218,7 +2218,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
if (!mutex_trylock(&dev->struct_mutex)) {
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
return;
}
@@ -2236,7 +2237,8 @@
}
if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
intel_mark_idle(dev);
@@ -3059,7 +3061,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
i915_gem_clflush_object(obj);
intel_gtt_chipset_flush();
i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3832,7 +3834,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
if (!IS_IVYBRIDGE(dev))
return;
if (!dev_priv->mm.l3_remap_info)
if (!dev_priv->l3_parity.remap_info)
return;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3843,12 @@
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
if (remap && !dev_priv->mm.l3_remap_info[i/4])
if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
}
/* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3878,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
static bool
intel_enable_blt(struct drm_device *dev)
{
@ -3960,7 +3900,7 @@ i915_gem_init_hw(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
if (!intel_enable_gtt())
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@ -4295,7 +4235,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_cache_release(page);
}
}
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
@ -4382,7 +4322,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
return -EFAULT;
}
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
return 0;
}


@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
struct i915_hw_context *ctx;
int ret, id;
ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);


@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
}
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@ -800,6 +800,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
u32 flags;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@ -811,6 +812,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
return ret;
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (!file->is_master || !capable(CAP_SYS_ADMIN))
return -EPERM;
flags |= I915_DISPATCH_SECURE;
}
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
@ -983,6 +992,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but let's be paranoid and do it
* unconditionally for now. */
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
if (ret)
goto err;
@ -1028,7 +1044,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
trace_i915_gem_ring_dispatch(ring, seqno);
trace_i915_gem_ring_dispatch(ring, seqno, flags);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
@ -1040,12 +1056,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len);
exec_start, exec_len,
flags);
if (ret)
goto err;
}
} else {
ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
flags);
if (ret)
goto err;
}
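The I915_EXEC_SECURE handling above ties secure dispatch to both DRM-master status and CAP_SYS_ADMIN, since a secure batch escapes ppgtt isolation. Restated as a self-contained helper (name hypothetical; assumes the usual kernel and i915 uapi headers):

static int exec_flags_to_dispatch_flags(u64 exec_flags, bool is_master,
					u32 *dispatch_flags)
{
	u32 flags = 0;

	if (exec_flags & I915_EXEC_SECURE) {
		/* secure batches bypass ppgtt isolation */
		if (!is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;
		flags |= I915_DISPATCH_SECURE;
	}

	*dispatch_flags = flags;
	return 0;
}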


@ -28,19 +28,67 @@
#include "i915_trace.h"
#include "intel_drv.h"
typedef uint32_t gtt_pte_t;
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bits 11:4 for physical addr bits 39:32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
static inline gtt_pte_t pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
case I915_CACHE_LLC_MLC:
/* Haswell doesn't set L3 this way */
if (IS_HASWELL(dev))
pte |= GEN6_PTE_CACHE_LLC;
else
pte |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(dev))
pte |= HSW_PTE_UNCACHED;
else
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
return pte;
}
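A standalone restatement of the encoding makes the bit layout concrete: address bits 31:12 go into the PTE directly, address bits 39:32 land in PTE bits 11:4, and the low bits carry cache control plus the valid bit. The sample address below is arbitrary:

#include <assert.h>
#include <stdint.h>

#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))

int main(void)
{
	uint64_t addr = 0x123456000ULL;	/* page-aligned 40-bit address */
	uint32_t pte = (uint32_t)GEN6_GTT_ADDR_ENCODE(addr);

	pte |= (2 << 1) | 1;	/* GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID */
	assert(pte == 0x23456015);
	return 0;
}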
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
uint32_t *pt_vaddr;
uint32_t scratch_pte;
gtt_pte_t *pt_vaddr;
gtt_pte_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return ret;
ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
enum i915_cache_level cache_level)
{
uint32_t *pt_vaddr, pte;
gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pte = GEN6_PTE_ADDR_ENCODE(page_addr);
pt_vaddr[j] = pte | pte_flags;
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
cache_level);
/* grab the next page */
if (++m == segment_len) {
@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
uint32_t pte_flags = GEN6_PTE_VALID;
switch (cache_level) {
case I915_CACHE_LLC_MLC:
pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
pte_flags);
cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
obj->base.size >> PAGE_SHIFT);
}
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
void i915_gem_init_ppgtt(struct drm_device *dev)
{
switch (cache_level) {
case I915_CACHE_LLC_MLC:
if (INTEL_INFO(dev)->gen >= 6)
return AGP_USER_CACHED_MEMORY_LLC_MLC;
/* Older chipsets do not have this extra level of CPU
* caching, so fall through and request the PTE simply
* as cached.
*/
case I915_CACHE_LLC:
return AGP_USER_CACHED_MEMORY;
default:
case I915_CACHE_NONE:
return AGP_USER_MEMORY;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
if (!dev_priv->mm.aliasing_ppgtt)
return;
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
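PP_DIR_BASE takes the page-directory offset as a 64-byte cacheline index packed into bits 31:16, which is what the divide-then-shift above computes. In isolation:

#include <assert.h>
#include <stdint.h>

static uint32_t pp_dir_base(uint32_t pd_offset_bytes)
{
	return (pd_offset_bytes / 64) << 16;	/* cacheline index, bits 31:16 */
}

int main(void)
{
	assert(pp_dir_base(2048) == (32u << 16));	/* 2 KiB in = cacheline 32 */
	return 0;
}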
@ -288,13 +360,38 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
dev_priv->mm.interruptible = interruptible;
}
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned first_entry,
unsigned num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
if (INTEL_INFO(dev)->gen < 6) {
intel_gtt_clear_range(first_entry, num_entries);
return;
}
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
/* memset_io() would replicate only the low byte of the PTE; write each
 * 32-bit scratch PTE explicitly instead. */
for (i = 0; i < num_entries; i++)
	iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@ -302,7 +399,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@ -318,21 +415,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
return 0;
}
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
for_each_sg(st->sgl, sg, st->nents, unused) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
i++;
}
}
BUG_ON(i > max_entries);
BUG_ON(i != obj->base.size / PAGE_SIZE);
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR, they may be subject to
* different (NUMA-like) access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
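The two reads above are deliberate: the readl() of the last PTE posts the PTE writes, and only afterwards is the system-agent TLB flush triggered and itself posted. Condensed into one place (function and parameter names hypothetical):

static void gen6_flush_gtt_writes(void __iomem *last_pte,
				  void __iomem *flsh_cntl)
{
	(void)readl(last_pte);	/* ensure all PTE writes have landed */
	writel(1, flsh_cntl);	/* GFX_FLSH_CNTL_EN: flush agent TLBs */
	(void)readl(flsh_cntl);	/* post the flush write as well */
}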
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
if (INTEL_INFO(dev)->gen < 6) {
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
flags);
} else {
gen6_ggtt_bind_object(obj, cache_level);
}
intel_gtt_insert_sg_entries(obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
agp_type);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
i915_ggtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@ -390,5 +542,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* ... but ensure that we clear the entire range. */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct page *page;
dma_addr_t dma_addr;
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
get_page(page);
set_pages_uc(page, 1);
#ifdef CONFIG_INTEL_IOMMU
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dma_addr)) {
	put_page(page);
	__free_page(page);
	return -EINVAL;
}
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->mm.gtt->scratch_page = page;
dev_priv->mm.gtt->scratch_page_dma = dma_addr;
return 0;
}
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(dev_priv->mm.gtt->scratch_page);
__free_page(dev_priv->mm.gtt->scratch_page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
}
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
return stolen_decoder[snb_gmch_ctl] << 20;
}
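These helpers only slice fields out of the GMCH control word: GGMS (bits 9:8) gives the GTT size in MiB, the gen6 GMS field (bits 7:3) counts 32 MiB units of stolen memory, and gen7 replaces the linear GMS encoding with the lookup table above. A worked decode of a hypothetical register value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t snb_gmch_ctl = 0x0211;	/* hypothetical value */

	unsigned int gtt_size = ((snb_gmch_ctl >> 8) & 0x3) << 20;
	unsigned int stolen = ((snb_gmch_ctl >> 3) & 0x1f) << 25;

	assert(gtt_size == 2u << 20);	/* 2 MiB of PTEs */
	assert(stolen == 64u << 20);	/* 2 units of 32 MiB */
	return 0;
}

At 4 bytes per PTE, 2 MiB of GTT entries map 2 GiB of graphics address space, which is why gtt_total_entries is derived by dividing by sizeof(gtt_pte_t).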
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
u16 snb_gmch_ctl;
u32 tmp;
int ret;
/* On modern platforms we need not worry ourselves with the legacy
* hostbridge query stuff. Skip it entirely.
*/
if (INTEL_INFO(dev)->gen < 6) {
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
}
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
intel_gmch_remove();
return -ENODEV;
}
return 0;
}
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
if (!dev_priv->mm.gtt)
return -ENOMEM;
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_0, &tmp);
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = (tmp & PCI_BASE_ADDRESS_MEM_MASK) + (2<<20);
pci_read_config_dword(dev->pdev, PCI_BASE_ADDRESS_2, &tmp);
dev_priv->mm.gtt->gma_bus_addr = tmp & PCI_BASE_ADDRESS_MEM_MASK;
/* i9xx_setup */
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
dev_priv->mm.gtt->gtt_total_entries =
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
if (INTEL_INFO(dev)->gen < 7)
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
else
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
/* 64/512MB is the current min/max we actually know of, but this is just a
* coarse sanity check.
*/
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
DRM_ERROR("Unknown GMADR entries (%d)\n",
dev_priv->mm.gtt->gtt_mappable_entries);
ret = -ENXIO;
goto err_out;
}
ret = setup_scratch_page(dev);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
goto err_out;
}
dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
if (!dev_priv->mm.gtt->gtt) {
DRM_ERROR("Failed to map the gtt page table\n");
teardown_scratch_page(dev);
ret = -ENOMEM;
goto err_out;
}
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
return 0;
err_out:
kfree(dev_priv->mm.gtt);
if (INTEL_INFO(dev)->gen < 6)
intel_gmch_remove();
return ret;
}
void i915_gem_gtt_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
iounmap(dev_priv->mm.gtt->gtt);
teardown_scratch_page(dev);
if (INTEL_INFO(dev)->gen < 6)
intel_gmch_remove();
kfree(dev_priv->mm.gtt);
}


@ -122,7 +122,10 @@ static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int vbl_start, vbl_end, htotal, vtotal;
bool in_vbl = true;
int ret = 0;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
}
/* Get vtotal. */
vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
if (INTEL_INFO(dev)->gen >= 4) {
/* No obvious pixelcount register. Only query vertical
@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
*/
position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
*vpos = position / htotal;
*hpos = position - (*vpos * htotal);
}
/* Query vblank area. */
vbl = I915_READ(VBLANK(pipe));
vbl = I915_READ(VBLANK(cpu_transcoder));
/* Test position against vblank region. */
vbl_start = vbl & 0x1fff;
@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer,
jiffies +
msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
gen6_set_rps(dev_priv->dev, new_delay);
}
mutex_unlock(&dev_priv->dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
}
@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
parity_error_work);
l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
queue_work(dev_priv->wq, &dev_priv->parity_error_work);
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void snb_gt_irq_handler(struct drm_device *dev,
@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
if (pch_iir & SDE_HOTPLUG_MASK)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
I915_READ(FDI_RX_IIR(pipe)));
}
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
notify_ring(dev, &dev_priv->ring[VCS]);
}
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
atomic_inc(&dev_priv->irq_received);
@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
if (HAS_PCH_CPT(dev))
hotplug_mask = SDE_HOTPLUG_MASK_CPT;
else
hotplug_mask = SDE_HOTPLUG_MASK;
ret = IRQ_HANDLED;
if (IS_GEN5(dev))
@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
@ -1751,7 +1751,7 @@ void i915_hangcheck_elapsed(unsigned long data)
repeat:
/* Reset timer in case the chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
/* drm_dma.h hooks
@ -1956,6 +1956,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@ -1995,21 +1996,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
GT_GEN6_BLT_CS_ERROR_INTERRUPT |
GT_GEN6_BLT_USER_INTERRUPT |
GT_GEN6_BSD_USER_INTERRUPT |
GT_GEN6_BSD_CS_ERROR_INTERRUPT |
GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
GT_PIPE_NOTIFY |
GT_RENDER_CS_ERROR_INTERRUPT |
GT_SYNC_STATUS |
GT_USER_INTERRUPT);
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
/* ack & enable invalid PTE error interrupts */
@ -2019,7 +2011,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
#endif
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@ -2027,15 +2018,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
#endif
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
@ -2129,7 +2119,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
return 0;
}
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -2307,7 +2297,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
return 0;
}
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -2545,7 +2535,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
return 0;
}
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@ -2691,7 +2681,7 @@ void intel_irq_init(struct drm_device *dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */


@ -26,6 +26,7 @@
#define _I915_REG_H_
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
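All three macros extrapolate an instance's register address from the first two instances, so reg(n) = a + n*(b - a) only holds while the hardware keeps a constant stride. With a hypothetical register pair:

#include <assert.h>

#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))

int main(void)
{
	enum { REG_A = 0x60000, REG_B = 0x61000 };	/* hypothetical pair */

	assert(_TRANSCODER(0, REG_A, REG_B) == 0x60000);
	assert(_TRANSCODER(1, REG_A, REG_B) == 0x61000);
	assert(_TRANSCODER(2, REG_A, REG_B) == 0x62000);	/* extrapolated */
	return 0;
}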
@ -40,6 +41,14 @@
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define SNB_GMCH_CTRL 0x50
#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
#define SNB_GMCH_GGMS_MASK 0x3
#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
#define SNB_GMCH_GMS_MASK 0x1f
#define IVB_GMCH_GMS_SHIFT 4
#define IVB_GMCH_GMS_MASK 0xf
/* PCI config space */
@ -105,23 +114,6 @@
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PDE_VALID (1 << 0)
#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_CACHE_BITS (3 << 1)
#define GEN6_PTE_GFDT (1 << 3)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@ -241,11 +233,18 @@
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_INVALIDATE_TLB (1<<18)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_PPGTT_HSW (1<<8)
#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@ -369,6 +368,7 @@
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
@ -384,6 +384,9 @@
#define DPIO_FASTCLK_DISABLE 0x8100
#define DPIO_DATA_CHANNEL1 0x8220
#define DPIO_DATA_CHANNEL2 0x8420
/*
* Fence registers
*/
@ -521,6 +524,7 @@
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define MI_MODE 0x0209c
@ -547,6 +551,8 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE 0x182060
#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
@ -661,6 +667,7 @@
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@ -670,6 +677,8 @@
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 0x101008
#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
@ -1559,14 +1568,14 @@
#define _VSYNCSHIFT_B 0x61028
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
/* VGA port control */
#define ADPA 0x61100
@ -2641,6 +2650,7 @@
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
@ -2711,7 +2721,7 @@
#define PIPE_12BPC (3 << 5)
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@ -2998,12 +3008,19 @@
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
#define DISPPLANE_15_16BPP (0x4<<26)
#define DISPPLANE_16BPP (0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
#define DISPPLANE_32BPP (0x7<<26)
#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_BGRA555 (0x3<<26)
#define DISPPLANE_BGRX555 (0x4<<26)
#define DISPPLANE_BGRX565 (0x5<<26)
#define DISPPLANE_BGRX888 (0x6<<26)
#define DISPPLANE_BGRA888 (0x7<<26)
#define DISPPLANE_RGBX101010 (0x8<<26)
#define DISPPLANE_RGBA101010 (0x9<<26)
#define DISPPLANE_BGRX101010 (0xa<<26)
#define DISPPLANE_RGBX161616 (0xc<<26)
#define DISPPLANE_RGBX888 (0xe<<26)
#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_SHIFT 24
@ -3024,6 +3041,8 @@
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@ -3033,6 +3052,8 @@
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
@ -3078,6 +3099,8 @@
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
#define _DSPBOFFSET 0x711A4
#define _DSPBSURFLIVE 0x711AC
/* Sprite A control */
#define _DVSACNTR 0x72180
@ -3143,6 +3166,7 @@
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
@ -3177,6 +3201,8 @@
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
#define _SPRA_OFFSET 0x702a4
#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
@ -3197,6 +3223,8 @@
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
#define _SPRB_OFFSET 0x712a4
#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
@ -3210,8 +3238,10 @@
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
/* VBIOS regs */
#define VGACNTRL 0x71400
@ -3246,12 +3276,6 @@
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
#define PCH_DSPCLK_GATE_D 0x42020
# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@ -3301,14 +3325,14 @@
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
@ -3423,15 +3447,13 @@
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define IVB_VRHUNIT_CLK_GATE (1<<28)
#define ILK_DPARB_CLK_GATE (1<<5)
#define ILK_DPFD_CLK_GATE (1<<7)
/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
#define ILK_CLK_FBC (1<<7)
#define ILK_DPFC_DIS1 (1<<8)
#define ILK_DPFC_DIS2 (1<<9)
#define ILK_DSPCLK_GATE_D 0x42020
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@ -3447,14 +3469,21 @@
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
#define GEN7_L3AGDIS (1<<19)
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
#define HSW_FUSE_STRAP 0x42014
#define HSW_CDCLK_LIMIT (1 << 24)
/* PCH */
/* south display engine interrupt: IBX */
@ -3686,7 +3715,7 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
#define VLV_VIDEO_DIP_CTL_A 0x60220
#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
@ -3795,16 +3824,22 @@
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SOUTH_CHICKEN2 0xc2004
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
@ -3901,16 +3936,21 @@
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
#define FDI_RX_TP1_TO_TP2_48 (2<<20)
#define FDI_RX_TP1_TO_TP2_64 (3<<20)
#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
#define FDI_RX_TP1_TO_TP2_48 (2<<20)
#define FDI_RX_TP1_TO_TP2_64 (3<<20)
#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
@ -4003,6 +4043,11 @@
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
#define PCH_PP_OFF_DELAYS 0xc720c
#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
#define PANEL_POWER_PORT_LVDS (0 << 30)
#define PANEL_POWER_PORT_DP_A (1 << 30)
#define PANEL_POWER_PORT_DP_C (2 << 30)
#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@ -4050,7 +4095,7 @@
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
@ -4108,6 +4153,8 @@
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
@ -4220,6 +4267,10 @@
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
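The VID helpers convert between a millivolt value and the 5 mV step code the PCU expects, offset from 245 mV; encode is the arithmetic inverse of decode. A quick round-trip check:

#include <assert.h>

#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)

int main(void)
{
	assert(GEN6_ENCODE_RC6_VID(450) == 41);
	assert(GEN6_DECODE_RC6_VID(41) == 450);
	return 0;
}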
@ -4251,6 +4302,15 @@
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@ -4380,33 +4440,39 @@
#define HSW_PWR_WELL_CTL6 0x45414
/* Per-transcoder DDI Function Control */
#define PIPE_DDI_FUNC_CTL_A 0x60400
#define PIPE_DDI_FUNC_CTL_B 0x61400
#define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
#define TRANS_DDI_FUNC_CTL_A 0x60400
#define TRANS_DDI_FUNC_CTL_B 0x61400
#define TRANS_DDI_FUNC_CTL_C 0x62400
#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
TRANS_DDI_FUNC_CTL_B)
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define PIPE_DDI_PORT_MASK (7<<28)
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
#define PIPE_DDI_BPC_MASK (7<<20)
#define PIPE_DDI_BPC_8 (0<<20)
#define PIPE_DDI_BPC_10 (1<<20)
#define PIPE_DDI_BPC_6 (2<<20)
#define PIPE_DDI_BPC_12 (3<<20)
#define PIPE_DDI_PVSYNC (1<<17)
#define PIPE_DDI_PHSYNC (1<<16)
#define PIPE_DDI_BFI_ENABLE (1<<4)
#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
#define TRANS_DDI_PORT_MASK (7<<28)
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
#define TRANS_DDI_BPC_MASK (7<<20)
#define TRANS_DDI_BPC_8 (0<<20)
#define TRANS_DDI_BPC_10 (1<<20)
#define TRANS_DDI_BPC_6 (2<<20)
#define TRANS_DDI_BPC_12 (3<<20)
#define TRANS_DDI_PVSYNC (1<<17)
#define TRANS_DDI_PHSYNC (1<<16)
#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_BFI_ENABLE (1<<4)
#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
@ -4420,12 +4486,16 @@
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
/* DDI Buffer Control */
@ -4490,8 +4560,8 @@
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SCC (1<<28)
#define SPLL_PLL_NON_SCC (2<<28)
#define SPLL_PLL_SSC (1<<28)
#define SPLL_PLL_NON_SSC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
@ -4500,7 +4570,7 @@
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@ -4517,21 +4587,36 @@
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
/* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144
#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
#define PIPE_CLK_SEL_DISABLED (0x0<<29)
#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
/* Transcoder clock selection */
#define TRANS_CLK_SEL_A 0x46140
#define TRANS_CLK_SEL_B 0x46144
#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
/* For each transcoder, we need to select the corresponding port clock */
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
#define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410
#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
_TRANSB_MSA_MISC)
#define TRANS_MSA_SYNC_CLK (1<<0)
#define TRANS_MSA_6_BPC (0<<5)
#define TRANS_MSA_8_BPC (1<<5)
#define TRANS_MSA_10_BPC (2<<5)
#define TRANS_MSA_12_BPC (3<<5)
#define TRANS_MSA_16_BPC (4<<5)
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
#define LCPLL_CLK_FREQ_MASK (3<<26)
#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270

File diff suppressed because it is too large.


@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
if (ret)
return ret;
if (!dev_priv->mm.l3_remap_info) {
if (!dev_priv->l3_parity.remap_info) {
temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!temp) {
mutex_unlock(&drm_dev->struct_mutex);
@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
* at this point it is left as a TODO.
*/
if (temp)
dev_priv->mm.l3_remap_info = temp;
dev_priv->l3_parity.remap_info = temp;
memcpy(dev_priv->mm.l3_remap_info + (offset/4),
memcpy(dev_priv->l3_parity.remap_info + (offset/4),
buf + (offset/4),
count);
@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
dev_priv->rps.max_delay = val;
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return count;
}
@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
mutex_lock(&dev_priv->rps.hw_lock);
ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return snprintf(buf, PAGE_SIZE, "%d", ret);
}
@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val /= GT_FREQUENCY_MULTIPLIER;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
mutex_lock(&dev_priv->rps.hw_lock);
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
hw_max = (rp_state_cap & 0xff);
hw_min = ((rp_state_cap & 0xff0000) >> 16);
if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return -EINVAL;
}
@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
dev_priv->rps.min_delay = val;
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
return count;


@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
);
TRACE_EVENT(i915_gem_ring_dispatch,
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
TP_ARGS(ring, seqno),
TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
TP_ARGS(ring, seqno, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
__field(u32, flags)
),
TP_fast_assign(
__entry->dev = ring->dev->primary->index;
__entry->ring = ring->id;
__entry->seqno = seqno;
__entry->flags = flags;
i915_trace_irq_get(ring, seqno);
),
TP_printk("dev=%u, ring=%u, seqno=%u",
__entry->dev, __entry->ring, __entry->seqno)
TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
__entry->dev, __entry->ring, __entry->seqno, __entry->flags)
);
TRACE_EVENT(i915_gem_ring_flush,


@ -221,14 +221,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
adpa = ADPA_HOTPLUG_BITS;
if (HAS_PCH_SPLIT(dev))
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
/* For CPT allow 3 pipe config, for others just use A or B */
if (HAS_PCH_CPT(dev))
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
@ -401,12 +407,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret;
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
return intel_connector_update_modes(connector, edid);
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
return ret;
}
static bool intel_crt_detect_ddc(struct drm_connector *connector)
@ -644,10 +654,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(PCH_ADPA, adpa);
POSTING_READ(PCH_ADPA);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
}
/*
@ -749,7 +771,10 @@ void intel_crt_init(struct drm_device *dev)
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_hw_state = intel_crt_get_hw_state;
if (IS_HASWELL(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@ -766,18 +791,6 @@ void intel_crt_init(struct drm_device *dev)
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(PCH_ADPA, adpa);
POSTING_READ(PCH_ADPA);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}

File diff suppressed because it is too large.

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff


@ -94,6 +94,7 @@
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_UNKNOWN 9
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
@ -163,6 +164,11 @@ struct intel_encoder {
int crtc_mask;
};
struct intel_panel {
struct drm_display_mode *fixed_mode;
int fitting_mode;
};
struct intel_connector {
struct drm_connector base;
/*
@ -179,12 +185,19 @@ struct intel_connector {
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
/* Panel info for eDP and LVDS */
struct intel_panel panel;
/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
struct edid *edid;
};
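A minimal sketch of the caching convention the comment above describes; the helper is hypothetical, while IS_ERR_OR_NULL() and drm_add_edid_modes() are real kernel/DRM calls:

/* NULL edid = never probed, ERR_PTR = probed and invalid,
 * real pointer = usable without another slow DDC fetch. */
static int example_cached_edid_modes(struct intel_connector *intel_connector)
{
        struct edid *edid = intel_connector->edid;

        if (IS_ERR_OR_NULL(edid))
                return 0;       /* nothing usable cached */

        return drm_add_edid_modes(&intel_connector->base, edid);
}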
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
@ -212,12 +225,14 @@ struct intel_crtc {
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
};
struct intel_plane {
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
@ -317,10 +332,8 @@ struct dip_infoframe {
} __attribute__((packed));
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
@ -331,18 +344,15 @@ struct intel_hdmi {
struct drm_display_mode *adjusted_mode);
};
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
enum port port;
uint32_t color_range;
uint8_t link_bw;
uint8_t lane_count;
@ -357,11 +367,16 @@ struct intel_dp {
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct edid *edid; /* cached EDID for eDP */
int edid_mode_count;
struct intel_connector *attached_connector;
};
struct intel_digital_port {
struct intel_encoder base;
enum port port;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
static inline struct drm_crtc *
@ -395,6 +410,8 @@ struct intel_fbc_work {
int interval;
};
int intel_pch_rawclk(struct drm_device *dev);
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@ -405,7 +422,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
@ -418,10 +440,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
@ -431,6 +470,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
/* intel_panel.c */
extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
@ -439,7 +482,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern int intel_panel_setup_backlight(struct drm_device *dev);
extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
@ -473,6 +516,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
return to_intel_connector(connector)->encoder;
}
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
container_of(encoder, struct intel_digital_port, base.base);
return &intel_dig_port->dp;
}
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_digital_port, base.base);
}
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
return container_of(intel_dp, struct intel_digital_port, dp);
}
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
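A short, hypothetical usage sketch of the new helpers; the WARN_ON checks just demonstrate that the container_of() mappings invert each other without stored back-pointers:

/* Given a drm_encoder, recover the wrapping digital port and pick
 * whichever sub-structure (DP or HDMI) the caller needs. */
static void example_port_lookup(struct drm_encoder *encoder)
{
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        struct intel_dp *dp = &dig_port->dp;
        struct intel_hdmi *hdmi = &dig_port->hdmi;

        WARN_ON(dp_to_dig_port(dp) != dig_port);
        WARN_ON(hdmi_to_dig_port(hdmi) != dig_port);
}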
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@ -481,6 +549,9 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern enum transcoder
intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
@ -550,6 +621,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
unsigned int bpp,
unsigned int pitch);
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@ -573,12 +648,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
extern void intel_enable_ddi(struct intel_encoder *encoder);
extern void intel_disable_ddi(struct intel_encoder *encoder);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
extern void intel_ddi_pll_init(struct drm_device *dev);
extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
extern bool
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
#endif /* __INTEL_DRV_H__ */


@ -36,10 +36,15 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
}
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
struct intel_digital_port *intel_dig_port =
container_of(encoder, struct intel_digital_port, base.base);
return &intel_dig_port->hdmi;
}
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_hdmi, base);
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@ -754,16 +760,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
@ -786,6 +792,9 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
@ -814,6 +823,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
intel_encoder->type = INTEL_OUTPUT_HDMI;
}
return status;
@ -859,6 +869,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
@ -898,8 +910,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
return -EINVAL;
done:
if (intel_hdmi->base.base.crtc) {
struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
if (intel_dig_port->base.base.crtc) {
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
@ -914,12 +926,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_ddi_mode_set,
.disable = intel_encoder_noop,
};
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
@ -951,43 +957,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_broadcast_rgb_property(connector);
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
enum port port = intel_dig_port->port;
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_hdmi);
return;
}
intel_encoder = &intel_hdmi->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
intel_encoder->type = INTEL_OUTPUT_HDMI;
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@ -1007,8 +994,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
BUG();
}
intel_hdmi->sdvox_reg = sdvox_reg;
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
@ -1026,21 +1011,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
if (IS_HASWELL(dev)) {
intel_encoder->enable = intel_enable_ddi;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
drm_encoder_helper_add(&intel_encoder->base,
&intel_hdmi_helper_funcs_hsw);
} else {
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
drm_encoder_helper_add(&intel_encoder->base,
&intel_hdmi_helper_funcs);
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
if (IS_HASWELL(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_hdmi_add_properties(intel_hdmi, connector);
@ -1056,3 +1030,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
if (!intel_dig_port)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
}
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
intel_dig_port->port = port;
intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
intel_dig_port->dp.output_reg = 0;
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
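The init/init_connector split lets other code allocate one intel_digital_port that backs both output types; a rough sketch of such a caller, assuming a DDI-style setup path (the function and its calling context are illustrative):

/* One port structure feeds both connector types, which is why
 * intel_hdmi_init_connector() now takes an external dig_port. */
static void example_init_ddi_port(struct intel_digital_port *dig_port,
                                  struct intel_connector *hdmi_connector,
                                  struct intel_connector *dp_connector)
{
        intel_hdmi_init_connector(dig_port, hdmi_connector);
        intel_dp_init_connector(dig_port, dp_connector);
}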


@ -432,7 +432,7 @@ timeout:
I915_WRITE(GMBUS0 + reg_offset, 0);
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = true;
bus->force_bit = 1;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
out:
@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
bus->force_bit = true;
bus->force_bit = 1;
intel_gpio_setup(bus, port);
@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
bus->force_bit = force_bit;
bus->force_bit += force_bit ? 1 : -1;
DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
}
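Since force_bit is now a reference count rather than a flag, enables and disables must pair up; a hedged sketch of a well-behaved user (the transfer in the middle is elided):

/* Each enable is balanced by a disable, so the driver-internal
 * fallback in gmbus_xfer() (force_bit = 1) is never lost when a
 * nested user finishes. */
static void example_bitbang_section(struct i2c_adapter *adapter)
{
        intel_gmbus_force_bit(adapter, true);   /* force_bit++ */
        /* ... i2c transfers run via GPIO bit-banging here ... */
        intel_gmbus_force_bit(adapter, false);  /* force_bit-- */
}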
void intel_teardown_gmbus(struct drm_device *dev)


@ -40,28 +40,30 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
struct intel_lvds {
struct intel_lvds_connector {
struct intel_connector base;
struct notifier_block lid_notifier;
};
struct intel_lvds_encoder {
struct intel_encoder base;
struct edid *edid;
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
struct drm_display_mode *fixed_mode;
struct intel_lvds_connector *attached_connector;
};
static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_lvds, base.base);
return container_of(encoder, struct intel_lvds_encoder, base.base);
}
static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_lvds, base);
return container_of(connector, struct intel_lvds_connector, base.base);
}
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
if (intel_lvds->pfit_dirty) {
if (lvds_encoder->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
intel_lvds->pfit_control,
intel_lvds->pfit_pgm_ratios);
lvds_encoder->pfit_control,
lvds_encoder->pfit_pgm_ratios);
I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
intel_lvds->pfit_dirty = false;
I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
lvds_encoder->pfit_dirty = false;
}
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
if (intel_lvds->pfit_control) {
if (lvds_encoder->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
intel_lvds->pfit_dirty = true;
lvds_encoder->pfit_dirty = true;
}
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
return false;
}
if (intel_encoder_check_is_cloned(&intel_lvds->base))
if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
/*
@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
if (HAS_PCH_SPLIT(dev)) {
intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
return true;
}
@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
drm_mode_set_crtcinfo(adjusted_mode, 0);
switch (intel_lvds->fitting_mode) {
switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
@ -396,11 +402,11 @@ out:
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
if (pfit_control != intel_lvds->pfit_control ||
pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
intel_lvds->pfit_control = pfit_control;
intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
intel_lvds->pfit_dirty = true;
if (pfit_control != lvds_encoder->pfit_control ||
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
@ -449,14 +455,20 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
if (intel_lvds->edid)
return drm_add_edid_modes(connector, intel_lvds->edid);
/* use cached edid if we have one */
if (lvds_connector->base.edid) {
/* invalid edid */
if (IS_ERR(lvds_connector->base.edid))
return 0;
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
return drm_add_edid_modes(connector, lvds_connector->base.edid);
}
mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
if (mode == NULL)
return 0;
@ -496,10 +508,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector = dev_priv->int_lvds_connector;
struct intel_lvds_connector *lvds_connector =
container_of(nb, struct intel_lvds_connector, lid_notifier);
struct drm_connector *connector = &lvds_connector->base.base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
@ -508,9 +521,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
* check and update the status of LVDS connector after receiving
* the LID notification event.
*/
if (connector)
connector->status = connector->funcs->detect(connector,
false);
connector->status = connector->funcs->detect(connector, false);
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
@ -541,13 +552,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_connector *lvds_connector =
to_lvds_connector(connector);
intel_panel_destroy_backlight(dev);
if (lvds_connector->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);
intel_panel_destroy_backlight(connector->dev);
intel_panel_fini(&lvds_connector->base.panel);
if (dev_priv->lid_notifier.notifier_call)
acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@ -557,22 +573,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value)
{
struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
if (property == dev->mode_config.scaling_mode_property) {
struct drm_crtc *crtc = intel_lvds->base.base.crtc;
struct drm_crtc *crtc;
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (intel_lvds->fitting_mode == value) {
if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
intel_lvds->fitting_mode = value;
intel_connector->panel.fitting_mode = value;
crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
@ -912,12 +930,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
int pipe;
@ -945,23 +966,25 @@ bool intel_lvds_init(struct drm_device *dev)
}
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return false;
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
return false;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_lvds);
return false;
}
lvds_encoder->attached_connector = lvds_connector;
if (!HAS_PCH_SPLIT(dev)) {
intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
intel_encoder = &intel_lvds->base;
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@ -993,14 +1016,10 @@ bool intel_lvds_init(struct drm_device *dev)
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
* the initial panel fitting mode will be FULL_SCREEN.
*/
drm_connector_attach_property(&intel_connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
@ -1015,20 +1034,21 @@ bool intel_lvds_init(struct drm_device *dev)
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {
edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
intel_lvds->edid);
edid);
} else {
kfree(intel_lvds->edid);
intel_lvds->edid = NULL;
kfree(edid);
edid = ERR_PTR(-EINVAL);
}
} else {
edid = ERR_PTR(-ENOENT);
}
if (!intel_lvds->edid) {
lvds_connector->base.edid = edid;
if (IS_ERR_OR_NULL(edid)) {
/* Didn't get an EDID, so
* set wide sync ranges so we get all modes
* handed to valid_mode for checking
@ -1041,22 +1061,17 @@ bool intel_lvds_init(struct drm_device *dev)
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
intel_lvds->fixed_mode =
drm_mode_duplicate(dev, scan);
intel_find_lvds_downclock(dev,
intel_lvds->fixed_mode,
connector);
fixed_mode = drm_mode_duplicate(dev, scan);
intel_find_lvds_downclock(dev, fixed_mode, connector);
goto out;
}
}
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
intel_lvds->fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (intel_lvds->fixed_mode) {
intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
@ -1076,16 +1091,15 @@ bool intel_lvds_init(struct drm_device *dev)
crtc = intel_get_crtc_for_pipe(dev, pipe);
if (crtc && (lvds & LVDS_PORT_EN)) {
intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
if (intel_lvds->fixed_mode) {
intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
fixed_mode = intel_crtc_mode_get(dev, crtc);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
/* If we still don't have a mode after all that, give up. */
if (!intel_lvds->fixed_mode)
if (!fixed_mode)
goto failed;
out:
@ -1100,16 +1114,15 @@ out:
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
dev_priv->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
lvds_connector->lid_notifier.notifier_call = NULL;
}
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
intel_panel_setup_backlight(dev);
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
return true;
@ -1117,7 +1130,9 @@ failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
kfree(intel_connector);
if (fixed_mode)
drm_mode_destroy(dev, fixed_mode);
kfree(lvds_encoder);
kfree(lvds_connector);
return false;
}


@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
kfree(edid);
return ret;
}
@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret;
edid = drm_get_edid(connector, adapter);
if (!edid)
return 0;
return intel_connector_update_modes(connector, edid);
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
return ret;
}
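With the kfree() moved out of intel_connector_update_modes(), the fetcher owns the EDID and may cache it instead of freeing; a hypothetical caching caller (the function name is made up):

/* Stash the EDID in the connector so later get_modes calls can skip
 * the slow DDC fetch; it is freed when the connector is destroyed. */
static int example_cache_panel_edid(struct intel_connector *intel_connector,
                                    struct i2c_adapter *adapter)
{
        struct drm_connector *connector = &intel_connector->base;
        struct edid *edid = drm_get_edid(connector, adapter);

        if (!edid)
                return 0;

        intel_connector->edid = edid;
        return intel_connector_update_modes(connector, edid);
}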
static const struct drm_prop_enum_list force_audio_names[] = {


@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
u32 max;
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
if (!(bclp & ASLE_BCLP_VALID))
return ASLE_BACKLIGHT_FAILED;


@ -138,24 +138,24 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
if (dev_priv->saveBLC_PWM_CTL2 == 0) {
dev_priv->saveBLC_PWM_CTL2 = val;
if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
dev_priv->regfile.saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
I915_WRITE(BLC_PWM_PCH_CTL2,
dev_priv->saveBLC_PWM_CTL2);
val = dev_priv->saveBLC_PWM_CTL2;
dev_priv->regfile.saveBLC_PWM_CTL2);
val = dev_priv->regfile.saveBLC_PWM_CTL2;
}
} else {
val = I915_READ(BLC_PWM_CTL);
if (dev_priv->saveBLC_PWM_CTL == 0) {
dev_priv->saveBLC_PWM_CTL = val;
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
dev_priv->regfile.saveBLC_PWM_CTL = val;
dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
I915_WRITE(BLC_PWM_CTL,
dev_priv->saveBLC_PWM_CTL);
dev_priv->regfile.saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_CTL2,
dev_priv->saveBLC_PWM_CTL2);
val = dev_priv->saveBLC_PWM_CTL;
dev_priv->regfile.saveBLC_PWM_CTL2);
val = dev_priv->regfile.saveBLC_PWM_CTL;
}
}
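The saveBLC_PWM_CTL* fields move into a dedicated save/restore substruct as part of the dev_priv reorg; its assumed shape, reduced to the fields this hunk touches:

/* Sketch only: the real substruct in i915_drv.h carries the full set
 * of suspend/resume register copies, not just the backlight pair. */
struct example_regfile {
        u32 saveBLC_PWM_CTL;
        u32 saveBLC_PWM_CTL2;
        /* ... remaining saved display registers ... */
};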
@ -416,21 +416,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
.get_brightness = intel_panel_get_brightness,
};
int intel_panel_setup_backlight(struct drm_device *dev)
int intel_panel_setup_backlight(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
struct drm_connector *connector;
intel_panel_init_backlight(dev);
if (dev_priv->int_lvds_connector)
connector = dev_priv->int_lvds_connector;
else if (dev_priv->int_edp_connector)
connector = dev_priv->int_edp_connector;
else
return -ENODEV;
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
@ -460,9 +453,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
backlight_device_unregister(dev_priv->backlight);
}
#else
int intel_panel_setup_backlight(struct drm_device *dev)
int intel_panel_setup_backlight(struct drm_connector *connector)
{
intel_panel_init_backlight(dev);
intel_panel_init_backlight(connector->dev);
return 0;
}
@ -471,3 +464,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
return;
}
#endif
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode)
{
panel->fixed_mode = fixed_mode;
return 0;
}
void intel_panel_fini(struct intel_panel *panel)
{
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
}
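A minimal sketch of the new panel lifetime, assuming a setup path like intel_lvds_init(): the fixed mode is handed over once and destroyed by intel_panel_fini() at teardown (the wrapper function is hypothetical):

static int example_panel_lifetime(struct intel_connector *intel_connector,
                                  struct drm_display_mode *fixed_mode)
{
        int ret;

        ret = intel_panel_init(&intel_connector->panel, fixed_mode);
        if (ret)
                return ret;

        /* ... connector in use ... */

        intel_panel_fini(&intel_connector->panel); /* frees fixed_mode */
        return 0;
}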


@ -1468,9 +1468,12 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (crtc->enabled && crtc->fb) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
planea_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
} else
@ -1479,9 +1482,12 @@ static void i9xx_update_wm(struct drm_device *dev)
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (crtc->enabled && crtc->fb) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
planeb_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
enabled = crtc;
@ -1571,8 +1577,7 @@ static void i830_update_wm(struct drm_device *dev)
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
crtc->fb->bits_per_pixel / 8,
latency_ns);
4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@ -2323,7 +2328,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
@ -2404,12 +2409,12 @@ static void gen6_enable_rps(struct drm_device *dev)
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 pcu_mbox, rc6_mask = 0;
u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int i;
int i, ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
@ -2503,30 +2508,16 @@ static void gen6_enable_rps(struct drm_device *dev)
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_DATA, 0);
I915_WRITE(GEN6_PCODE_MAILBOX,
GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
pcu_mbox = I915_READ(GEN6_PCODE_DATA);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
if (!ret) {
pcu_mbox = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
} else {
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@ -2540,6 +2531,20 @@ static void gen6_enable_rps(struct drm_device *dev)
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
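For reference, the VID encode/decode arithmetic implied by the 450 mV check above, written as assumed macros (the real definitions live in i915_reg.h and may differ in form):

/* vid 41 <-> 450 mV, so a BIOS-programmed VID below 41 trips the
 * "update your BIOS" correction path in gen6_enable_rps(). */
#define EXAMPLE_DECODE_RC6_VID(vid)     (((vid) * 5) + 245)
#define EXAMPLE_ENCODE_RC6_VID(mv)      (((mv) - 245) / 5)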
gen6_gt_force_wake_put(dev_priv);
}
@ -2550,7 +2555,7 @@ static void gen6_update_ring_freq(struct drm_device *dev)
int gpu_freq, ia_freq, max_ia_freq;
int scaling_factor = 180;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
max_ia_freq = cpufreq_quick_get_max(0);
/*
@ -2581,17 +2586,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
I915_WRITE(GEN6_PCODE_DATA,
(ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
GEN6_PCODE_READY) == 0, 10)) {
DRM_ERROR("pcode write of freq table timed out\n");
continue;
}
sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq | gpu_freq);
}
}
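The open-coded mailbox sequences above collapse into sandybridge_pcode_read()/write(); a rough, assumption-laden sketch of the write side (the real helpers in intel_pm.c also log errors and follow the rps.hw_lock conventions):

static int example_pcode_write(struct drm_i915_private *dev_priv,
                               u8 mbox, u32 val)
{
        if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
                return -EAGAIN;                 /* mailbox still busy */

        I915_WRITE(GEN6_PCODE_DATA, val);
        I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

        if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
                     500))
                return -ETIMEDOUT;              /* firmware never acked */
        return 0;
}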
@ -2599,16 +2598,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->renderctx) {
i915_gem_object_unpin(dev_priv->renderctx);
drm_gem_object_unreference(&dev_priv->renderctx->base);
dev_priv->renderctx = NULL;
if (dev_priv->ips.renderctx) {
i915_gem_object_unpin(dev_priv->ips.renderctx);
drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
dev_priv->ips.renderctx = NULL;
}
if (dev_priv->pwrctx) {
i915_gem_object_unpin(dev_priv->pwrctx);
drm_gem_object_unreference(&dev_priv->pwrctx->base);
dev_priv->pwrctx = NULL;
if (dev_priv->ips.pwrctx) {
i915_gem_object_unpin(dev_priv->ips.pwrctx);
drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
dev_priv->ips.pwrctx = NULL;
}
}
@ -2634,14 +2633,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (!dev_priv->renderctx)
if (dev_priv->ips.renderctx == NULL)
dev_priv->ips.renderctx = intel_alloc_context_page(dev);
if (!dev_priv->ips.renderctx)
return -ENOMEM;
if (dev_priv->pwrctx == NULL)
dev_priv->pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->pwrctx) {
if (dev_priv->ips.pwrctx == NULL)
dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->ips.pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
@ -2679,7 +2678,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@ -2701,7 +2700,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@ -3310,37 +3309,72 @@ static void intel_init_emon(struct drm_device *dev)
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
mutex_lock(&dev_priv->rps.hw_lock);
gen6_disable_rps(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private,
rps.delayed_resume_work.work);
struct drm_device *dev = dev_priv->dev;
mutex_lock(&dev_priv->rps.hw_lock);
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
gen6_enable_rps(dev);
gen6_update_ring_freq(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
* to make resume and init faster.
*/
schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
round_jiffies_up_relative(HZ));
}
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/* Required for FBC */
dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
DPFCRUNIT_CLOCK_GATE_DISABLE |
DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
@ -3348,8 +3382,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
@ -3360,9 +3392,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
I915_WRITE(ILK_DSPCLK_GATE,
(I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE));
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
@ -3384,28 +3414,51 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPFC_DIS1 |
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
/* WaDisableRenderCachePipelinedFlush */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* WADP0ClockGatingDisable */
for_each_pipe(pipe) {
I915_WRITE(TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
@ -3460,10 +3513,10 @@ static void gen6_init_clock_gating(struct drm_device *dev)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) |
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
@ -3479,6 +3532,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
cpt_init_clock_gating(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@ -3497,9 +3552,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
@ -3510,12 +3562,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@ -3559,21 +3605,31 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t snpcr;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
/* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
/* WaDisablePSDDualDispatchEnable */
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
else
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@ -3582,7 +3638,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
GEN7_WA_L3_CHICKEN_MODE);
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else
I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* WaForceL3Serialization */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
@ -3626,34 +3693,53 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
cpt_init_clock_gating(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaDisableEarlyCull */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
/* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
/* WaForceL3Serialization */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* WaDisableDopClockGating */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* WaForceL3Serialization */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@ -3710,6 +3796,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
PLANEA_FLIPDONE_INT_EN);
/*
* WaDisableVLVClockGating_VBIIssue
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
@ -3728,6 +3821,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
/* WaDisableRenderCachePipelinedFlush */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
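CACHE_MODE_0 here and the FORCEWAKE paths below use masked-bit registers; a sketch of the convention behind _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() (names prefixed to mark them as illustrative):

/* The high 16 bits are a write-enable mask for the low 16 bits, so
 * single bits can be flipped without a read-modify-write cycle. */
#define EXAMPLE_MASKED_BIT_ENABLE(a)    (((a) << 16) | (a))
#define EXAMPLE_MASKED_BIT_DISABLE(a)   ((a) << 16)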
static void crestline_init_clock_gating(struct drm_device *dev)
@ -3783,44 +3880,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* Without this, mode sets may fail silently on FDI */
for_each_pipe(pipe)
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->display.init_clock_gating(dev);
if (dev_priv->display.init_pch_clock_gating)
dev_priv->display.init_pch_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for
@ -3846,7 +3910,7 @@ void intel_init_power_wells(struct drm_device *dev)
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
@ -3884,11 +3948,6 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
@ -3999,6 +4058,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
DRM_ERROR("GT thread status wait timed out\n");
}
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@ -4012,7 +4077,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE, 1);
I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@ -4022,6 +4087,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
@ -4035,7 +4106,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@ -4079,7 +4150,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
@ -4117,13 +4188,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
}
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
@ -4134,49 +4210,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
void intel_gt_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
}
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_init(&dev_priv->gt_lock);
intel_gt_reset(dev);
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
} else if (INTEL_INFO(dev)->gen >= 6) {
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
/* IVB configs may use multi-threaded forcewake */
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
u32 ecobus;
/* A small trick here - if the BIOS hasn't configured
 * MT forcewake, and if the device is in RC6, then
 * force_wake_mt_get will not wake the device and the
 * ECOBUS read will return zero, which will be
 * (correctly) interpreted by the test below as MT
 * forcewake being disabled.
 */
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = I915_READ_NOTRACE(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
DRM_DEBUG_KMS("Using MT version of forcewake\n");
dev_priv->gt.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put =
__gen6_gt_force_wake_mt_put;
}
}
}
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
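/* With the vtable populated above, callers no longer pick a forcewake
 * implementation themselves; they go through dev_priv->gt. A minimal
 * usage sketch (hypothetical helper, assuming the caller handles any
 * reference counting the driver layers on top of get/put):
 */
static u32 example_read_gt_reg(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	dev_priv->gt.force_wake_get(dev_priv);	/* force GT out of RC6 */
	val = I915_READ(reg);			/* register read is now safe */
	dev_priv->gt.force_wake_put(dev_priv);	/* let the GT sleep again */

	return val;
}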
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
return -EAGAIN;
}
I915_WRITE(GEN6_PCODE_DATA, *val);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500)) {
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
*val = I915_READ(GEN6_PCODE_DATA);
I915_WRITE(GEN6_PCODE_DATA, 0);
return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
return -EAGAIN;
}
I915_WRITE(GEN6_PCODE_DATA, val);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
I915_WRITE(GEN6_PCODE_DATA, 0);
return 0;
}
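/* Both mailbox routines above follow the same handshake: bail out if
 * GEN6_PCODE_READY is still set from a previous request, load the DATA
 * register, kick MAILBOX with READY | command, then poll for READY to
 * clear before trusting DATA. A minimal caller sketch; the mailbox id
 * below is hypothetical, for illustration only:
 */
#define EXAMPLE_PCODE_MBOX 0x5	/* hypothetical command id */

static int example_pcode_roundtrip(struct drm_i915_private *dev_priv, u32 *out)
{
	int ret;

	/* *out carries the request word in and the response back out */
	mutex_lock(&dev_priv->rps.hw_lock);	/* required by the WARN_ON */
	ret = sandybridge_pcode_read(dev_priv, EXAMPLE_PCODE_MBOX, out);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}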

View File

@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
ret = intel_ring_begin(ring, 4);
@ -964,7 +964,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
}
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 length,
unsigned flags)
{
int ret;
@ -975,7 +977,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
MI_BATCH_NON_SECURE_I965);
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
@ -984,7 +986,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
u32 offset, u32 len,
unsigned flags)
{
int ret;
@ -993,7 +996,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
@ -1003,7 +1006,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
u32 offset, u32 len,
unsigned flags)
{
int ret;
@ -1012,7 +1016,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
return 0;
@ -1075,6 +1079,29 @@ err:
return ret;
}
static int init_phys_hws_pga(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
addr = dev_priv->status_page_dmah->busaddr;
if (INTEL_INFO(ring->dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
return 0;
}
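/* Worked example for the address packing above, with a hypothetical bus
 * address: HWS_PGA takes a page-aligned physical address, so on gen4+
 * the bits above bit 31 are folded into bits 7:4 of the register. For
 * busaddr = 0x323456000, the low word is 0x23456000 and
 * (busaddr >> 28) & 0xf0 = 0x30, giving HWS_PGA = 0x23456030.
 */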
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
@ -1093,6 +1120,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ret = init_status_page(ring);
if (ret)
return ret;
} else {
BUG_ON(ring->id != RCS);
ret = init_phys_hws_pga(ring);
if (ret)
return ret;
}
obj = i915_gem_alloc_object(dev, ring->size);
@ -1391,10 +1423,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
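/* Per the Bspec quote above, the TLB-invalidate bit of MI_FLUSH_DW is
 * only honoured when a post-sync operation is also requested, which is
 * why the invalidate path now adds MI_FLUSH_DW_OP_STOREDW plus
 * MI_FLUSH_DW_STORE_INDEX: the flush performs a dword write into the
 * scratch slot of the hardware status page (I915_GEM_HWS_SCRATCH_ADDR),
 * satisfying the "1h or 3h" post-sync requirement as a side effect.
 */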
@ -1402,8 +1441,9 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
}
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
unsigned flags)
{
int ret;
@ -1411,7 +1451,30 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
if (ret)
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
intel_ring_emit(ring,
MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
return 0;
}
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
unsigned flags)
{
int ret;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
/* bit0-7 is the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
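/* The polarity in the dispatch functions above is easy to misread: the
 * hardware bit means "non-secure", so a secure (privileged) dispatch is
 * requested by leaving MI_BATCH_NON_SECURE_I965 (or the _HSW variant)
 * cleared when I915_DISPATCH_SECURE is set -- not by setting an extra bit.
 */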
@ -1432,10 +1495,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
return ret;
cmd = MI_FLUSH_DW;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB;
cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
@ -1490,7 +1560,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6)
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@ -1501,12 +1573,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
}
return intel_init_ring_buffer(dev, ring);
}
@ -1514,6 +1580,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
ring->name = "render ring";
ring->id = RCS;
@ -1551,16 +1618,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
if (!I915_NEED_GFX_HWS(dev))
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
ring->effective_size = ring->size;
if (IS_I830(ring->dev))
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
ring->virtual_start = ioremap_wc(start, size);
@ -1570,6 +1634,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
return -ENOMEM;
}
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_hws_pga(ring);
if (ret)
return ret;
}
return 0;
}
@ -1618,7 +1688,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
ring->init = init_ring_common;
return intel_init_ring_buffer(dev, ring);
}

View File

@ -81,7 +81,9 @@ struct intel_ring_buffer {
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
u32 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
@ -181,6 +183,8 @@ intel_read_status_page(struct intel_ring_buffer *ring,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

View File

@ -2072,17 +2072,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
else
mapping = &dev_priv->sdvo_mappings[1];
pin = GMBUS_PORT_DPB;
if (mapping->initialized)
if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
pin = mapping->i2c_pin;
else
pin = GMBUS_PORT_DPB;
if (intel_gmbus_is_port_valid(pin)) {
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
/* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
* our code totally fails once we start using gmbus. Hence fall back to
* bit banging for now. */
intel_gmbus_force_bit(sdvo->i2c, true);
}
/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
static void
intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
{
intel_gmbus_force_bit(sdvo->i2c, false);
}
static bool
@ -2658,10 +2665,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
kfree(intel_sdvo);
return false;
}
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
goto err_i2c_bus;
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
@ -2746,6 +2751,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);
err_i2c_bus:
intel_sdvo_unselect_i2c_bus(intel_sdvo);
kfree(intel_sdvo);
return false;

View File

@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
int pixel_size;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
sprctl = I915_READ(SPRCTL(pipe));
@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
pixel_size = 2;
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@ -127,18 +119,28 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
I915_WRITE(SPRLINOFF(pipe), offset);
}
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
sprsurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= sprsurf_offset;
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(SPRLINOFF(pipe), linear_offset);
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
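/* Worked example for the offset split above, with hypothetical values:
 * x = 100, y = 50, 32bpp (4 bytes/pixel) and a 4096-byte stride give
 * linear_offset = 50 * 4096 + 100 * 4 = 205200 bytes.
 * intel_gen4_compute_offset_xtiled() then pulls the tile-aligned part of
 * that into sprsurf_offset (added onto the SPRSURF base address) and
 * rewrites x/y so that only the small remainder is left in linear_offset
 * for SPRLINOFF.
 */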
@ -152,7 +154,8 @@ ivb_disable_plane(struct drm_plane *plane)
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
@ -225,8 +228,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
dvscntr = I915_READ(DVSCNTR(pipe));
@ -239,33 +244,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
pixel_size = 2;
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
BUG();
}
if (obj->tiling_mode != I915_TILING_NONE)
@ -289,18 +285,23 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
I915_WRITE(DVSLINOFF(pipe), offset);
}
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
dvssurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= dvssurf_offset;
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
else
I915_WRITE(DVSLINOFF(pipe), linear_offset);
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
@ -422,6 +423,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@ -436,7 +439,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
src_h = src_h >> 16;
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
if (crtc_x >= primary_w || crtc_y >= primary_h)
@ -446,6 +449,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
/* Sprite planes can be linear or x-tiled surfaces */
switch (obj->tiling_mode) {
case I915_TILING_NONE:
case I915_TILING_X:
break;
default:
return -EINVAL;
}
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
@ -472,6 +484,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (!crtc_w || !crtc_h) /* Again, nothing to display */
goto out;
/*
* We may not have a scaler, e.g. HSW does not have one any more
*/
if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
return -EINVAL;
/*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
@ -665,6 +683,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
@ -681,6 +700,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
break;
case 7:
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
intel_plane->can_scale = false;
else
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;

View File

@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
int dspcntr_reg = DSPCNTR(intel_crtc->plane);
int pipeconf = I915_READ(pipeconf_reg);
int dspcntr = I915_READ(dspcntr_reg);
int dspbase_reg = DSPADDR(intel_crtc->plane);
int xpos = 0x0, ypos = 0x0;
unsigned int xsize, ysize;
/* Pipe must be off here */
I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
/* Flush the plane changes */
I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
intel_flush_display_plane(dev_priv, intel_crtc->plane);
/* Wait for vblank for the disable to take effect */
if (IS_GEN2(dev))
@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
I915_WRITE(pipeconf_reg, pipeconf);
I915_WRITE(dspcntr_reg, dspcntr);
/* Flush the plane changes */
I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
intel_flush_display_plane(dev_priv, intel_crtc->plane);
}
j = 0;

View File

@ -34,8 +34,7 @@
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_LINK_STATUS_SIZE 6
#define DP_DPCD_SIZE 8
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
/***** general DP utility functions *****/
static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_LANE0_1_STATUS + (lane >> 1);
int s = (lane & 1) * 4;
u8 l = dp_link_status(link_status, i);
return (l >> s) & 0xf;
}
static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
int lane;
u8 lane_status;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
}
return true;
}
static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count)
{
u8 lane_align;
u8 lane_status;
int lane;
lane_align = dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < lane_count; lane++) {
lane_status = dp_get_lane_status(link_status, lane);
if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
return false;
}
return true;
}
static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
u8 l = dp_link_status(link_status, i);
return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
int lane;
for (lane = 0; lane < lane_count; lane++) {
u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
return (link_rate * lane_num * 8) / bpp;
}
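/* Worked example for the formula above, with hypothetical link
 * parameters: a 270000 (2.7 Gbps) link at 4 lanes carrying 24 bpp gives
 * (270000 * 4 * 8) / 24 = 360000, i.e. a 360 MHz maximum pixel clock.
 */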
static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
{
switch (dpcd[DP_MAX_LINK_RATE]) {
case DP_LINK_BW_1_62:
default:
return 162000;
case DP_LINK_BW_2_7:
return 270000;
case DP_LINK_BW_5_4:
return 540000;
}
}
static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
{
return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}
static u8 dp_get_dp_link_rate_coded(int link_rate)
{
switch (link_rate) {
case 162000:
default:
return DP_LINK_BW_1_62;
case 270000:
return DP_LINK_BW_2_7;
case 540000:
return DP_LINK_BW_5_4;
}
}
/***** radeon specific DP functions *****/
/* First get the min lane# when low rate is used according to pixel clock
@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
int pix_clock)
{
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = dp_get_max_link_rate(dpcd);
int max_lane_num = dp_get_max_lane_number(dpcd);
int max_link_rate = drm_dp_max_link_rate(dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
return 540000;
}
return dp_get_max_link_rate(dpcd);
return drm_dp_max_link_rate(dpcd);
}
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[25];
u8 msg[DP_DPCD_SIZE];
int ret, i;
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
DP_DPCD_SIZE, 0);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, 8);
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
DRM_DEBUG_KMS("DPCD: ");
for (i = 0; i < 8; i++)
for (i = 0; i < DP_DPCD_SIZE; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
int enc_id;
int dp_clock;
int dp_lane_count;
int rd_interval;
bool tp3_supported;
u8 dpcd[8];
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 train_set[4];
u8 link_status[DP_LINK_STATUS_SIZE];
u8 tries;
@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
/* set the link rate on the sink */
tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
/* start training on the source */
@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
voltage = 0xff;
while (1) {
if (dp_info->rd_interval == 0)
udelay(100);
else
mdelay(dp_info->rd_interval * 4);
drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
break;
}
@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
channel_eq = false;
while (1) {
if (dp_info->rd_interval == 0)
udelay(400);
else
mdelay(dp_info->rd_interval * 4);
drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
break;
}
@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
else
dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
dp_info.tp3_supported = true;
else
dp_info.tp3_supported = false;
memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
dp_info.rdev = rdev;
dp_info.encoder = encoder;
dp_info.connector = connector;

View File

@ -427,7 +427,7 @@ struct radeon_connector_atom_dig {
uint32_t igp_lane_info;
/* displayport */
struct radeon_i2c_chan *dp_i2c_bus;
u8 dpcd[8];
u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;

View File

@ -137,6 +137,8 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd2 *mode_cmd);

View File

@ -25,6 +25,7 @@
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/delay.h>
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@ -322,4 +323,34 @@ struct i2c_algo_dp_aux_data {
int
i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
#define DP_RECEIVER_CAP_SIZE 0xf
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
static inline int
drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
}
static inline u8
drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}
#endif /* _DRM_DP_HELPER_H_ */
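/* A minimal consumer sketch for the helpers declared above (hypothetical
 * function, assuming a DPCD block already cached by the driver's AUX
 * read path):
 */
static void example_print_dp_caps(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int max_rate = drm_dp_max_link_rate(dpcd);	/* 162000/270000/540000 */
	u8 max_lanes = drm_dp_max_lane_count(dpcd);	/* 1, 2 or 4 */
	u8 bw_code = drm_dp_link_rate_to_bw_code(max_rate);

	DRM_DEBUG_KMS("sink: link rate %d (bw code 0x%02x), %u lanes\n",
		      max_rate, bw_code, max_lanes);
}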

View File

@ -3,7 +3,7 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
const struct intel_gtt {
struct intel_gtt {
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
@ -17,6 +17,7 @@ const struct intel_gtt {
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
struct page *scratch_page;
/* for ppgtt PDE access */
u32 __iomem *gtt;
/* needed for ioremap in drm/i915 */
@ -39,10 +40,6 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY 2
/* New caching attributes for gen6/sandybridge */
#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)

View File

@ -306,6 +306,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
#define I915_PARAM_HAS_SECURE_BATCHES 23
typedef struct drm_i915_getparam {
int param;
@ -671,6 +672,11 @@ struct drm_i915_gem_execbuffer2 {
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
/** Request a privileged ("secure") batch buffer. Note only available for
* DRM_ROOT_ONLY | DRM_MASTER processes.
*/
#define I915_EXEC_SECURE (1<<9)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
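/* A minimal userspace sketch for the new flag (hypothetical helper;
 * error handling omitted, and the fd must belong to a root/DRM-master
 * client or the kernel will reject the secure batch):
 */
#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static void submit_secure_blit(int fd, struct drm_i915_gem_exec_object2 *objs,
			       uint32_t count, uint32_t batch_len)
{
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;	/* last entry is the batch */
	execbuf.buffer_count = count;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_BLT | I915_EXEC_SECURE; /* privileged batch */
	i915_execbuffer2_set_context_id(execbuf, 0);	  /* default context */

	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}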