Commit 83dbb15e authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (40 commits)
  vmwgfx: Snoop DMA transfers with non-covering sizes
  vmwgfx: Move the prefered mode first in the list
  vmwgfx: Unreference surface on cursor error path
  vmwgfx: Free prefered mode on error path
  vmwgfx: Use pointer return error codes
  vmwgfx: Fix hw cursor position
  vmwgfx: Infrastructure for explicit placement
  vmwgfx: Make the preferred autofit mode have a 60Hz vrefresh
  vmwgfx: Remove screen object active list
  vmwgfx: Screen object cleanups
  drm/radeon/kms: consolidate GART code, fix segfault after GPU lockup V2
  drm/radeon/kms: don't poll forever if MC GDDR link training fails
  drm/radeon/kms: fix DP setup on TRAVIS bridges
  drm/radeon/kms: set HPD polarity in hpd_init()
  drm/radeon/kms: add MSI module parameter
  drm/radeon/kms: Add MSI quirk for Dell RS690
  drm/radeon/kms: Add MSI quirk for HP RS690
  drm/radeon/kms: split MSI check into a separate function
  vmwgfx: Reinstate the update_layout ioctl
  drm/radeon/kms: always do extended edid probe
  ...
parents 6e6bc679 2ac86371
@@ -163,6 +163,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
 };
 
 static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -171,6 +172,7 @@ static struct drm_prop_enum_list drm_encoder_enum_list[] =
 	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
 char *drm_get_encoder_name(struct drm_encoder *encoder)
@@ -464,8 +466,10 @@ void drm_connector_init(struct drm_device *dev,
 	list_add_tail(&connector->head, &dev->mode_config.connector_list);
 	dev->mode_config.num_connector++;
 
-	drm_connector_attach_property(connector,
-				      dev->mode_config.edid_property, 0);
+	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+		drm_connector_attach_property(connector,
+					      dev->mode_config.edid_property,
+					      0);
 
 	drm_connector_attach_property(connector,
 				      dev->mode_config.dpms_property, 0);
@@ -70,7 +70,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
 	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
-	radeon_trace_points.o ni.o cayman_blit_shaders.o
+	radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -558,7 +558,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 		bpc = connector->display_info.bpc;
 	encoder_mode = atombios_get_encoder_mode(encoder);
 	if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
-	    radeon_encoder_is_dp_bridge(encoder)) {
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
 		if (connector) {
 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 			struct radeon_connector_atom_dig *dig_connector =
@@ -638,44 +638,29 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 			if (ss_enabled && ss->percentage)
 				args.v3.sInput.ucDispPllConfig |=
 					DISPPLL_CONFIG_SS_ENABLE;
-			if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
-			    radeon_encoder_is_dp_bridge(encoder)) {
+			if (ENCODER_MODE_IS_DP(encoder_mode)) {
+				args.v3.sInput.ucDispPllConfig |=
+					DISPPLL_CONFIG_COHERENT_MODE;
+				/* 16200 or 27000 */
+				args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 				struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-				if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+				if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
+					/* deep color support */
+					args.v3.sInput.usPixelClock =
+						cpu_to_le16((mode->clock * bpc / 8) / 10);
+				if (dig->coherent_mode)
 					args.v3.sInput.ucDispPllConfig |=
 						DISPPLL_CONFIG_COHERENT_MODE;
-					/* 16200 or 27000 */
-					args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-				} else {
-					if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
-						/* deep color support */
-						args.v3.sInput.usPixelClock =
-							cpu_to_le16((mode->clock * bpc / 8) / 10);
-					}
-					if (dig->coherent_mode)
-						args.v3.sInput.ucDispPllConfig |=
-							DISPPLL_CONFIG_COHERENT_MODE;
-					if (mode->clock > 165000)
-						args.v3.sInput.ucDispPllConfig |=
-							DISPPLL_CONFIG_DUAL_LINK;
-				}
-			} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-				if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+				if (mode->clock > 165000)
 					args.v3.sInput.ucDispPllConfig |=
-						DISPPLL_CONFIG_COHERENT_MODE;
-					/* 16200 or 27000 */
-					args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-				} else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
-					if (mode->clock > 165000)
-						args.v3.sInput.ucDispPllConfig |=
-							DISPPLL_CONFIG_DUAL_LINK;
-				}
+						DISPPLL_CONFIG_DUAL_LINK;
 			}
-			if (radeon_encoder_is_dp_bridge(encoder)) {
-				struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-				struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
-				args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
-			} else
+			if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+			    ENCODER_OBJECT_ID_NONE)
+				args.v3.sInput.ucExtTransmitterID =
+					radeon_encoder_get_dp_bridge_encoder_id(encoder);
+			else
 				args.v3.sInput.ucExtTransmitterID = 0;
 			atom_execute_table(rdev->mode_info.atom_context,
@@ -945,6 +930,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
 		bpc = connector->display_info.bpc;
 
 	switch (encoder_mode) {
+	case ATOM_ENCODER_MODE_DP_MST:
 	case ATOM_ENCODER_MODE_DP:
 		/* DP/eDP */
 		dp_clock = dig_connector->dp_clock / 10;
@@ -1450,7 +1436,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 		 * PPLL/DCPLL programming and only program the DP DTO for the
 		 * crtc virtual pixel clock.
 		 */
-		if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
 			if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
 				return ATOM_PPLL_INVALID;
 		}
@@ -482,7 +482,8 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
 	int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
 	int lane_num, max_pix_clock;
 
-	if (radeon_connector_encoder_is_dp_bridge(connector))
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG)
 		return 270000;
 
 	lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
@@ -553,17 +554,32 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
 
 	if (!ASIC_IS_DCE4(rdev))
 		return;
 
-	if (radeon_connector_encoder_is_dp_bridge(connector))
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG)
 		panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+	else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+		 ENCODER_OBJECT_ID_TRAVIS)
+		panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+	else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+		if (tmp & 1)
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+	}
 
 	atombios_dig_encoder_setup(encoder,
 				   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
 				   panel_mode);
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+	    (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+		radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+	}
 }
 
 void radeon_dp_set_link_config(struct drm_connector *connector,
@@ -353,6 +353,7 @@ void evergreen_hpd_init(struct radeon_device *rdev)
 		default:
 			break;
 		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	if (rdev->irq.installed)
 		evergreen_irq_set(rdev);
@@ -893,7 +894,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 	u32 tmp;
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -945,7 +946,6 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r;
 
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
@@ -965,14 +965,7 @@ void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void evergreen_pcie_gart_fini(struct radeon_device *rdev)
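The GART teardown paths touched by this merge (evergreen above; ni.c, r300.c and r600.c below) all drop the same open-coded reserve/kunmap/unpin/unreserve sequence in favour of a single shared call, per the "consolidate GART code" commit in the shortlog. Only the call site appears in this diff, so the helper below is a sketch inferred from the removed lines; the real body lives in radeon_gart.c and may differ in detail.

/* Sketch (not part of this diff): the consolidated unpin helper,
 * reconstructed from the removed call sites above. */
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL)
		return;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);	/* drop the CPU mapping */
		radeon_bo_unpin(rdev->gart.robj);	/* release the VRAM pin */
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;			/* table no longer mapped */
	}
}

Centralizing the sequence is also why the per-ASIC `int r;` declarations become dead and are removed in the disable paths above and below.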
@@ -3031,6 +3024,10 @@ static int evergreen_startup(struct radeon_device *rdev)
 		}
 	}
 
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
 	evergreen_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		evergreen_agp_enable(rdev);
@@ -3235,6 +3232,7 @@ void evergreen_fini(struct radeon_device *rdev)
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	evergreen_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
@@ -94,6 +94,15 @@ cp_set_surface_sync(struct radeon_device *rdev,
 	else
 		cp_coher_size = ((size + 255) >> 8);
 
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
+		 * to the RB directly. For IBs, the CP programs this as part of the
+		 * surface_sync packet.
+		 */
+		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+	}
 	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
 	radeon_ring_write(rdev, sync_type);
 	radeon_ring_write(rdev, cp_coher_size);
@@ -174,7 +183,7 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
 static void
 set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
-		 u64 gpu_addr)
+		 u64 gpu_addr, u32 size)
 {
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -196,6 +205,9 @@ set_tex_resource(struct radeon_device *rdev,
 	sq_tex_resource_word7 = format |
 		S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
 
+	cp_set_surface_sync(rdev,
+			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, sq_tex_resource_word0);
@@ -613,11 +625,13 @@ int evergreen_blit_init(struct radeon_device *rdev)
 	rdev->r600_blit.primitives.set_default_state = set_default_state;
 
 	rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
-	rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
 	rdev->r600_blit.ring_size_common += 5; /* done copy */
-	rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
 
 	rdev->r600_blit.ring_size_per_loop = 74;
+	if (rdev->family >= CHIP_CAYMAN)
+		rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
+
 	rdev->r600_blit.max_dim = 16384;
@@ -262,8 +262,11 @@ int ni_mc_load_microcode(struct radeon_device *rdev)
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
 
 		/* wait for training to complete */
-		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
-			udelay(10);
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
+				break;
+			udelay(1);
+		}
 
 		if (running)
 			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
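This is the "don't poll forever if MC GDDR link training fails" fix from the shortlog: an unbounded busy-wait on MEM_FALL_OUT_CMD becomes a poll bounded by rdev->usec_timeout. The same defensive pattern, reduced to a self-contained sketch; read_reg(), REG_STATUS, DONE_BIT and wait_for_done() are illustrative stand-ins, not radeon symbols.

#include <stdbool.h>
#include <stdint.h>

extern uint32_t read_reg(uint32_t reg);	/* stand-in for RREG32() */
extern void udelay(unsigned long usecs);

#define REG_STATUS 0x0			/* hypothetical status register */
#define DONE_BIT   (1u << 0)		/* hypothetical completion bit */

static bool wait_for_done(unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_reg(REG_STATUS) & DONE_BIT)
			return true;	/* condition met before the deadline */
		udelay(1);		/* back off 1us per iteration */
	}
	return false;			/* timed out; caller decides how to recover */
}

On timeout the loop simply falls through, so a wedged memory controller stalls driver load for a bounded time instead of hanging it forever.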
@@ -933,7 +936,7 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -978,8 +981,6 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
 void cayman_pcie_gart_disable(struct radeon_device *rdev)
 {
-	int r;
-
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
@@ -995,14 +996,7 @@ void cayman_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(VM_L2_CNTL2, 0);
 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
 	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void cayman_pcie_gart_fini(struct radeon_device *rdev)
@@ -1362,6 +1356,10 @@ static int cayman_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
 	evergreen_mc_program(rdev);
 	r = cayman_pcie_gart_enable(rdev);
 	if (r)
@@ -1557,6 +1555,7 @@ void cayman_fini(struct radeon_device *rdev)
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
@@ -537,6 +537,7 @@ void r100_hpd_init(struct radeon_device *rdev)
 		default:
 			break;
 		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	if (rdev->irq.installed)
 		r100_irq_set(rdev);
@@ -577,7 +578,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 {
 	int r;
 
-	if (rdev->gart.table.ram.ptr) {
+	if (rdev->gart.ptr) {
 		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
@@ -636,10 +637,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
 
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
+	u32 *gtt = rdev->gart.ptr;
+
 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
 		return -EINVAL;
 	}
-	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
+	gtt[i] = cpu_to_le32(lower_32_bits(addr));
 	return 0;
 }
@@ -74,7 +74,7 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
-	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+	void __iomem *ptr = rdev->gart.ptr;
 
 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
 		return -EINVAL;
@@ -93,7 +93,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj) {
+	if (rdev->gart.robj) {
 		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
@@ -116,7 +116,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 	uint32_t tmp;
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -154,7 +154,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 void rv370_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r;
 
 	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
@@ -163,14 +162,7 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void rv370_pcie_gart_fini(struct radeon_device *rdev)
@@ -763,13 +763,14 @@ void r600_hpd_init(struct radeon_device *rdev)
 	struct drm_device *dev = rdev->ddev;
 	struct drm_connector *connector;
 
-	if (ASIC_IS_DCE3(rdev)) {
-		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
-		if (ASIC_IS_DCE32(rdev))
-			tmp |= DC_HPDx_EN;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		if (ASIC_IS_DCE3(rdev)) {
+			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+			if (ASIC_IS_DCE32(rdev))
+				tmp |= DC_HPDx_EN;
 
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 			switch (radeon_connector->hpd.hpd) {
 			case RADEON_HPD_1:
 				WREG32(DC_HPD1_CONTROL, tmp);
@@ -799,10 +800,7 @@ void r600_hpd_init(struct radeon_device *rdev)
 			default:
 				break;
 			}
-		}
-	} else {
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		} else {
 			switch (radeon_connector->hpd.hpd) {
 			case RADEON_HPD_1:
 				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
@@ -820,6 +818,7 @@ void r600_hpd_init(struct radeon_device *rdev)
 				break;
 			}
 		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	if (rdev->irq.installed)
 		r600_irq_set(rdev);
@@ -897,7 +896,7 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	/* flush hdp cache so updates hit vram */
 	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
 	    !(rdev->flags & RADEON_IS_AGP)) {
-		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+		void __iomem *ptr = (void *)rdev->gart.ptr;
 		u32 tmp;
 
 		/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
@@ -932,7 +931,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev)
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj) {
+	if (rdev->gart.robj) {
 		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
@@ -949,7 +948,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 	u32 tmp;
 	int r, i;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -1004,7 +1003,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 void r600_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i, r;
+	int i;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -1031,14 +1030,7 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void r600_pcie_gart_fini(struct radeon_device *rdev)
@@ -1138,7 +1130,7 @@ static void r600_mc_program(struct radeon_device *rdev)
 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
 	}
-	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
@@ -1277,6 +1269,53 @@ int r600_mc_init(struct radeon_device *rdev)
 	return 0;
 }
 
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     &rdev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->vram_scratch.robj,
+			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->vram_scratch.robj,
+			   (void **)&rdev->vram_scratch.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+	radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+	return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->vram_scratch.robj);
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+	}
+	radeon_bo_unref(&rdev->vram_scratch.robj);
+}
+
 /* We doesn't check that the GPU really needs a reset we simply do the
  * reset, it's up to the caller to determine if the GPU needs one. We
  * might add an helper function to check that.
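The scratch page above pairs with the r600_mc_program() hunk earlier: MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR now points at a driver-owned VRAM page, so stray MC accesses (for example after a GPU lockup) land somewhere harmless instead of at address 0. The expected call pattern mirrors the evergreen/cayman startup and fini hunks above; asic_startup() and asic_fini() below are illustrative stand-ins for the per-ASIC functions, with error handling trimmed.

/* Sketch of how the scratch object is threaded through an ASIC's
 * startup/teardown, mirroring the hunks above. */
static int asic_startup(struct radeon_device *rdev)
{
	int r;

	r = r600_vram_scratch_init(rdev);	/* create, pin and kmap the page */
	if (r)
		return r;
	evergreen_mc_program(rdev);		/* MC default aperture now points at it */
	/* ... rest of startup ... */
	return 0;
}

static void asic_fini(struct radeon_device *rdev)
{
	/* ... GART teardown first, as in evergreen_fini()/cayman_fini() ... */
	r600_vram_scratch_fini(rdev);		/* unpin and free the scratch page */
	/* ... remaining teardown ... */
}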
@@ -2332,6 +2371,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
 			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+		/* flush read cache over gart */
+		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+					PACKET3_VC_ACTION_ENA |
+					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(rdev, 0xFFFFFFFF);
+		radeon_ring_write(rdev, 0);
+		radeon_ring_write(rdev, 10); /* poll interval */
 		/* EVENT_WRITE_EOP - flush caches, send int */
 		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
@@ -2340,6 +2387,14 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 		radeon_ring_write(rdev, fence->seq);
 		radeon_ring_write(rdev, 0);
 	} else {
+		/* flush read cache over gart */
+		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+					PACKET3_VC_ACTION_ENA |