drm/radeon: replace the per ring mutex with a global one

A single global mutex for ring submissions seems sufficient.

Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Author: Christian König, 2012-05-09 15:34:45 +02:00 (committed by Dave Airlie)
parent 133f4cb336
commit d6999bc7b5
5 changed files with 41 additions and 45 deletions
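
Before reading the diff: the per-ring struct mutex is dropped from struct radeon_ring, a single ring_lock is added to struct radeon_device, and every site that used to take ring->mutex now takes rdev->ring_lock, so all ring submissions serialize on one global mutex held from radeon_ring_lock() until radeon_ring_unlock_commit() or radeon_ring_unlock_undo(). The stand-alone C sketch below mirrors only that pattern; the toy_* types, the pthread mutex and the submit_one_nop() helper are invented for illustration and are not driver code.

/*
 * Minimal sketch of the new locking scheme: one global lock in the device,
 * none in the rings. Toy names throughout -- not the radeon driver.
 */
#include <pthread.h>
#include <stdint.h>

#define NUM_RINGS 3

struct toy_ring {
	uint32_t wptr;		/* write pointer, advanced while the lock is held */
	uint32_t wptr_old;	/* saved copy so an error path can roll back */
};

struct toy_device {
	pthread_mutex_t ring_lock;	/* the single global lock */
	struct toy_ring ring[NUM_RINGS];
};

/* Take the global lock and remember where the ring started. */
static void ring_lock(struct toy_device *dev, int i)
{
	pthread_mutex_lock(&dev->ring_lock);
	dev->ring[i].wptr_old = dev->ring[i].wptr;
}

/* Publish what was written, then release the global lock. */
static void ring_unlock_commit(struct toy_device *dev)
{
	/* a real driver would also bump the hardware write pointer here */
	pthread_mutex_unlock(&dev->ring_lock);
}

/* Error path: roll the ring back, then release the global lock. */
static void ring_unlock_undo(struct toy_device *dev, int i)
{
	dev->ring[i].wptr = dev->ring[i].wptr_old;
	pthread_mutex_unlock(&dev->ring_lock);
}

/* One submission; everything between lock and commit is serialized globally. */
static void submit_one_nop(struct toy_device *dev, int i)
{
	ring_lock(dev, i);
	dev->ring[i].wptr += 1;		/* "write" one dword */
	ring_unlock_commit(dev);
}

int main(void)
{
	static struct toy_device dev;

	pthread_mutex_init(&dev.ring_lock, NULL);
	submit_one_nop(&dev, 0);
	ring_lock(&dev, 1);
	ring_unlock_undo(&dev, 1);	/* e.g. an allocation failed */
	return 0;
}

With one lock, code that has to touch several rings at once (radeon_pm_set_clocks() and radeon_semaphore_sync_rings() in the hunks below) takes a single mutex instead of iterating over per-ring mutexes, which also removes any question of lock ordering between rings.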

drivers/gpu/drm/radeon/radeon.h

@@ -676,7 +676,6 @@ struct radeon_ring {
 	uint64_t gpu_addr;
 	uint32_t align_mask;
 	uint32_t ptr_mask;
-	struct mutex mutex;
 	bool ready;
 	u32 ptr_reg_shift;
 	u32 ptr_reg_mask;
@@ -815,6 +814,7 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsign
 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
 void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
 void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_undo(struct radeon_ring *ring);
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
 void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
@@ -1534,6 +1534,7 @@ struct radeon_device {
 	rwlock_t fence_lock;
 	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
 	struct radeon_semaphore_driver semaphore_drv;
+	struct mutex ring_lock;
 	struct radeon_ring ring[RADEON_NUM_RINGS];
 	struct radeon_ib_pool ib_pool;
 	struct radeon_irq irq;

drivers/gpu/drm/radeon/radeon_device.c

@@ -724,8 +724,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
 	radeon_mutex_init(&rdev->ib_pool.mutex);
-	for (i = 0; i < RADEON_NUM_RINGS; ++i)
-		mutex_init(&rdev->ring[i].mutex);
+	mutex_init(&rdev->ring_lock);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
 	if (rdev->family >= CHIP_R600)
 		spin_lock_init(&rdev->ih.lock);

drivers/gpu/drm/radeon/radeon_pm.c

@@ -252,10 +252,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	mutex_lock(&rdev->ddev->struct_mutex);
 	mutex_lock(&rdev->vram_mutex);
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (rdev->ring[i].ring_obj)
-			mutex_lock(&rdev->ring[i].mutex);
-	}
+	mutex_lock(&rdev->ring_lock);
 	/* gui idle int has issues on older chips it seems */
 	if (rdev->family >= CHIP_R600) {
@@ -311,10 +308,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		if (rdev->ring[i].ring_obj)
-			mutex_unlock(&rdev->ring[i].mutex);
-	}
+	mutex_unlock(&rdev->ring_lock);
 	mutex_unlock(&rdev->vram_mutex);
 	mutex_unlock(&rdev->ddev->struct_mutex);
 }

drivers/gpu/drm/radeon/radeon_ring.c

@@ -346,9 +346,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 		if (ndw < ring->ring_free_dw) {
 			break;
 		}
-		mutex_unlock(&ring->mutex);
+		mutex_unlock(&rdev->ring_lock);
 		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
-		mutex_lock(&ring->mutex);
+		mutex_lock(&rdev->ring_lock);
 		if (r)
 			return r;
 	}
@@ -361,10 +361,10 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
 {
 	int r;
-	mutex_lock(&ring->mutex);
+	mutex_lock(&rdev->ring_lock);
 	r = radeon_ring_alloc(rdev, ring, ndw);
 	if (r) {
-		mutex_unlock(&ring->mutex);
+		mutex_unlock(&rdev->ring_lock);
 		return r;
 	}
 	return 0;
@@ -389,20 +389,25 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	radeon_ring_commit(rdev, ring);
-	mutex_unlock(&ring->mutex);
+	mutex_unlock(&rdev->ring_lock);
 }
+void radeon_ring_undo(struct radeon_ring *ring)
+{
+	ring->wptr = ring->wptr_old;
+}
 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	ring->wptr = ring->wptr_old;
-	mutex_unlock(&ring->mutex);
+	radeon_ring_undo(ring);
+	mutex_unlock(&rdev->ring_lock);
 }
 void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
 {
 	int r;
-	mutex_lock(&ring->mutex);
+	mutex_lock(&rdev->ring_lock);
 	radeon_ring_free_size(rdev, ring);
 	if (ring->rptr == ring->wptr) {
 		r = radeon_ring_alloc(rdev, ring, 1);
@@ -411,7 +416,7 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
 			radeon_ring_commit(rdev, ring);
 		}
 	}
-	mutex_unlock(&ring->mutex);
+	mutex_unlock(&rdev->ring_lock);
 }
 void radeon_ring_lockup_update(struct radeon_ring *ring)
@@ -520,11 +525,12 @@ void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
 	int r;
 	struct radeon_bo *ring_obj;
-	mutex_lock(&ring->mutex);
+	mutex_lock(&rdev->ring_lock);
 	ring_obj = ring->ring_obj;
+	ring->ready = false;
 	ring->ring = NULL;
 	ring->ring_obj = NULL;
-	mutex_unlock(&ring->mutex);
+	mutex_unlock(&rdev->ring_lock);
 	if (ring_obj) {
 		r = radeon_bo_reserve(ring_obj, false);

drivers/gpu/drm/radeon/radeon_semaphore.c

@@ -39,7 +39,6 @@ static int radeon_semaphore_add_bo(struct radeon_device *rdev)
 	uint32_t *cpu_ptr;
 	int r, i;
 
-
 	bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
 	if (bo == NULL) {
 		return -ENOMEM;
@@ -154,13 +153,17 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				bool sync_to[RADEON_NUM_RINGS],
 				int dst_ring)
 {
-	int i, r;
+	int i = 0, r;
+	mutex_lock(&rdev->ring_lock);
+	r = radeon_ring_alloc(rdev, &rdev->ring[dst_ring], RADEON_NUM_RINGS * 8);
+	if (r) {
+		goto error;
+	}
 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		unsigned num_ops = i == dst_ring ? RADEON_NUM_RINGS : 1;
-		/* don't lock unused rings */
-		if (!sync_to[i] && i != dst_ring)
+		/* no need to sync to our own or unused rings */
+		if (!sync_to[i] || i == dst_ring)
 			continue;
 		/* prevent GPU deadlocks */
@@ -170,28 +173,19 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 			goto error;
 		}
-		r = radeon_ring_lock(rdev, &rdev->ring[i], num_ops * 8);
-		if (r)
+		r = radeon_ring_alloc(rdev, &rdev->ring[i], 8);
+		if (r) {
 			goto error;
-	}
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		/* no need to sync to our own or unused rings */
-		if (!sync_to[i] || i == dst_ring)
-			continue;
+		}
 		radeon_semaphore_emit_signal(rdev, i, semaphore);
 		radeon_semaphore_emit_wait(rdev, dst_ring, semaphore);
+		radeon_ring_commit(rdev, &rdev->ring[i]);
 	}
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		/* don't unlock unused rings */
-		if (!sync_to[i] && i != dst_ring)
-			continue;
-		radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
-	}
+	radeon_ring_commit(rdev, &rdev->ring[dst_ring]);
+	mutex_unlock(&rdev->ring_lock);
 	return 0;
@@ -199,9 +193,11 @@ error:
 	/* unlock all locks taken so far */
 	for (--i; i >= 0; --i) {
 		if (sync_to[i] || i == dst_ring) {
-			radeon_ring_unlock_undo(rdev, &rdev->ring[i]);
+			radeon_ring_undo(&rdev->ring[i]);
 		}
 	}
+	radeon_ring_undo(&rdev->ring[dst_ring]);
+	mutex_unlock(&rdev->ring_lock);
 	return r;
 }
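
The semaphore hunks are where the global lock changes the logic rather than just the lock name: instead of locking every ring to be synced up front and unlocking them all afterwards, the new radeon_semaphore_sync_rings() takes rdev->ring_lock once, reserves space on the destination ring, then for each ring that needs syncing emits a semaphore signal on that ring and a matching wait on the destination ring and commits it, finally commits the destination ring, and on failure rolls everything back with radeon_ring_undo() before the single unlock. The toy program below models only that flow; the ring_*/sync_rings names, the fixed-size rings and the printf stand-ins are invented for the example and are not the driver code.

/*
 * Toy model of the post-commit sync flow: one global lock, a signal/wait pair
 * per source ring, and write-pointer rollback on error. Invented names.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3
#define RING_SIZE 16u

struct toy_ring {
	unsigned wptr;		/* next free slot */
	unsigned wptr_old;	/* saved so an error path can roll back */
};

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;	/* the one global lock */
static struct toy_ring rings[NUM_RINGS];

/* Reserve `need` slots on ring i; fail if the toy ring is "full". */
static int ring_begin(int i, unsigned need)
{
	rings[i].wptr_old = rings[i].wptr;
	return (rings[i].wptr + need > RING_SIZE) ? -1 : 0;
}

static void ring_emit(int i, const char *op)
{
	rings[i].wptr++;
	printf("ring %d: %s\n", i, op);
}

static void ring_undo(int i)
{
	rings[i].wptr = rings[i].wptr_old;
}

/* Make ring `dst` wait for every ring flagged in sync_to[]. */
static int sync_rings(bool sync_to[NUM_RINGS], int dst)
{
	int i = 0;

	pthread_mutex_lock(&ring_lock);
	if (ring_begin(dst, NUM_RINGS) != 0)	/* room for one wait per source */
		goto error;

	for (i = 0; i < NUM_RINGS; ++i) {
		if (!sync_to[i] || i == dst)
			continue;		/* nothing to wait for here */
		if (ring_begin(i, 1) != 0)
			goto error;
		ring_emit(i, "semaphore signal");	/* source ring signals ... */
		ring_emit(dst, "semaphore wait");	/* ... destination ring waits */
	}
	pthread_mutex_unlock(&ring_lock);
	return 0;

error:
	/* undo every source ring touched so far, then the destination ring */
	for (--i; i >= 0; --i)
		if (sync_to[i] || i == dst)
			ring_undo(i);
	ring_undo(dst);
	pthread_mutex_unlock(&ring_lock);
	return -1;
}

int main(void)
{
	bool sync_to[NUM_RINGS] = { true, false, true };

	return sync_rings(sync_to, 1) ? 1 : 0;
}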