[PATCH] lockdep: clean up rwsems

Clean up rwsems.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Authored by Ingo Molnar on 2006-07-03 00:24:29 -07:00; committed by Linus Torvalds
parent 8b3db9c542
commit c4e05116a2
6 changed files with 109 additions and 128 deletions
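
For orientation, a minimal sketch of the reader/writer semaphore API whose implementation these six files carry. The down_read()/up_read() and down_write()/up_write() calls are the real interface touched by the patch; example_lock, example_data and the two wrapper functions are illustrative only, not part of the commit.

#include <linux/rwsem.h>

/* Hypothetical shared state; only the rwsem calls are the real API. */
static DECLARE_RWSEM(example_lock);
static int example_data;

int example_read(void)
{
        int val;

        down_read(&example_lock);       /* shared: many readers may hold the sem */
        val = example_data;
        up_read(&example_lock);
        return val;
}

void example_write(int val)
{
        down_write(&example_lock);      /* exclusive: excludes readers and writers */
        example_data = val;
        up_write(&example_lock);
}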

include/asm-i386/rwsem.h

@@ -61,23 +61,11 @@ struct rw_semaphore {
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
spinlock_t wait_lock;
struct list_head wait_list;
#if RWSEM_DEBUG
int debug;
#endif
};
/*
* initialisation
*/
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT , 0
#else
#define __RWSEM_DEBUG_INIT /* */
#endif
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
__RWSEM_DEBUG_INIT }
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
@@ -87,9 +75,6 @@ static inline void init_rwsem(struct rw_semaphore *sem)
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
sem->debug = 0;
#endif
}
/*
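
With the debug field and __RWSEM_DEBUG_INIT gone, an rwsem is reduced to its count, wait_lock and wait_list, and both initialisation styles shrink to match. A short sketch of the two ways a caller sets one up; struct example_device and example_device_setup() are hypothetical:

/* static initialisation through the simplified __RWSEM_INITIALIZER() */
static DECLARE_RWSEM(example_sem);

/* run-time initialisation, e.g. for an rwsem embedded in a structure */
struct example_device {
        struct rw_semaphore lock;
};

static void example_device_setup(struct example_device *dev)
{
        init_rwsem(&dev->lock);
}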

include/linux/rwsem-spinlock.h

@@ -32,22 +32,10 @@ struct rw_semaphore {
__s32 activity;
spinlock_t wait_lock;
struct list_head wait_list;
#if RWSEM_DEBUG
int debug;
#endif
};
/*
* initialisation
*/
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT , 0
#else
#define __RWSEM_DEBUG_INIT /* */
#endif
#define __RWSEM_INITIALIZER(name) \
{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }
{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
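
Read together, the surviving lines above give the whole of the spinlock-based variant of the structure once the debug member is dropped; reconstructed here for reference (indentation is mine):

struct rw_semaphore {
        __s32                   activity;
        spinlock_t              wait_lock;
        struct list_head        wait_list;
};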

include/linux/rwsem.h

@@ -9,8 +9,6 @@
#include <linux/linkage.h>
#define RWSEM_DEBUG 0
#ifdef __KERNEL__
#include <linux/types.h>
@@ -26,23 +24,13 @@ struct rw_semaphore;
#include <asm/rwsem.h> /* use an arch-specific implementation */
#endif
#ifndef rwsemtrace
#if RWSEM_DEBUG
extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
#else
#define rwsemtrace(SEM,FMT)
#endif
#endif
/*
* lock for reading
*/
static inline void down_read(struct rw_semaphore *sem)
{
might_sleep();
rwsemtrace(sem,"Entering down_read");
__down_read(sem);
rwsemtrace(sem,"Leaving down_read");
}
/*
@@ -51,9 +39,7 @@ static inline void down_read(struct rw_semaphore *sem)
static inline int down_read_trylock(struct rw_semaphore *sem)
{
int ret;
rwsemtrace(sem,"Entering down_read_trylock");
ret = __down_read_trylock(sem);
rwsemtrace(sem,"Leaving down_read_trylock");
return ret;
}
@@ -63,9 +49,7 @@ static inline int down_read_trylock(struct rw_semaphore *sem)
static inline void down_write(struct rw_semaphore *sem)
{
might_sleep();
rwsemtrace(sem,"Entering down_write");
__down_write(sem);
rwsemtrace(sem,"Leaving down_write");
}
/*
@@ -74,9 +58,7 @@ static inline void down_write(struct rw_semaphore *sem)
static inline int down_write_trylock(struct rw_semaphore *sem)
{
int ret;
rwsemtrace(sem,"Entering down_write_trylock");
ret = __down_write_trylock(sem);
rwsemtrace(sem,"Leaving down_write_trylock");
return ret;
}
@@ -85,9 +67,7 @@ static inline int down_write_trylock(struct rw_semaphore *sem)
*/
static inline void up_read(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering up_read");
__up_read(sem);
rwsemtrace(sem,"Leaving up_read");
}
/*
@@ -95,9 +75,7 @@ static inline void up_read(struct rw_semaphore *sem)
*/
static inline void up_write(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering up_write");
__up_write(sem);
rwsemtrace(sem,"Leaving up_write");
}
/*
@@ -105,9 +83,7 @@ static inline void up_write(struct rw_semaphore *sem)
*/
static inline void downgrade_write(struct rw_semaphore *sem)
{
rwsemtrace(sem,"Entering downgrade_write");
__downgrade_write(sem);
rwsemtrace(sem,"Leaving downgrade_write");
}
#endif /* __KERNEL__ */
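
As the comments above note, both trylock variants return 1 on success and 0 on contention, so callers branch on the return value instead of sleeping. A hypothetical caller; example_lock and example_try_report() are illustrative:

#include <linux/errno.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_lock);

int example_try_report(void)
{
        if (!down_read_trylock(&example_lock))
                return -EBUSY;          /* a writer holds or is queued on the sem */

        /* ... read the shared state ... */

        up_read(&example_lock);
        return 0;
}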

kernel/rwsem.c (new file, 105 lines)

@@ -0,0 +1,105 @@
/* kernel/rwsem.c: R/W semaphores, public implementation
*
* Written by David Howells (dhowells@redhat.com).
* Derived from asm-i386/semaphore.h
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/system.h>
#include <asm/atomic.h>
/*
* lock for reading
*/
void down_read(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
__down_read(sem);
}
EXPORT_SYMBOL(down_read);
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
int down_read_trylock(struct rw_semaphore *sem)
{
int ret = __down_read_trylock(sem);
if (ret == 1)
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(down_read_trylock);
/*
* lock for writing
*/
void down_write(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
__down_write(sem);
}
EXPORT_SYMBOL(down_write);
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
int down_write_trylock(struct rw_semaphore *sem)
{
int ret = __down_write_trylock(sem);
if (ret == 1)
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
return ret;
}
EXPORT_SYMBOL(down_write_trylock);
/*
* release a read lock
*/
void up_read(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
__up_read(sem);
}
EXPORT_SYMBOL(up_read);
/*
* release a write lock
*/
void up_write(struct rw_semaphore *sem)
{
rwsem_release(&sem->dep_map, 1, _RET_IP_);
__up_write(sem);
}
EXPORT_SYMBOL(up_write);
/*
* downgrade write lock to read lock
*/
void downgrade_write(struct rw_semaphore *sem)
{
/*
* lockdep: a downgraded write will live on as a write
* dependency.
*/
__downgrade_write(sem);
}
EXPORT_SYMBOL(downgrade_write);
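
The rwsem_acquire_read()/rwsem_acquire()/rwsem_release() calls above report every acquisition and release to lockdep, which is what lets it warn about inconsistent ordering between semaphores. A deliberately broken, purely hypothetical pattern of the kind those annotations catch; lock_a, lock_b and the two paths are illustrative:

static DECLARE_RWSEM(lock_a);
static DECLARE_RWSEM(lock_b);

void path_one(void)
{
        down_write(&lock_a);
        down_write(&lock_b);            /* established order: A then B */
        /* ... */
        up_write(&lock_b);
        up_write(&lock_a);
}

void path_two(void)
{
        down_write(&lock_b);
        down_write(&lock_a);            /* B then A: lockdep reports the inversion */
        /* ... */
        up_write(&lock_a);
        up_write(&lock_b);
}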

lib/rwsem-spinlock.c

@@ -17,16 +17,6 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
#if RWSEM_DEBUG
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
if (sem->debug)
printk("[%d] %s({%d,%d})\n",
current->pid, str, sem->activity,
list_empty(&sem->wait_list) ? 0 : 1);
}
#endif
/*
* initialise the semaphore
*/
@@ -35,9 +25,6 @@ void fastcall init_rwsem(struct rw_semaphore *sem)
sem->activity = 0;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
sem->debug = 0;
#endif
}
/*
@@ -56,8 +43,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
struct task_struct *tsk;
int woken;
rwsemtrace(sem, "Entering __rwsem_do_wake");
waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
if (!wakewrite) {
@@ -104,7 +89,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
sem->activity += woken;
out:
rwsemtrace(sem, "Leaving __rwsem_do_wake");
return sem;
}
@@ -138,8 +122,6 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
struct rwsem_waiter waiter;
struct task_struct *tsk;
rwsemtrace(sem, "Entering __down_read");
spin_lock_irq(&sem->wait_lock);
if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -171,9 +153,8 @@ void fastcall __sched __down_read(struct rw_semaphore *sem)
}
tsk->state = TASK_RUNNING;
out:
rwsemtrace(sem, "Leaving __down_read");
;
}
/*
@@ -184,7 +165,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
unsigned long flags;
int ret = 0;
rwsemtrace(sem, "Entering __down_read_trylock");
spin_lock_irqsave(&sem->wait_lock, flags);
@@ -196,7 +176,6 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __down_read_trylock");
return ret;
}
@@ -209,8 +188,6 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
struct rwsem_waiter waiter;
struct task_struct *tsk;
rwsemtrace(sem, "Entering __down_write");
spin_lock_irq(&sem->wait_lock);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -242,9 +219,8 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
}
tsk->state = TASK_RUNNING;
out:
rwsemtrace(sem, "Leaving __down_write");
;
}
/*
@@ -255,8 +231,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
unsigned long flags;
int ret = 0;
rwsemtrace(sem, "Entering __down_write_trylock");
spin_lock_irqsave(&sem->wait_lock, flags);
if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -267,7 +241,6 @@ int fastcall __down_write_trylock(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __down_write_trylock");
return ret;
}
@@ -278,16 +251,12 @@ void fastcall __up_read(struct rw_semaphore *sem)
{
unsigned long flags;
rwsemtrace(sem, "Entering __up_read");
spin_lock_irqsave(&sem->wait_lock, flags);
if (--sem->activity == 0 && !list_empty(&sem->wait_list))
sem = __rwsem_wake_one_writer(sem);
spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __up_read");
}
/*
@@ -297,8 +266,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
{
unsigned long flags;
rwsemtrace(sem, "Entering __up_write");
spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 0;
@@ -306,8 +273,6 @@ void fastcall __up_write(struct rw_semaphore *sem)
sem = __rwsem_do_wake(sem, 1);
spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __up_write");
}
/*
@@ -318,8 +283,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
{
unsigned long flags;
rwsemtrace(sem, "Entering __downgrade_write");
spin_lock_irqsave(&sem->wait_lock, flags);
sem->activity = 1;
@@ -327,8 +290,6 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
sem = __rwsem_do_wake(sem, 0);
spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving __downgrade_write");
}
EXPORT_SYMBOL(init_rwsem);
@@ -339,6 +300,3 @@ EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif
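
This file also loses the trace calls in __downgrade_write(); for context, a hypothetical sketch of how a caller uses the downgrade operation itself, which the patch leaves functionally unchanged:

static DECLARE_RWSEM(example_lock);

void example_update_then_scan(void)
{
        down_write(&example_lock);
        /* ... exclusive update of the shared data ... */

        downgrade_write(&example_lock); /* keep the sem held, let readers back in */

        /* ... read-only work, concurrent with other readers ... */
        up_read(&example_lock);         /* a downgraded hold is released as a read hold */
}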

lib/rwsem.c

@@ -16,17 +16,6 @@ struct rwsem_waiter {
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
#if RWSEM_DEBUG
#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
printk("sem=%p\n", sem);
printk("(sem)=%08lx\n", sem->count);
if (sem->debug)
printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
}
#endif
/*
* handle the lock release when processes blocked on it that can now run
* - if we come here from up_xxxx(), then:
@@ -45,8 +34,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
struct list_head *next;
signed long oldcount, woken, loop;
rwsemtrace(sem, "Entering __rwsem_do_wake");
if (downgrading)
goto dont_wake_writers;
@@ -127,7 +114,6 @@ __rwsem_do_wake(struct rw_semaphore *sem, int downgrading)
next->prev = &sem->wait_list;
out:
rwsemtrace(sem, "Leaving __rwsem_do_wake");
return sem;
/* undo the change to count, but check for a transition 1->0 */
@@ -186,13 +172,9 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
rwsemtrace(sem, "Entering rwsem_down_read_failed");
waiter.flags = RWSEM_WAITING_FOR_READ;
rwsem_down_failed_common(sem, &waiter,
RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
rwsemtrace(sem, "Leaving rwsem_down_read_failed");
return sem;
}
@@ -204,12 +186,9 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
rwsemtrace(sem, "Entering rwsem_down_write_failed");
waiter.flags = RWSEM_WAITING_FOR_WRITE;
rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
rwsemtrace(sem, "Leaving rwsem_down_write_failed");
return sem;
}
@@ -221,8 +200,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
rwsemtrace(sem, "Entering rwsem_wake");
spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
@@ -231,8 +208,6 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving rwsem_wake");
return sem;
}
@@ -245,8 +220,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
rwsemtrace(sem, "Entering rwsem_downgrade_wake");
spin_lock_irqsave(&sem->wait_lock, flags);
/* do nothing if list empty */
@@ -255,7 +228,6 @@ struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
spin_unlock_irqrestore(&sem->wait_lock, flags);
rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
return sem;
}
@@ -263,6 +235,3 @@ EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif
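
The adjustments handed to rwsem_down_failed_common() above (RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS for a failed reader, -RWSEM_ACTIVE_BIAS for a failed writer) only make sense against the packed count encoding. The constants below are the usual 2.6-era 32-bit values from the arch header; they do not appear in these hunks, so treat them as an assumption quoted for illustration:

/* Assumed count encoding (asm-i386/rwsem.h values, illustration only) */
#define RWSEM_ACTIVE_BIAS        0x00000001     /* one active reader or writer */
#define RWSEM_WAITING_BIAS      (-0x00010000)   /* at least one queued waiter  */
#define RWSEM_ACTIVE_READ_BIAS   RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * A reader whose fast path failed has already added ACTIVE_READ_BIAS, so
 * the slow path adjusts by WAITING_BIAS - ACTIVE_BIAS: it becomes a waiter
 * and gives back its active count.  A writer has already added
 * WAITING_BIAS + ACTIVE_BIAS, so it gives back only ACTIVE_BIAS; the
 * waiting bias it leaves in place keeps the count negative and steers new
 * lockers into the slow path until the wait queue drains.
 */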