Commit (2006-08-06): rescue-bootcd
This commit adds one new file: extra/linux-2.6.10/lib/dec_and_lock.c (40 lines, diff hunk @@ -0,0 +1,40 @@).
#include <linux/module.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/atomic.h>
|
||||
|
||||
/*
|
||||
* This is an architecture-neutral, but slow,
|
||||
* implementation of the notion of "decrement
|
||||
* a reference count, and return locked if it
|
||||
* decremented to zero".
|
||||
*
|
||||
* NOTE NOTE NOTE! This is _not_ equivalent to
|
||||
*
|
||||
* if (atomic_dec_and_test(&atomic)) {
|
||||
* spin_lock(&lock);
|
||||
* return 1;
|
||||
* }
|
||||
* return 0;
|
||||
*
|
||||
* because the spin-lock and the decrement must be
|
||||
* "atomic".
|
||||
*
|
||||
* This slow version gets the spinlock unconditionally,
|
||||
* and releases it if it isn't needed. Architectures
|
||||
* are encouraged to come up with better approaches,
|
||||
* this is trivially done efficiently using a load-locked
|
||||
* store-conditional approach, for example.
|
||||
*/
|
||||
|
||||
#ifndef ATOMIC_DEC_AND_LOCK
|
||||
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
|
||||
{
|
||||
spin_lock(lock);
|
||||
if (atomic_dec_and_test(atomic))
|
||||
return 1;
|
||||
spin_unlock(lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(_atomic_dec_and_lock);
|
||||
#endif
|
||||
Reference in New Issue
Block a user