LibC: Implement blocking pthread_rwlock

pthread_rwlock now uses a mutex and condition variable internally so it
doesn't need to yield while waiting!
This commit is contained in:
2026-04-05 12:06:38 +03:00
parent ec4aa8d0b6
commit 34b59f062b
2 changed files with 68 additions and 70 deletions

View File

@@ -67,9 +67,11 @@ typedef struct
} pthread_rwlockattr_t; } pthread_rwlockattr_t;
typedef struct typedef struct
{ {
pthread_rwlockattr_t attr; pthread_mutex_t lock;
unsigned lockers; pthread_cond_t cond;
unsigned writers; unsigned writers_waiting;
unsigned writer_active;
unsigned readers_active;
} pthread_rwlock_t; } pthread_rwlock_t;
__END_DECLS __END_DECLS

View File

@@ -22,8 +22,6 @@ struct pthread_trampoline_info_t
void* arg; void* arg;
}; };
static constexpr unsigned rwlock_writer_locked = -1;
// stack is 16 byte aligned on entry, this `call` is used to align it // stack is 16 byte aligned on entry, this `call` is used to align it
extern "C" void _pthread_trampoline(void*); extern "C" void _pthread_trampoline(void*);
asm( asm(
@@ -942,102 +940,100 @@ int pthread_rwlock_init(pthread_rwlock_t* __restrict rwlock, const pthread_rwloc
if (attr == nullptr) if (attr == nullptr)
attr = &default_attr; attr = &default_attr;
*rwlock = { *rwlock = {
.attr = *attr, .lock = PTHREAD_MUTEX_INITIALIZER,
.lockers = 0, .cond = PTHREAD_COND_INITIALIZER,
.writers = 0, .writers_waiting = 0,
.writer_active = 0,
.readers_active = 0,
}; };
const pthread_mutexattr_t mattr {
.type = PTHREAD_MUTEX_DEFAULT,
.shared = attr->shared,
};
pthread_mutex_init(&rwlock->lock, &mattr);
const pthread_condattr_t cattr {
.clock = CLOCK_REALTIME,
.shared = attr->shared,
};
pthread_cond_init(&rwlock->cond, &cattr);
return 0; return 0;
} }
// TODO: rewrite rwlock with futexes

// Polls `trylock` on `lock` until it succeeds (returns 0) or the absolute
// CLOCK_REALTIME deadline `abstime` has passed (returns ETIMEDOUT),
// yielding the CPU between attempts.
template<typename T>
static int pthread_rwlock_timedlock(T* __restrict lock, const struct timespec* __restrict abstime, int (*trylock)(T*))
{
	// Fast path: lock is free right now.
	if (trylock(lock) == 0)
		return 0;

	const auto deadline_passed =
		[abstime]() -> bool
		{
			struct timespec now;
			clock_gettime(CLOCK_REALTIME, &now);
			if (now.tv_sec != abstime->tv_sec)
				return now.tv_sec > abstime->tv_sec;
			return now.tv_nsec >= abstime->tv_nsec;
		};

	for (;;)
	{
		if (deadline_passed())
			return ETIMEDOUT;
		if (trylock(lock) == 0)
			return 0;
		sched_yield();
	}
}
// Acquire `rwlock` for reading, blocking as long as necessary.
int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock)
{
	// A null abstime means no deadline.
	return pthread_rwlock_timedrdlock(rwlock, nullptr);
}
int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock) int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock)
{ {
unsigned expected = BAN::atomic_load(rwlock->lockers); int ret = 0;
while (expected != rwlock_writer_locked && BAN::atomic_load(rwlock->writers) == 0) pthread_mutex_lock(&rwlock->lock);
if (BAN::atomic_compare_exchange(rwlock->lockers, expected, expected + 1)) if (!rwlock->writers_waiting && !rwlock->writer_active)
return 0; rwlock->readers_active++;
return EBUSY; else
ret = EBUSY;
pthread_mutex_unlock(&rwlock->lock);
return ret;
} }
int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict rwlock, const struct timespec* __restrict abstime) int pthread_rwlock_timedrdlock(pthread_rwlock_t* __restrict rwlock, const struct timespec* __restrict abstime)
{ {
return pthread_rwlock_timedlock(rwlock, abstime, &pthread_rwlock_tryrdlock); int ret = 0;
pthread_mutex_lock(&rwlock->lock);
while (ret == 0 && (rwlock->writers_waiting || rwlock->writer_active))
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abstime);
if (ret == 0)
rwlock->readers_active++;
pthread_mutex_unlock(&rwlock->lock);
return ret;
} }
// Acquire `rwlock` for writing, blocking as long as necessary.
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock)
{
	// A null abstime means no deadline.
	return pthread_rwlock_timedwrlock(rwlock, nullptr);
}
int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock) int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock)
{ {
unsigned expected = 0; int ret = 0;
if (!BAN::atomic_compare_exchange(rwlock->lockers, expected, rwlock_writer_locked)) pthread_mutex_lock(&rwlock->lock);
return EBUSY; if (!rwlock->readers_active && !rwlock->writer_active)
return 0; rwlock->writer_active = 1;
else
ret = EBUSY;
pthread_mutex_unlock(&rwlock->lock);
return ret;
} }
int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict rwlock, const struct timespec* __restrict abstime) int pthread_rwlock_timedwrlock(pthread_rwlock_t* __restrict rwlock, const struct timespec* __restrict abstime)
{ {
return pthread_rwlock_timedlock(rwlock, abstime, &pthread_rwlock_trywrlock); int ret = 0;
pthread_mutex_lock(&rwlock->lock);
rwlock->writers_waiting++;
while (ret == 0 && (rwlock->readers_active || rwlock->writer_active))
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abstime);
rwlock->writers_waiting--;
if (ret == 0)
rwlock->writer_active = 1;
pthread_mutex_unlock(&rwlock->lock);
return ret;
} }
int pthread_rwlock_unlock(pthread_rwlock_t* rwlock) int pthread_rwlock_unlock(pthread_rwlock_t* rwlock)
{ {
if (BAN::atomic_load(rwlock->lockers) == rwlock_writer_locked) pthread_mutex_lock(&rwlock->lock);
BAN::atomic_store(rwlock->lockers, 0); if (rwlock->writer_active)
{
rwlock->writer_active = 0;
pthread_cond_broadcast(&rwlock->cond);
}
else else
BAN::atomic_sub_fetch(rwlock->lockers, 1); {
rwlock->readers_active--;
if (rwlock->readers_active == 0)
pthread_cond_broadcast(&rwlock->cond);
}
pthread_mutex_unlock(&rwlock->lock);
return 0; return 0;
} }