LibC: Implement pthread_spin_* functions

This commit is contained in:
Bananymous 2025-04-02 12:49:20 +03:00
parent 28392050bf
commit f32f62dfc1
2 changed files with 68 additions and 6 deletions

View File

@@ -70,16 +70,16 @@ __BEGIN_DECLS
#endif
#undef __need_pthread_rwlockattr_t
// NOTE(review): this rendered diff shows BOTH the removed (int-based) and the
// added (pthread_t-based) definition of pthread_spinlock_t; in the post-commit
// header only the second block exists, moved after pthread_t so the typedef it
// depends on is already declared.
#if !defined(__pthread_spinlock_t_defined) && (defined(__need_all_types) || defined(__need_pthread_spinlock_t))
#define __pthread_spinlock_t_defined 1
typedef int pthread_spinlock_t;
#endif
#undef __need_pthread_spinlock_t
// Thread identifier; this libc maps it onto a kernel pid_t.
#if !defined(__pthread_t_defined) && (defined(__need_all_types) || defined(__need_pthread_t))
#define __pthread_t_defined 1
typedef pid_t pthread_t;
#endif
#undef __need_pthread_t
// New definition: the spinlock word stores the owning thread's id
// (0 = unlocked), which lets pthread_spin_* assert against recursive locking.
#if !defined(__pthread_spinlock_t_defined) && (defined(__need_all_types) || defined(__need_pthread_spinlock_t))
#define __pthread_spinlock_t_defined 1
typedef pthread_t pthread_spinlock_t;
#endif
#undef __need_pthread_spinlock_t
__END_DECLS

View File

@@ -1,5 +1,8 @@
#include <BAN/Assert.h>
#include <BAN/Atomic.h>
#include <BAN/PlacementNew.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
@@ -52,3 +55,62 @@ pthread_t pthread_self(void)
{
return syscall(SYS_PTHREAD_SELF);
}
// Reinterpret caller-owned pthread_spinlock_t storage as the
// BAN::Atomic<pthread_t> the pthread_spin_* functions operate on.
// The lock word holds the owner's thread id; 0 means "unlocked".
static inline BAN::Atomic<pthread_t>& pthread_spin_get_atomic(pthread_spinlock_t* lock)
{
	// The atomic must FIT INSIDE the public spinlock storage, so the
	// comparisons go this way around. (The original asserts were reversed —
	// `sizeof(pthread_spinlock_t) <= sizeof(Atomic)` — and would not catch an
	// Atomic that is larger or more strictly aligned than pthread_spinlock_t,
	// which is the case that corrupts memory.)
	static_assert(sizeof(BAN::Atomic<pthread_t>) <= sizeof(pthread_spinlock_t));
	static_assert(alignof(BAN::Atomic<pthread_t>) <= alignof(pthread_spinlock_t));
	return *reinterpret_cast<BAN::Atomic<pthread_t>*>(lock);
}
// Destroy a spinlock: explicitly run the atomic's destructor.
// The underlying storage belongs to the caller, so nothing is freed here.
int pthread_spin_destroy(pthread_spinlock_t* lock)
{
	auto& atomic = pthread_spin_get_atomic(lock);
	atomic.~Atomic<pthread_t>();
	return 0;
}
// Initialize a spinlock to the unlocked state (owner id 0).
// pshared is ignored: all lock state lives in the caller-provided lock word,
// so this implementation makes no process-shared distinction.
int pthread_spin_init(pthread_spinlock_t* lock, int pshared)
{
	(void)pshared;
	// Construct the atomic in the caller's storage...
	new (lock) BAN::Atomic<pthread_t>();
	// ...and mark it unlocked. Use 0, not `false`: the lock word is a
	// pthread_t owner id, not a boolean (the original assigned `false`).
	pthread_spin_get_atomic(lock) = 0;
	return 0;
}
// Acquire the spinlock, yielding the CPU between attempts.
// Asserts (rather than deadlocks) if the calling thread already owns it.
int pthread_spin_lock(pthread_spinlock_t* lock)
{
	auto& atomic = pthread_spin_get_atomic(lock);
	const pthread_t tid = pthread_self();
	ASSERT(atomic.load(BAN::MemoryOrder::memory_order_relaxed) != tid);
	for (;;)
	{
		// compare_exchange overwrites `expected` on failure, so start each
		// attempt from a fresh 0 (= unlocked).
		pthread_t expected = 0;
		if (atomic.compare_exchange(expected, tid, BAN::MemoryOrder::memory_order_acquire))
			return 0;
		sched_yield();
	}
}
// Try to acquire the spinlock without blocking.
// Returns 0 on success, EBUSY if another thread holds the lock.
// Asserts if the calling thread already owns it.
int pthread_spin_trylock(pthread_spinlock_t* lock)
{
	auto& atomic = pthread_spin_get_atomic(lock);
	const pthread_t tid = pthread_self();
	ASSERT(atomic.load(BAN::MemoryOrder::memory_order_relaxed) != tid);
	pthread_t unlocked = 0;
	const bool acquired = atomic.compare_exchange(unlocked, tid, BAN::MemoryOrder::memory_order_acquire);
	return acquired ? 0 : EBUSY;
}
// Release the spinlock by storing 0 (= unlocked) with release ordering.
// Asserts that the caller is the current owner.
int pthread_spin_unlock(pthread_spinlock_t* lock)
{
	auto& state = pthread_spin_get_atomic(lock);
	ASSERT(state.load(BAN::MemoryOrder::memory_order_relaxed) == pthread_self());
	state.store(0, BAN::MemoryOrder::memory_order_release);
	return 0;
}