BAN: Add more APIs for Atomic and make compare_exchange take a reference

This commit is contained in:
Bananymous 2024-06-28 21:47:47 +03:00
parent 0c645ba867
commit 48a76426e7
4 changed files with 41 additions and 8 deletions

View File

@ -45,9 +45,23 @@ namespace BAN
// Atomic post-decrement: subtracts 1 and yields the value held *before* the update.
inline T operator--(int) volatile
{
	return __atomic_fetch_sub(&m_value, 1, MEM_ORDER);
}
// Atomic post-increment: adds 1 and yields the value held *before* the update.
inline T operator++(int) volatile
{
	return __atomic_fetch_add(&m_value, 1, MEM_ORDER);
}
// Atomic compare-and-swap: stores `desired` iff the current value equals `expected`.
// `expected` is taken by value, so the value observed on failure is discarded.
// Returns true when the exchange took place.
inline bool compare_exchange(T expected, T desired, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_compare_exchange_n(&m_value, &expected, desired, false, mem_order, mem_order);
}
// Atomic compare-and-swap: stores `desired` iff the current value equals `expected`.
// On failure, `expected` is overwritten with the value actually observed, so the
// caller can inspect it or retry. Returns true when the exchange took place.
inline bool compare_exchange(T& expected, T desired, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_compare_exchange_n(&m_value, &expected, desired, false, mem_order, mem_order);
}
// Atomically stores `desired` and returns the previous value.
inline T exchange(T desired, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_exchange_n(&m_value, desired, mem_order);
}
// Atomically performs m_value += val and returns the *new* value.
inline T add_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_add_fetch(&m_value, val, mem_order);
}
// Atomically performs m_value -= val and returns the *new* value.
inline T sub_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_sub_fetch(&m_value, val, mem_order);
}
// Atomically performs m_value &= val and returns the *new* value.
inline T and_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_and_fetch(&m_value, val, mem_order);
}
// Atomically performs m_value ^= val and returns the *new* value.
inline T xor_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_xor_fetch(&m_value, val, mem_order);
}
// Atomically performs m_value |= val and returns the *new* value.
inline T or_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_or_fetch(&m_value, val, mem_order);
}
// Atomically performs m_value = ~(m_value & val) and returns the *new* value.
inline T nand_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_nand_fetch(&m_value, val, mem_order);
}
// Atomically performs m_value += val and returns the value held *before* the update.
inline T fetch_add(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_fetch_add(&m_value, val, mem_order);
}
// Atomically performs m_value -= val and returns the value held *before* the update.
inline T fetch_sub(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_fetch_sub(&m_value, val, mem_order);
}
// Atomically performs m_value &= val and returns the value held *before* the update.
inline T fetch_and(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_fetch_and(&m_value, val, mem_order);
}
// Atomically performs m_value ^= val and returns the value held *before* the update.
inline T fetch_xor(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_fetch_xor(&m_value, val, mem_order);
}
// Atomically performs m_value |= val and returns the value held *before* the update.
// FIX: the builtin was misspelled `__atomic_fetch__or` (double underscore), which is
// not a GCC/Clang atomic builtin and would fail to compile/link; the correct name
// is `__atomic_fetch_or`.
inline T fetch_or(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_fetch_or(&m_value, val, mem_order);
}
// Atomically performs m_value = ~(m_value & val) and returns the value held
// *before* the update.
// FIX: the builtin was misspelled `__atomic_nfetch_and`, which does not exist;
// the correct GCC/Clang builtin is `__atomic_fetch_nand`.
inline T fetch_nand(T val, MemoryOrder mem_order = MEM_ORDER) volatile
{
	return __atomic_fetch_nand(&m_value, val, mem_order);
}
private:
T m_value;
};

View File

@ -24,8 +24,12 @@ namespace Kernel
ASSERT(m_lock_depth > 0);
else
{
while (!m_locker.compare_exchange(-1, tid))
pid_t expected = -1;
while (!m_locker.compare_exchange(expected, tid))
{
Scheduler::get().yield();
expected = -1;
}
ASSERT(m_lock_depth == 0);
if (Scheduler::current_tid())
Thread::current().add_mutex();
@ -40,7 +44,8 @@ namespace Kernel
ASSERT(m_lock_depth > 0);
else
{
if (!m_locker.compare_exchange(-1, tid))
pid_t expected = -1;
if (!m_locker.compare_exchange(expected, tid))
return false;
ASSERT(m_lock_depth == 0);
if (Scheduler::current_tid())
@ -89,8 +94,12 @@ namespace Kernel
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_length++;
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
pid_t expected = -1;
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(expected, tid))
{
Scheduler::get().yield();
expected = -1;
}
ASSERT(m_lock_depth == 0);
if (Scheduler::current_tid())
Thread::current().add_mutex();
@ -106,7 +115,8 @@ namespace Kernel
else
{
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
pid_t expected = -1;
if (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(expected, tid))
return false;
if (has_priority)
m_queue_length++;

View File

@ -26,8 +26,12 @@ namespace Kernel
auto id = Processor::current_id();
ASSERT(m_locker != id);
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
ProcessorID expected = PROCESSOR_NONE;
while (!m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acquire))
{
__builtin_ia32_pause();
expected = PROCESSOR_NONE;
}
return state;
}
@ -67,8 +71,12 @@ namespace Kernel
ASSERT(m_lock_depth > 0);
else
{
while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire))
ProcessorID expected = PROCESSOR_NONE;
while (!m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acquire))
{
__builtin_ia32_pause();
expected = PROCESSOR_NONE;
}
ASSERT(m_lock_depth == 0);
}

View File

@ -346,7 +346,8 @@ namespace Kernel
if (m_info.has<ConnectionInfo>())
{
auto& connection_info = m_info.get<ConnectionInfo>();
if (connection_info.target_closed.compare_exchange(true, false))
bool expected = true;
if (connection_info.target_closed.compare_exchange(expected, false))
return 0;
if (!connection_info.connection)
return BAN::Error::from_errno(ENOTCONN);