diff --git a/BAN/include/BAN/Atomic.h b/BAN/include/BAN/Atomic.h index 18b7ac87..c2fe88e0 100644 --- a/BAN/include/BAN/Atomic.h +++ b/BAN/include/BAN/Atomic.h @@ -45,9 +45,23 @@ namespace BAN inline T operator--(int) volatile { return __atomic_fetch_sub(&m_value, 1, MEM_ORDER); } inline T operator++(int) volatile { return __atomic_fetch_add(&m_value, 1, MEM_ORDER); } - inline bool compare_exchange(T expected, T desired, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_compare_exchange_n(&m_value, &expected, desired, false, mem_order, mem_order); } + inline bool compare_exchange(T& expected, T desired, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_compare_exchange_n(&m_value, &expected, desired, false, mem_order, mem_order); } inline T exchange(T desired, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_exchange_n(&m_value, desired, mem_order); }; + inline T add_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_add_fetch (&m_value, val, mem_order); } + inline T sub_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_sub_fetch (&m_value, val, mem_order); } + inline T and_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_and_fetch (&m_value, val, mem_order); } + inline T xor_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_xor_fetch (&m_value, val, mem_order); } + inline T or_fetch (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_or_fetch (&m_value, val, mem_order); } + inline T nand_fetch(T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_nand_fetch(&m_value, val, mem_order); } + + inline T fetch_add (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_add (&m_value, val, mem_order); } + inline T fetch_sub (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_sub (&m_value, val, mem_order); } + inline T fetch_and (T val, MemoryOrder mem_order 
= MEM_ORDER) volatile { return __atomic_fetch_and (&m_value, val, mem_order); } + inline T fetch_xor (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_xor (&m_value, val, mem_order); } + inline T fetch_or (T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_or (&m_value, val, mem_order); } + inline T fetch_nand(T val, MemoryOrder mem_order = MEM_ORDER) volatile { return __atomic_fetch_nand(&m_value, val, mem_order); } + private: T m_value; }; diff --git a/kernel/include/kernel/Lock/Mutex.h b/kernel/include/kernel/Lock/Mutex.h index 431bafbb..691e2d01 100644 --- a/kernel/include/kernel/Lock/Mutex.h +++ b/kernel/include/kernel/Lock/Mutex.h @@ -24,8 +24,12 @@ namespace Kernel ASSERT(m_lock_depth > 0); else { - while (!m_locker.compare_exchange(-1, tid)) + pid_t expected = -1; + while (!m_locker.compare_exchange(expected, tid)) + { Scheduler::get().yield(); + expected = -1; + } ASSERT(m_lock_depth == 0); if (Scheduler::current_tid()) Thread::current().add_mutex(); @@ -40,7 +44,8 @@ namespace Kernel ASSERT(m_lock_depth > 0); else { - if (!m_locker.compare_exchange(-1, tid)) + pid_t expected = -1; + if (!m_locker.compare_exchange(expected, tid)) return false; ASSERT(m_lock_depth == 0); if (Scheduler::current_tid()) @@ -89,8 +94,12 @@ namespace Kernel bool has_priority = tid ? !Thread::current().is_userspace() : true; if (has_priority) m_queue_length++; - while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid)) + pid_t expected = -1; + while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(expected, tid)) + { Scheduler::get().yield(); + expected = -1; + } ASSERT(m_lock_depth == 0); if (Scheduler::current_tid()) Thread::current().add_mutex(); @@ -106,7 +115,8 @@ namespace Kernel else { bool has_priority = tid ?
!Thread::current().is_userspace() : true; - if (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid)) + pid_t expected = -1; + if (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(expected, tid)) return false; if (has_priority) m_queue_length++; diff --git a/kernel/include/kernel/Lock/SpinLock.h b/kernel/include/kernel/Lock/SpinLock.h index 68310320..563bf8bc 100644 --- a/kernel/include/kernel/Lock/SpinLock.h +++ b/kernel/include/kernel/Lock/SpinLock.h @@ -26,8 +26,12 @@ namespace Kernel auto id = Processor::current_id(); ASSERT(m_locker != id); - while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire)) + ProcessorID expected = PROCESSOR_NONE; + while (!m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acquire)) + { __builtin_ia32_pause(); + expected = PROCESSOR_NONE; + } return state; } @@ -67,8 +71,12 @@ namespace Kernel ASSERT(m_lock_depth > 0); else { - while (!m_locker.compare_exchange(PROCESSOR_NONE, id, BAN::MemoryOrder::memory_order_acquire)) + ProcessorID expected = PROCESSOR_NONE; + while (!m_locker.compare_exchange(expected, id, BAN::MemoryOrder::memory_order_acquire)) + { __builtin_ia32_pause(); + expected = PROCESSOR_NONE; + } ASSERT(m_lock_depth == 0); } diff --git a/kernel/kernel/Networking/UNIX/Socket.cpp b/kernel/kernel/Networking/UNIX/Socket.cpp index 37dea188..7258f505 100644 --- a/kernel/kernel/Networking/UNIX/Socket.cpp +++ b/kernel/kernel/Networking/UNIX/Socket.cpp @@ -346,7 +346,8 @@ namespace Kernel if (m_info.has()) { auto& connection_info = m_info.get(); - if (connection_info.target_closed.compare_exchange(true, false)) + bool expected = true; + if (connection_info.target_closed.compare_exchange(expected, false)) return 0; if (!connection_info.connection) return BAN::Error::from_errno(ENOTCONN);