Compare commits

..

No commits in common. "21f05eb11808a291375518af6024ae4efd743fdb" and "264eff3ad07d6e68d0142fa10914529425e9be6b" have entirely different histories.

72 changed files with 667 additions and 605 deletions

View File

@ -52,14 +52,11 @@ namespace BAN
ErrorOr<void> reserve(size_type);
void remove(const Key&);
void remove(iterator it);
void clear();
T& operator[](const Key&);
const T& operator[](const Key&) const;
iterator find(const Key& key);
const_iterator find(const Key& key) const;
bool contains(const Key&) const;
bool empty() const;
@ -69,8 +66,6 @@ namespace BAN
ErrorOr<void> rebucket(size_type);
LinkedList<Entry>& get_bucket(const Key&);
const LinkedList<Entry>& get_bucket(const Key&) const;
Vector<LinkedList<Entry>>::iterator get_bucket_iterator(const Key&);
Vector<LinkedList<Entry>>::const_iterator get_bucket_iterator(const Key&) const;
private:
Vector<LinkedList<Entry>> m_buckets;
@ -150,16 +145,17 @@ namespace BAN
template<typename Key, typename T, typename HASH>
void HashMap<Key, T, HASH>::remove(const Key& key)
{
auto it = find(key);
if (it != end())
remove(it);
}
template<typename Key, typename T, typename HASH>
void HashMap<Key, T, HASH>::remove(iterator it)
{
it.outer_current()->remove(it.inner_current());
m_size--;
if (empty()) return;
auto& bucket = get_bucket(key);
for (auto it = bucket.begin(); it != bucket.end(); it++)
{
if (it->key == key)
{
bucket.remove(it);
m_size--;
return;
}
}
}
template<typename Key, typename T, typename HASH>
@ -191,34 +187,15 @@ namespace BAN
ASSERT(false);
}
// Locate the entry for `key`. Returns end() when the map is empty or the
// key is not present. Hashes once (inside get_bucket_iterator) and then
// scans the bucket's linked list linearly for an exact key match.
template<typename Key, typename T, typename HASH>
typename HashMap<Key, T, HASH>::iterator HashMap<Key, T, HASH>::find(const Key& key)
{
// get_bucket_iterator asserts on an empty bucket vector, so bail out first.
if (empty())
return end();
auto bucket_it = get_bucket_iterator(key);
for (auto it = bucket_it->begin(); it != bucket_it->end(); it++)
if (it->key == key)
// The iterator carries the outer end so it can advance across buckets.
return iterator(m_buckets.end(), bucket_it, it);
return end();
}
// Const counterpart of find(): same hash-once, scan-bucket lookup,
// returning a const_iterator (end() when empty or absent).
template<typename Key, typename T, typename HASH>
typename HashMap<Key, T, HASH>::const_iterator HashMap<Key, T, HASH>::find(const Key& key) const
{
// get_bucket_iterator asserts on an empty bucket vector, so bail out first.
if (empty())
return end();
auto bucket_it = get_bucket_iterator(key);
for (auto it = bucket_it->begin(); it != bucket_it->end(); it++)
if (it->key == key)
return const_iterator(m_buckets.end(), bucket_it, it);
return end();
}
template<typename Key, typename T, typename HASH>
bool HashMap<Key, T, HASH>::contains(const Key& key) const
{
return find(key) != end();
if (empty()) return false;
const auto& bucket = get_bucket(key);
for (const Entry& entry : bucket)
if (entry.key == key)
return true;
return false;
}
template<typename Key, typename T, typename HASH>
@ -259,29 +236,17 @@ namespace BAN
template<typename Key, typename T, typename HASH>
LinkedList<typename HashMap<Key, T, HASH>::Entry>& HashMap<Key, T, HASH>::get_bucket(const Key& key)
{
return *get_bucket_iterator(key);
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return m_buckets[index];
}
template<typename Key, typename T, typename HASH>
const LinkedList<typename HashMap<Key, T, HASH>::Entry>& HashMap<Key, T, HASH>::get_bucket(const Key& key) const
{
return *get_bucket_iterator(key);
}
// Map `key` to its bucket by hashing modulo the bucket count and return an
// iterator to that bucket. Callers must ensure the map has buckets: an
// empty bucket vector would make the modulo undefined, hence the ASSERT.
template<typename Key, typename T, typename HASH>
Vector<LinkedList<typename HashMap<Key, T, HASH>::Entry>>::iterator HashMap<Key, T, HASH>::get_bucket_iterator(const Key& key)
{
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return next(m_buckets.begin(), index);
}
template<typename Key, typename T, typename HASH>
Vector<LinkedList<typename HashMap<Key, T, HASH>::Entry>>::const_iterator HashMap<Key, T, HASH>::get_bucket_iterator(const Key& key) const
{
ASSERT(!m_buckets.empty());
auto index = HASH()(key) % m_buckets.size();
return next(m_buckets.begin(), index);
return m_buckets[index];
}
}

View File

@ -284,14 +284,6 @@ namespace BAN
}
}
IteratorDoubleGeneral(const OuterIterator& outer_end, const OuterIterator& outer_current, const InnerIterator& inner_current)
: m_outer_end(outer_end)
, m_outer_current(outer_current)
, m_inner_current(inner_current)
{
find_valid_or_end();
}
void find_valid_or_end()
{
while (m_inner_current == m_outer_current->end())
@ -303,9 +295,6 @@ namespace BAN
}
}
OuterIterator outer_current() { return m_outer_current; }
InnerIterator inner_current() { return m_inner_current; }
private:
OuterIterator m_outer_end;
OuterIterator m_outer_current;

View File

@ -25,7 +25,7 @@ namespace LibELF
BAN::Vector<uint8_t> buffer;
TRY(buffer.resize(inode->size()));
TRY(inode->read(0, buffer.data(), inode->size()));
TRY(inode->read(0, { buffer.data(), inode->size() }));
ELF* elf_ptr = new ELF(BAN::move(buffer));
if (elf_ptr == nullptr)

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Lock/LockGuard.h>
#include <LibELF/LoadableELF.h>
@ -282,6 +281,10 @@ namespace LibELF
elf->reserve_address_space();
ASSERT(&PageTable::current() == &m_page_table);
LockGuard _(m_page_table);
ASSERT(m_page_table.is_page_free(0));
for (const auto& program_header : m_program_headers)
{
switch (program_header.p_type)

View File

@ -42,7 +42,6 @@ set(KERNEL_SOURCES
kernel/Input/PS2/Mouse.cpp
kernel/InterruptController.cpp
kernel/kernel.cpp
kernel/Lock/SpinLock.cpp
kernel/Memory/DMARegion.cpp
kernel/Memory/FileBackedRegion.cpp
kernel/Memory/Heap.cpp
@ -69,6 +68,8 @@ set(KERNEL_SOURCES
kernel/Random.cpp
kernel/Scheduler.cpp
kernel/Semaphore.cpp
kernel/Lock/Mutex.cpp
kernel/Lock/SpinLock.cpp
kernel/SSP.cpp
kernel/Storage/ATA/AHCI/Controller.cpp
kernel/Storage/ATA/AHCI/Device.cpp

View File

@ -1,7 +1,7 @@
#include <kernel/Arch.h>
#include <kernel/CPUID.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Memory/PageTable.h>
@ -17,13 +17,13 @@ extern uint8_t g_userspace_end[];
namespace Kernel
{
SpinLock PageTable::s_fast_page_lock;
static PageTable* s_kernel = nullptr;
static PageTable* s_current = nullptr;
static bool s_has_nxe = false;
static bool s_has_pge = false;
SpinLock PageTable::s_fast_page_lock;
// PML4 entry for kernel memory
static paddr_t s_global_pml4e = 0;
@ -252,7 +252,7 @@ namespace Kernel
BAN::ErrorOr<PageTable*> PageTable::create_userspace()
{
SpinLockGuard _(s_kernel->m_lock);
LockGuard _(s_kernel->m_lock);
PageTable* page_table = new PageTable;
if (page_table == nullptr)
return BAN::Error::from_errno(ENOMEM);
@ -333,7 +333,7 @@ namespace Kernel
uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
if (is_page_free(vaddr))
{
@ -355,7 +355,7 @@ namespace Kernel
vaddr_t s_page = vaddr / PAGE_SIZE;
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (vaddr_t page = s_page; page < e_page; page++)
unmap_page(page * PAGE_SIZE);
}
@ -394,7 +394,7 @@ namespace Kernel
// NOTE: we add present here, since it has to be available in higher level structures
flags_t uwr_flags = (flags & (Flags::UserSupervisor | Flags::ReadWrite)) | Flags::Present;
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
if ((pml4[pml4e] & uwr_flags) != uwr_flags)
@ -439,7 +439,7 @@ namespace Kernel
size_t page_count = range_page_count(vaddr, size);
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (size_t page = 0; page < page_count; page++)
map_page_at(paddr + page * PAGE_SIZE, vaddr + page * PAGE_SIZE, flags);
}
@ -456,7 +456,7 @@ namespace Kernel
uint64_t pde = (uc_vaddr >> 21) & 0x1FF;
uint64_t pte = (uc_vaddr >> 12) & 0x1FF;
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
uint64_t* pml4 = (uint64_t*)P2V(m_highest_paging_struct);
if (!(pml4[pml4e] & Flags::Present))
@ -490,7 +490,7 @@ namespace Kernel
bool PageTable::reserve_page(vaddr_t vaddr, bool only_free)
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
ASSERT(vaddr % PAGE_SIZE == 0);
if (only_free && !is_page_free(vaddr))
return false;
@ -504,7 +504,7 @@ namespace Kernel
bytes += PAGE_SIZE - rem;
ASSERT(vaddr % PAGE_SIZE == 0);
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
if (only_free && !is_range_free(vaddr, bytes))
return false;
for (size_t offset = 0; offset < bytes; offset += PAGE_SIZE)
@ -536,7 +536,7 @@ namespace Kernel
const uint16_t e_pde = (uc_vaddr_end >> 21) & 0x1FF;
const uint16_t e_pte = (uc_vaddr_end >> 12) & 0x1FF;
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
// Try to find free page that can be mapped without
// allocations (page table with unused entries)
@ -609,7 +609,7 @@ namespace Kernel
ASSERT(is_canonical(first_address));
ASSERT(is_canonical(last_address));
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (vaddr_t vaddr = first_address; vaddr < last_address;)
{
@ -650,7 +650,7 @@ namespace Kernel
vaddr_t s_page = vaddr / PAGE_SIZE;
vaddr_t e_page = BAN::Math::div_round_up<vaddr_t>(vaddr + size, PAGE_SIZE);
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (vaddr_t page = s_page; page < e_page; page++)
if (!is_page_free(page * PAGE_SIZE))
return false;
@ -673,7 +673,7 @@ namespace Kernel
void PageTable::debug_dump()
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
flags_t flags = 0;
vaddr_t start = 0;

View File

@ -1,4 +1,3 @@
#include <BAN/Atomic.h>
#include <kernel/Panic.h>
#define ATEXIT_MAX_FUNCS 128
@ -114,18 +113,18 @@ void __cxa_finalize(void *f)
namespace __cxxabiv1
{
using __guard = uint64_t;
/* The ABI requires a 64-bit type. */
__extension__ typedef int __guard __attribute__((mode(__DI__)));
int __cxa_guard_acquire (__guard* g)
{
auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
return atomic == 0;
return !*(int*)g;
}
void __cxa_guard_release (__guard* g)
{
auto& atomic = *reinterpret_cast<BAN::Atomic<__guard>*>(g);
atomic = 1;
*(int*)g = 1;
}
void __cxa_guard_abort (__guard*)

View File

@ -2,6 +2,7 @@
#include <BAN/Vector.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
namespace Kernel
@ -58,6 +59,7 @@ namespace Kernel
BAN::Vector<IOAPIC> m_io_apics;
uint8_t m_irq_overrides[0x100] {};
uint8_t m_reserved_gsis[0x100 / 8] {};
SpinLock m_lock;
};
}

View File

@ -1,30 +0,0 @@
#pragma once
#include <BAN/NoCopyMove.h>
#include <stddef.h>
namespace Kernel
{
// RAII interrupt guard: the constructor saves the CPU FLAGS register and
// disables interrupts (cli); the destructor restores the saved FLAGS,
// which re-enables interrupts only if they were enabled on entry.
// Non-copyable and non-movable so a scope's save/restore cannot be
// duplicated or transferred.
class CriticalScope
{
BAN_NON_COPYABLE(CriticalScope);
BAN_NON_MOVABLE(CriticalScope);
public:
CriticalScope()
{
// pushf saves FLAGS on the stack; cli clears IF; pop captures the
// pre-cli FLAGS value into m_flags.
asm volatile("pushf; cli; pop %0" : "=r"(m_flags) :: "memory");
}
~CriticalScope()
{
// popf restores the saved FLAGS (including the original IF state);
// "cc" is clobbered because popf rewrites the condition codes.
asm volatile("push %0; popf" :: "rm"(m_flags) : "memory", "cc");
}
private:
size_t m_flags;
};
}

View File

@ -5,26 +5,29 @@
#define dprintln(...) \
do { \
Kernel::SpinLockGuard _(Debug::s_debug_lock); \
Debug::s_debug_lock.lock(); \
Debug::print_prefix(__FILE__, __LINE__); \
BAN::Formatter::print(Debug::putchar, __VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\r\n"); \
Debug::s_debug_lock.unlock(); \
} while(false)
#define dwarnln(...) \
do { \
Kernel::SpinLockGuard _(Debug::s_debug_lock); \
Debug::s_debug_lock.lock(); \
BAN::Formatter::print(Debug::putchar, "\e[33m"); \
dprintln(__VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\e[m"); \
Debug::s_debug_lock.unlock(); \
} while(false)
#define derrorln(...) \
do { \
Kernel::SpinLockGuard _(Debug::s_debug_lock); \
Debug::s_debug_lock.lock(); \
BAN::Formatter::print(Debug::putchar, "\e[31m"); \
dprintln(__VA_ARGS__); \
BAN::Formatter::print(Debug::putchar, "\e[m"); \
Debug::s_debug_lock.unlock(); \
} while(false)
#define dprintln_if(cond, ...) \

View File

@ -3,7 +3,6 @@
#include <BAN/Vector.h>
#include <kernel/Device/Device.h>
#include <kernel/FS/TmpFS/FileSystem.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Semaphore.h>
namespace Kernel

View File

@ -9,7 +9,6 @@
#include <kernel/API/DirectoryEntry.h>
#include <kernel/Credentials.h>
#include <kernel/Debug.h>
#include <kernel/Lock/Mutex.h>
#include <sys/socket.h>
@ -158,7 +157,7 @@ namespace Kernel
virtual BAN::ErrorOr<long> ioctl_impl(int request, void* arg) { return BAN::Error::from_errno(ENOTSUP); }
protected:
mutable PriorityMutex m_mutex;
mutable Mutex m_mutex;
private:
BAN::WeakPtr<SharedFileData> m_shared_region;

View File

@ -20,6 +20,8 @@ namespace Kernel::Input
KeyboardLayout();
private:
SpinLock m_lock;
BAN::Array<Key, 0xFF> m_keycode_to_key_normal;
BAN::Array<Key, 0xFF> m_keycode_to_key_shift;
BAN::Array<Key, 0xFF> m_keycode_to_key_altgr;

View File

@ -5,7 +5,6 @@
#include <kernel/Device/Device.h>
#include <kernel/Input/PS2/Config.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/Mutex.h>
namespace Kernel::Input
{
@ -61,6 +60,7 @@ namespace Kernel::Input
private:
BAN::RefPtr<PS2Device> m_devices[2];
Mutex m_mutex;
RecursiveSpinLock m_cmd_lock;
BAN::CircularQueue<Command, 128> m_command_queue;
uint64_t m_command_send_time { 0 };

View File

@ -45,6 +45,7 @@ namespace Kernel::Input
PS2Keymap m_keymap;
Semaphore m_semaphore;
SpinLock m_event_lock;
protected:
virtual BAN::ErrorOr<size_t> read_impl(off_t, BAN::ByteSpan) override;

View File

@ -38,6 +38,7 @@ namespace Kernel::Input
BAN::CircularQueue<MouseEvent, 128> m_event_queue;
SpinLock m_event_lock;
Semaphore m_semaphore;
protected:

View File

@ -51,11 +51,6 @@ namespace Kernel
bool m_using_apic { false };
};
inline bool interrupts_enabled()
{
uintptr_t flags;
asm volatile("pushf; pop %0" : "=r"(flags) :: "memory");
return flags & (1 << 9);
}
bool interrupts_enabled();
}

View File

@ -2,7 +2,6 @@
#include <BAN/Atomic.h>
#include <BAN/NoCopyMove.h>
#include <kernel/Scheduler.h>
#include <sys/types.h>
@ -17,49 +16,17 @@ namespace Kernel
public:
Mutex() = default;
void lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}
bool try_lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
if (!m_locker.compare_exchange(-1, tid))
return false;
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}
void unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
m_locker = -1;
}
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
};
class PriorityMutex
@ -70,62 +37,18 @@ namespace Kernel
public:
PriorityMutex() = default;
void lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_length++;
while (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}
bool try_lock()
{
auto tid = Scheduler::current_tid();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
{
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (!(has_priority || m_queue_length == 0) || !m_locker.compare_exchange(-1, tid))
return false;
if (has_priority)
m_queue_length++;
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}
void unlock()
{
auto tid = Scheduler::current_tid();
ASSERT_EQ(m_locker.load(), tid);
ASSERT_GT(m_lock_depth, 0);
if (--m_lock_depth == 0)
{
bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_length--;
m_locker = -1;
}
}
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
BAN::Atomic<uint32_t> m_queue_length { 0 };
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
BAN::Atomic<uint32_t> m_queue_depth { 0 };
};
}

View File

@ -8,8 +8,6 @@
namespace Kernel
{
using InterruptState = bool;
class SpinLock
{
BAN_NON_COPYABLE(SpinLock);
@ -18,11 +16,17 @@ namespace Kernel
public:
SpinLock() = default;
InterruptState lock();
void unlock(InterruptState state);
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return is_locked(); }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uintptr_t m_flags { 0 };
};
class RecursiveSpinLock
@ -33,35 +37,18 @@ namespace Kernel
public:
RecursiveSpinLock() = default;
InterruptState lock();
void unlock(InterruptState state);
void lock();
bool try_lock();
void unlock();
pid_t locker() const { return m_locker; }
bool is_locked() const { return m_locker != -1; }
uint32_t lock_depth() const { return m_lock_depth; }
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
};
template<typename Lock>
class SpinLockGuard
{
BAN_NON_COPYABLE(SpinLockGuard);
BAN_NON_MOVABLE(SpinLockGuard);
public:
SpinLockGuard(Lock& lock)
: m_lock(lock)
{
m_state = m_lock.lock();
}
~SpinLockGuard()
{
m_lock.unlock(m_state);
}
private:
Lock& m_lock;
InterruptState m_state;
uintptr_t m_flags { 0 };
};
}

View File

@ -2,7 +2,6 @@
#include <BAN/Errors.h>
#include <BAN/Traits.h>
#include <kernel/CriticalScope.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/Types.h>
@ -43,27 +42,30 @@ namespace Kernel
static PageTable& kernel();
static PageTable& current();
static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
public:
template<with_fast_page_callback F>
static void with_fast_page(paddr_t paddr, F callback)
{
SpinLockGuard _(s_fast_page_lock);
s_fast_page_lock.lock();
map_fast_page(paddr);
callback();
unmap_fast_page();
s_fast_page_lock.unlock();
}
template<with_fast_page_callback_error F>
static BAN::ErrorOr<void> with_fast_page(paddr_t paddr, F callback)
{
SpinLockGuard _(s_fast_page_lock);
s_fast_page_lock.lock();
map_fast_page(paddr);
auto ret = callback();
unmap_fast_page();
s_fast_page_lock.unlock();
return ret;
}
static constexpr vaddr_t fast_page() { return KERNEL_OFFSET; }
// FIXME: implement sized checks, return span, etc
static void* fast_page_as_ptr(size_t offset = 0)
{
@ -111,8 +113,8 @@ namespace Kernel
void load();
InterruptState lock() const { return m_lock.lock(); }
void unlock(InterruptState state) const { m_lock.unlock(state); }
void lock() const { m_lock.lock(); }
void unlock() const { m_lock.unlock(); }
void debug_dump();

View File

@ -18,6 +18,8 @@ namespace Kernel
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr(PageTable&, vaddr_t, size_t, PageTable::flags_t flags, bool preallocate_pages);
// Create virtual range to virtual address range
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_to_vaddr_range(PageTable&, vaddr_t vaddr_start, vaddr_t vaddr_end, size_t, PageTable::flags_t flags, bool preallocate_pages);
// Create virtual range in kernel memory with kmalloc
static BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> create_kmalloc(size_t);
~VirtualRange();
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> clone(PageTable&);
@ -33,13 +35,14 @@ namespace Kernel
void copy_from(size_t offset, const uint8_t* buffer, size_t bytes);
private:
VirtualRange(PageTable&, bool preallocated);
VirtualRange(PageTable&, bool preallocated, bool kmalloc);
void set_zero();
private:
PageTable& m_page_table;
const bool m_preallocated;
const bool m_kmalloc;
vaddr_t m_vaddr { 0 };
size_t m_size { 0 };
PageTable::flags_t m_flags { 0 };

View File

@ -51,6 +51,7 @@ namespace Kernel
};
private:
SpinLock m_pending_lock;
SpinLock m_table_lock;
BAN::HashMap<BAN::IPv4Address, BAN::MACAddress> m_arp_table;

View File

@ -67,6 +67,8 @@ namespace Kernel
bool m_has_eerprom { false };
private:
SpinLock m_lock;
BAN::UniqPtr<DMARegion> m_rx_buffer_region;
BAN::UniqPtr<DMARegion> m_tx_buffer_region;
BAN::UniqPtr<DMARegion> m_rx_descriptor_region;

View File

@ -67,7 +67,8 @@ namespace Kernel
};
private:
RecursiveSpinLock m_bound_socket_lock;
RecursiveSpinLock m_packet_lock;
RecursiveSpinLock m_socket_lock;
BAN::UniqPtr<ARPTable> m_arp_table;
Process* m_process { nullptr };

View File

@ -1,7 +1,6 @@
#pragma once
#include <BAN/Endianness.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>
@ -120,8 +119,7 @@ namespace Kernel
uint64_t m_time_wait_start_ms { 0 };
Mutex m_lock;
Semaphore m_semaphore;
Semaphore m_semaphore;
BAN::Atomic<bool> m_should_ack { false };

View File

@ -2,7 +2,6 @@
#include <BAN/CircularQueue.h>
#include <BAN/Endianness.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/VirtualRange.h>
#include <kernel/Networking/NetworkInterface.h>
#include <kernel/Networking/NetworkSocket.h>

View File

@ -4,7 +4,6 @@
#include <BAN/WeakPtr.h>
#include <kernel/FS/Socket.h>
#include <kernel/FS/TmpFS/Inode.h>
#include <kernel/Lock/SpinLock.h>
namespace Kernel
{
@ -66,7 +65,6 @@ namespace Kernel
BAN::CircularQueue<size_t, 128> m_packet_sizes;
size_t m_packet_size_total { 0 };
BAN::UniqPtr<VirtualRange> m_packet_buffer;
SpinLock m_packet_lock;
Semaphore m_packet_semaphore;
friend class BAN::RefPtr<UnixDomainSocket>;

View File

@ -20,8 +20,11 @@ namespace Kernel
private:
static PIC* create();
friend class InterruptController;
private:
SpinLock m_lock;
uint16_t m_reserved_irqs { 0 };
friend class InterruptController;
};
}

View File

@ -7,7 +7,6 @@
#include <BAN/Vector.h>
#include <kernel/Credentials.h>
#include <kernel/FS/Inode.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryRegion.h>
#include <kernel/OpenFileDescriptorSet.h>
@ -51,6 +50,9 @@ namespace Kernel
void register_to_scheduler();
void exit(int status, int signal);
static void for_each_process(const BAN::Function<BAN::Iteration(Process&)>& callback);
static void for_each_process_in_session(pid_t sid, const BAN::Function<BAN::Iteration(Process&)>& callback);
void add_thread(Thread*);
void on_thread_exit(Thread&);
@ -219,7 +221,8 @@ namespace Kernel
const pid_t m_pid;
const pid_t m_parent;
mutable Mutex m_process_lock;
mutable Mutex m_big_mutex;
SpinLock m_signal_lock;
BAN::String m_working_directory;
BAN::Vector<Thread*> m_threads;

View File

@ -7,6 +7,21 @@
namespace Kernel
{
class SchedulerLock
{
public:
void lock();
void unlock();
void unlock_all();
pid_t locker() const;
private:
BAN::Atomic<pid_t> m_locker { -1 };
uint32_t m_lock_depth { 0 };
friend class Scheduler;
};
class Scheduler
{
public:
@ -19,6 +34,8 @@ namespace Kernel
void reschedule();
void reschedule_if_idling();
void reschedule_current_no_save();
void set_current_thread_sleeping(uint64_t wake_time);
void block_current_thread(Semaphore*, uint64_t wake_time);
@ -29,8 +46,8 @@ namespace Kernel
Thread& current_thread();
static pid_t current_tid();
[[noreturn]] void execute_current_thread();
[[noreturn]] void _execute_current_thread();
BAN::ErrorOr<void> add_thread(Thread*);
[[noreturn]] void delete_current_process_and_thread();
private:
@ -43,7 +60,8 @@ namespace Kernel
void remove_and_advance_current_thread();
void advance_current_thread();
BAN::ErrorOr<void> add_thread(Thread*);
[[noreturn]] void execute_current_thread();
[[noreturn]] void _execute_current_thread();
private:
struct SchedulerThread
@ -57,13 +75,13 @@ namespace Kernel
Semaphore* semaphore;
};
SchedulerLock m_lock;
Thread* m_idle_thread { nullptr };
BAN::LinkedList<SchedulerThread> m_active_threads;
BAN::LinkedList<SchedulerThread> m_sleeping_threads;
BAN::LinkedList<SchedulerThread>::iterator m_current_thread;
friend class Process;
};
}

View File

@ -2,7 +2,6 @@
#include <BAN/Vector.h>
#include <kernel/Device/Device.h>
#include <kernel/Lock/Mutex.h>
#include <kernel/Storage/DiskCache.h>
#include <kernel/Storage/Partition.h>

View File

@ -59,6 +59,7 @@ namespace Kernel
bool initialize();
private:
SpinLock m_lock;
BAN::String m_name;
Serial m_serial;
BAN::CircularQueue<uint8_t, 128> m_input;

View File

@ -3,7 +3,6 @@
#include <BAN/Array.h>
#include <kernel/Device/Device.h>
#include <kernel/Input/KeyEvent.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Terminal/TerminalDriver.h>
#include <kernel/Terminal/termios.h>
#include <kernel/Semaphore.h>
@ -84,8 +83,6 @@ namespace Kernel
Semaphore semaphore;
};
Buffer m_output;
RecursiveSpinLock m_write_lock;
};
}

View File

@ -69,6 +69,8 @@ namespace Kernel
private:
BAN::String m_name;
RecursiveSpinLock m_write_lock;
State m_state { State::Normal };
AnsiState m_ansi_state { };
UTF8State m_utf8_state { };

View File

@ -113,6 +113,8 @@ namespace Kernel
Process* m_process { nullptr };
bool m_is_userspace { false };
mutable RecursiveSpinLock m_lock;
uintptr_t* m_return_rsp { nullptr };
uintptr_t* m_return_rip { nullptr };

View File

@ -30,6 +30,8 @@ namespace Kernel
uint64_t read_main_counter() const;
private:
mutable SpinLock m_lock;
bool m_is_64bit { false };
uint64_t m_last_ticks { 0 };

View File

@ -4,6 +4,7 @@
#include <kernel/CPUID.h>
#include <kernel/Debug.h>
#include <kernel/IDT.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/MMIO.h>
@ -223,7 +224,7 @@ namespace Kernel
void APIC::enable_irq(uint8_t irq)
{
CriticalScope _;
LockGuard _(m_lock);
uint32_t gsi = m_irq_overrides[irq];
@ -268,7 +269,7 @@ namespace Kernel
BAN::ErrorOr<void> APIC::reserve_irq(uint8_t irq)
{
CriticalScope _;
LockGuard _(m_lock);
uint32_t gsi = m_irq_overrides[irq];
@ -301,7 +302,7 @@ namespace Kernel
BAN::Optional<uint8_t> APIC::get_free_irq()
{
CriticalScope _;
LockGuard _(m_lock);
for (int irq = 0; irq <= 0xFF; irq++)
{
uint32_t gsi = m_irq_overrides[irq];

View File

@ -1,6 +1,5 @@
#include <kernel/Debug.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Memory/PageTable.h>
#include <kernel/Terminal/Serial.h>
#include <kernel/Terminal/TTY.h>

View File

@ -20,7 +20,7 @@ namespace Kernel
BAN::ErrorOr<size_t> DebugDevice::write_impl(off_t, BAN::ConstByteSpan buffer)
{
auto ms_since_boot = SystemTimer::get().ms_since_boot();
SpinLockGuard _(Debug::s_debug_lock);
Debug::s_debug_lock.lock();
BAN::Formatter::print(Debug::putchar, "[{5}.{3}] {}: ",
ms_since_boot / 1000,
ms_since_boot % 1000,
@ -28,6 +28,7 @@ namespace Kernel
);
for (size_t i = 0; i < buffer.size(); i++)
Debug::putchar(buffer[i]);
Debug::s_debug_lock.unlock();
return buffer.size();
}

View File

@ -56,9 +56,12 @@ namespace Kernel
sync_process->add_thread(MUST(Thread::create_kernel(
[](void*)
{
// NOTE: we lock the device lock here and unlock
// it only while semaphore is blocking
s_instance->m_device_lock.lock();
while (true)
{
LockGuard _(s_instance->m_device_lock);
while (!s_instance->m_should_sync)
{
LockFreeGuard _(s_instance->m_device_lock);

View File

@ -42,7 +42,6 @@ namespace Kernel
BAN::ErrorOr<size_t> Pipe::read_impl(off_t, BAN::ByteSpan buffer)
{
LockGuard _(m_mutex);
while (m_buffer.empty())
{
if (m_writing_count == 0)
@ -66,8 +65,6 @@ namespace Kernel
BAN::ErrorOr<size_t> Pipe::write_impl(off_t, BAN::ConstByteSpan buffer)
{
LockGuard _(m_mutex);
size_t old_size = m_buffer.size();
TRY(m_buffer.resize(old_size + buffer.size()));

View File

@ -1,6 +1,5 @@
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/ProcFS/Inode.h>
#include <kernel/Lock/LockGuard.h>
namespace Kernel
{

View File

@ -44,6 +44,8 @@ namespace Kernel
BAN::ErrorOr<void> VirtualFileSystem::mount(const Credentials& credentials, BAN::StringView block_device_path, BAN::StringView target)
{
LockGuard _(m_mutex);
auto block_device_file = TRY(file_from_absolute_path(credentials, block_device_path, true));
if (!block_device_file.inode->is_device())
return BAN::Error::from_errno(ENOTBLK);

View File

@ -1,7 +1,7 @@
#include <BAN/HashMap.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/VirtualFileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/Lock/LockGuard.h>
#include <ctype.h>
@ -74,6 +74,7 @@ namespace Kernel::Input
Key KeyboardLayout::get_key_from_event(KeyEvent event)
{
LockGuard _(m_lock);
if (event.shift())
return m_keycode_to_key_shift[event.keycode];
if (event.ralt())
@ -256,7 +257,7 @@ namespace Kernel::Input
}
}
CriticalScope _;
LockGuard _(m_lock);
for (size_t i = 0; i < new_layout->m_keycode_to_key_normal.size(); i++)
if (new_layout->m_keycode_to_key_normal[i] != Key::None)

View File

@ -101,8 +101,7 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t response_size)
{
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
LockGuard _(m_cmd_lock);
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@ -121,8 +120,7 @@ namespace Kernel::Input
bool PS2Controller::append_command_queue(PS2Device* device, uint8_t command, uint8_t data, uint8_t response_size)
{
// NOTE: command queue push/pop must be done without interrupts
CriticalScope _;
LockGuard _(m_cmd_lock);
if (m_command_queue.size() + 1 >= m_command_queue.capacity())
{
dprintln("PS/2 command queue full");
@ -143,6 +141,9 @@ namespace Kernel::Input
{
ASSERT(interrupts_enabled());
// NOTE: CircularQueue reads don't need locking, as long as
// we can guarantee that read element is not popped
if (m_command_queue.empty())
return;
auto& command = m_command_queue.front();
@ -152,6 +153,8 @@ namespace Kernel::Input
{
dwarnln_if(DEBUG_PS2, "Command timedout");
m_devices[command.device_index]->command_timedout(command.out_data, command.out_count);
LockGuard _(m_cmd_lock);
m_command_queue.pop();
}
return;

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/KeyboardLayout.h>
#include <kernel/Input/PS2/Config.h>
@ -165,6 +164,7 @@ namespace Kernel::Input
event.modifier = m_modifiers | (released ? 0 : KeyEvent::Modifier::Pressed);
event.keycode = keycode.value();
LockGuard _(m_event_lock);
if (m_event_queue.full())
{
dwarnln("PS/2 event queue full");
@ -197,7 +197,7 @@ namespace Kernel::Input
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
CriticalScope _;
LockGuard _(m_event_lock);
if (m_event_queue.empty())
continue;

View File

@ -1,5 +1,4 @@
#include <BAN/ScopeGuard.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/Input/PS2/Config.h>
#include <kernel/Input/PS2/Mouse.h>
@ -158,6 +157,7 @@ namespace Kernel::Input
event.scroll_event.scroll = rel_z;
}
LockGuard _(m_event_lock);
for (int i = 0; i < event_count; i++)
{
if (m_event_queue.full())
@ -181,7 +181,7 @@ namespace Kernel::Input
if (m_event_queue.empty())
TRY(Thread::current().block_or_eintr_indefinite(m_semaphore));
CriticalScope _;
LockGuard _(m_event_lock);
if (m_event_queue.empty())
continue;

View File

@ -67,4 +67,11 @@ namespace Kernel
dwarnln("could not enter acpi mode");
}
// Reports whether maskable interrupts are currently enabled on this CPU by
// reading the FLAGS register and testing the interrupt-enable flag (IF).
bool interrupts_enabled()
{
	constexpr uintptr_t rflags_interrupt_enable = 1 << 9; // IF is bit 9 of (R/E)FLAGS
	uintptr_t rflags;
	asm volatile("pushf; pop %0" : "=r"(rflags) :: "memory");
	return (rflags & rflags_interrupt_enable) != 0;
}
}

View File

@ -0,0 +1,68 @@
#include <kernel/Lock/Mutex.h>
#include <kernel/Scheduler.h>
namespace Kernel
{
void Mutex::lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
while (!m_locker.compare_exchange(-1, tid))
Scheduler::get().reschedule();
m_lock_depth++;
}
bool Mutex::try_lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
if (!m_locker.compare_exchange(-1, tid))
return false;
m_lock_depth++;
return true;
}
void Mutex::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
if (--m_lock_depth == 0)
m_locker = -1;
}
void PriorityMutex::lock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_depth++;
if (tid != m_locker)
while ((!has_priority && m_queue_depth > 0) || !m_locker.compare_exchange(-1, tid))
asm volatile("pause");
m_lock_depth++;
}
bool PriorityMutex::try_lock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (tid != m_locker)
while ((!has_priority && m_queue_depth > 0) || !m_locker.compare_exchange(-1, tid))
return false;
if (has_priority)
m_queue_depth++;
m_lock_depth++;
return true;
}
void PriorityMutex::unlock()
{
const auto tid = Scheduler::current_tid();
const bool has_priority = tid ? !Thread::current().is_userspace() : true;
if (has_priority)
m_queue_depth--;
if (--m_lock_depth)
m_locker = -1;
}
}

View File

@ -1,64 +1,80 @@
#include <kernel/InterruptController.h>
#include <kernel/Lock/SpinLock.h>
#include <kernel/Scheduler.h>
// FIXME: try to move these to header
namespace Kernel
{
InterruptState SpinLock::lock()
static inline uintptr_t get_flags_and_disable_interrupts()
{
auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
InterruptState state = interrupts_enabled();
DISABLE_INTERRUPTS();
if (!m_locker.compare_exchange(-1, tid))
ASSERT_NOT_REACHED();
return state;
uintptr_t flags;
asm volatile("pushf; cli; pop %0" : "=r"(flags) :: "memory");
return flags;
}
void SpinLock::unlock(InterruptState state)
static inline void restore_flags(uintptr_t flags)
{
asm volatile("push %0; popf" :: "rm"(flags) : "memory", "cc");
}
void SpinLock::lock()
{
const auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
m_flags = get_flags_and_disable_interrupts();
}
bool SpinLock::try_lock()
{
const auto tid = Scheduler::current_tid();
ASSERT_NEQ(m_locker.load(), tid);
if (!m_locker.compare_exchange(-1, tid))
return false;
m_flags = get_flags_and_disable_interrupts();
return true;
}
void SpinLock::unlock()
{
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
m_locker.store(-1);
if (state)
ENABLE_INTERRUPTS();
restore_flags(m_flags);
m_locker = -1;
}
InterruptState RecursiveSpinLock::lock()
void RecursiveSpinLock::lock()
{
auto tid = Scheduler::current_tid();
if (m_locker != tid)
{
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
m_flags = get_flags_and_disable_interrupts();
}
m_lock_depth++;
}
InterruptState state = interrupts_enabled();
DISABLE_INTERRUPTS();
if (tid == m_locker)
ASSERT_GT(m_lock_depth, 0);
else
bool RecursiveSpinLock::try_lock()
{
auto tid = Scheduler::current_tid();
if (m_locker != tid)
{
if (!m_locker.compare_exchange(-1, tid))
ASSERT_NOT_REACHED();
ASSERT_EQ(m_lock_depth, 0);
return false;
m_flags = get_flags_and_disable_interrupts();
}
m_lock_depth++;
return state;
return true;
}
void RecursiveSpinLock::unlock(InterruptState state)
void RecursiveSpinLock::unlock()
{
auto tid = Scheduler::current_tid();
ASSERT_EQ(m_locker.load(), tid);
ASSERT_GT(m_lock_depth, 0);
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
if (--m_lock_depth == 0)
{
restore_flags(m_flags);
m_locker = -1;
if (state)
ENABLE_INTERRUPTS();
}
}
}

BIN
kernel/kernel/Lock/spin.o Normal file

Binary file not shown.

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/FileBackedRegion.h>
#include <kernel/Memory/Heap.h>
@ -83,12 +82,9 @@ namespace Kernel
if (pages[page_index] == 0)
return;
{
CriticalScope _;
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
}
PageTable::with_fast_page(pages[page_index], [&] {
memcpy(page_buffer, PageTable::fast_page_as_ptr(), PAGE_SIZE);
});
if (auto ret = inode->write(page_index * PAGE_SIZE, BAN::ConstByteSpan::from(page_buffer)); ret.is_error())
dwarnln("{}", ret.error());

View File

@ -1,4 +1,5 @@
#include <kernel/BootInfo.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>
@ -66,7 +67,7 @@ namespace Kernel
paddr_t Heap::take_free_page()
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.free_pages() >= 1)
return range.reserve_page();
@ -75,7 +76,7 @@ namespace Kernel
void Heap::release_page(paddr_t paddr)
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.contains(paddr))
return range.release_page(paddr);
@ -84,7 +85,7 @@ namespace Kernel
paddr_t Heap::take_free_contiguous_pages(size_t pages)
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.free_pages() >= pages)
if (paddr_t paddr = range.reserve_contiguous_pages(pages))
@ -94,7 +95,7 @@ namespace Kernel
void Heap::release_contiguous_pages(paddr_t paddr, size_t pages)
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
for (auto& range : m_physical_ranges)
if (range.contains(paddr))
return range.release_contiguous_pages(paddr, pages);
@ -103,7 +104,7 @@ namespace Kernel
size_t Heap::used_pages() const
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
size_t result = 0;
for (const auto& range : m_physical_ranges)
result += range.used_pages();
@ -112,7 +113,7 @@ namespace Kernel
size_t Heap::free_pages() const
{
SpinLockGuard _(m_lock);
LockGuard _(m_lock);
size_t result = 0;
for (const auto& range : m_physical_ranges)
result += range.free_pages();

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/MemoryBackedRegion.h>
@ -57,9 +56,10 @@ namespace Kernel
m_page_table.map_page_at(paddr, vaddr, m_flags);
// Zero out the new page
PageTable::with_fast_page(paddr, [&] {
memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE);
});
if (&PageTable::current() == &m_page_table)
memset((void*)vaddr, 0x00, PAGE_SIZE);
else
PageTable::with_fast_page(paddr, [] { memset(PageTable::fast_page_as_ptr(), 0x00, PAGE_SIZE); });
return true;
}
@ -94,9 +94,13 @@ namespace Kernel
TRY(allocate_page_containing(write_vaddr));
PageTable::with_fast_page(m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK), [&] {
memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes);
});
if (&PageTable::current() == &m_page_table)
memcpy((void*)write_vaddr, (void*)(buffer + written), bytes);
else
PageTable::with_fast_page(
m_page_table.physical_address_of(write_vaddr & PAGE_ADDR_MASK),
[&] { memcpy(PageTable::fast_page_as_ptr(page_offset), (void*)(buffer + written), bytes); }
);
written += bytes;
}

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/VirtualRange.h>
@ -12,7 +11,7 @@ namespace Kernel
ASSERT(vaddr % PAGE_SIZE == 0);
ASSERT(vaddr > 0);
VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages);
VirtualRange* result_ptr = new VirtualRange(page_table, preallocate_pages, false);
if (result_ptr == nullptr)
return BAN::Error::from_errno(ENOMEM);
@ -68,14 +67,31 @@ namespace Kernel
}
ASSERT(vaddr + size <= vaddr_end);
SpinLockGuard _(page_table);
LockGuard _(page_table);
page_table.unmap_range(vaddr, size); // We have to unmap here to allow reservation in create_to_vaddr()
return create_to_vaddr(page_table, vaddr, size, flags, preallocate_pages);
}
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated)
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::create_kmalloc(size_t size)
{
auto* result_ptr = new VirtualRange(PageTable::kernel(), false, true);
if (!result_ptr)
return BAN::Error::from_errno(ENOMEM);
auto result = BAN::UniqPtr<VirtualRange>::adopt(result_ptr);
result->m_size = size;
result->m_flags = PageTable::Flags::ReadWrite | PageTable::Flags::Present;
result->m_vaddr = (vaddr_t)kmalloc(size);
if (result->m_vaddr == 0)
return BAN::Error::from_errno(ENOMEM);
result->set_zero();
return result;
}
VirtualRange::VirtualRange(PageTable& page_table, bool preallocated, bool kmalloc)
: m_page_table(page_table)
, m_preallocated(preallocated)
, m_kmalloc(kmalloc)
{ }
VirtualRange::~VirtualRange()
@ -83,13 +99,18 @@ namespace Kernel
if (m_vaddr == 0)
return;
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
if (m_kmalloc)
kfree((void*)m_vaddr);
else
{
paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
if (paddr)
Heap::get().release_page(paddr);
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
paddr_t paddr = m_page_table.physical_address_of(vaddr() + offset);
if (paddr)
Heap::get().release_page(paddr);
}
m_page_table.unmap_range(vaddr(), size());
}
m_page_table.unmap_range(vaddr(), size());
}
BAN::ErrorOr<BAN::UniqPtr<VirtualRange>> VirtualRange::clone(PageTable& page_table)
@ -99,7 +120,7 @@ namespace Kernel
auto result = TRY(create_to_vaddr(page_table, vaddr(), size(), flags(), m_preallocated));
SpinLockGuard _(m_page_table);
LockGuard _(m_page_table);
for (size_t offset = 0; offset < size(); offset += PAGE_SIZE)
{
if (!m_preallocated && m_page_table.physical_address_of(vaddr() + offset))
@ -120,6 +141,7 @@ namespace Kernel
BAN::ErrorOr<void> VirtualRange::allocate_page_for_demand_paging(vaddr_t address)
{
ASSERT(!m_kmalloc);
ASSERT(!m_preallocated);
ASSERT(contains(address));
ASSERT(&PageTable::current() == &m_page_table);
@ -139,7 +161,9 @@ namespace Kernel
void VirtualRange::set_zero()
{
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
PageTable& page_table = PageTable::current();
if (m_kmalloc || &page_table == &m_page_table)
{
memset((void*)vaddr(), 0, size());
return;
@ -163,7 +187,7 @@ namespace Kernel
ASSERT_LTE(offset, size());
ASSERT_LTE(offset, size() - bytes);
if (&PageTable::current() == &m_page_table || &PageTable::kernel() == &m_page_table)
if (m_kmalloc || &PageTable::current() == &m_page_table)
{
memcpy((void*)(vaddr() + offset), buffer, bytes);
return;

View File

@ -1,10 +1,13 @@
#include <BAN/Errors.h>
#include <kernel/CriticalScope.h>
#include <kernel/kprint.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Thread.h>
using Kernel::LockGuard;
using Kernel::SpinLock;
#define MB (1 << 20)
extern uint8_t g_kernel_end[];
@ -81,6 +84,8 @@ struct kmalloc_info
};
static kmalloc_info s_kmalloc_info;
static SpinLock s_kmalloc_lock;
template<size_t SIZE>
struct kmalloc_fixed_node
{
@ -144,6 +149,8 @@ void kmalloc_initialize()
void kmalloc_dump_info()
{
LockGuard _(s_kmalloc_lock);
kprintln("kmalloc: 0x{8H}->0x{8H}", s_kmalloc_info.base, s_kmalloc_info.end);
kprintln(" used: 0x{8H}", s_kmalloc_info.used);
kprintln(" free: 0x{8H}", s_kmalloc_info.free);
@ -155,14 +162,18 @@ void kmalloc_dump_info()
static bool is_corrupted()
{
LockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
auto* temp = info.first();
for (; temp->end() <= info.end; temp = temp->after());
for (; temp->end() <= info.end; temp = temp->after())
continue;
return (uintptr_t)temp != info.end;
}
[[maybe_unused]] static void debug_dump()
{
LockGuard _(s_kmalloc_lock);
auto& info = s_kmalloc_info;
uint32_t used = 0;
@ -183,6 +194,8 @@ static void* kmalloc_fixed()
{
auto& info = s_kmalloc_fixed_info;
LockGuard _(s_kmalloc_lock);
if (!info.free_list_head)
return nullptr;
@ -225,6 +238,8 @@ static void* kmalloc_impl(size_t size, size_t align)
auto& info = s_kmalloc_info;
LockGuard _(s_kmalloc_lock);
for (auto* node = info.first(); node->end() <= info.end; node = node->after())
{
if (node->used())
@ -304,8 +319,6 @@ void* kmalloc(size_t size, size_t align, bool force_identity_map)
align = s_kmalloc_min_align;
ASSERT(align <= PAGE_SIZE);
Kernel::CriticalScope critical;
if (size == 0 || size >= info.size)
goto no_memory;
@ -338,7 +351,7 @@ void kfree(void* address)
uintptr_t address_uint = (uintptr_t)address;
ASSERT(address_uint % s_kmalloc_min_align == 0);
Kernel::CriticalScope critical;
LockGuard _(s_kmalloc_lock);
if (s_kmalloc_fixed_info.base <= address_uint && address_uint < s_kmalloc_fixed_info.end)
{
@ -399,10 +412,9 @@ void kfree(void* address)
BAN::Optional<Kernel::paddr_t> kmalloc_paddr_of(Kernel::vaddr_t vaddr)
{
using namespace Kernel;
using Kernel::vaddr_t;
LockGuard _(s_kmalloc_lock);
if ((vaddr_t)s_kmalloc_storage <= vaddr && vaddr < (vaddr_t)s_kmalloc_storage + sizeof(s_kmalloc_storage))
return V2P(vaddr);
return {};
}

View File

@ -1,3 +1,4 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Networking/ARPTable.h>
#include <kernel/Scheduler.h>
#include <kernel/Timer/Timer.h>
@ -51,10 +52,9 @@ namespace Kernel
ipv4_address = interface.get_gateway();
{
SpinLockGuard _(m_table_lock);
auto it = m_arp_table.find(ipv4_address);
if (it != m_arp_table.end())
return it->value;
LockGuard _(m_table_lock);
if (m_arp_table.contains(ipv4_address))
return m_arp_table[ipv4_address];
}
ARPPacket arp_request;
@ -74,10 +74,9 @@ namespace Kernel
while (SystemTimer::get().ms_since_boot() < timeout)
{
{
SpinLockGuard _(m_table_lock);
auto it = m_arp_table.find(ipv4_address);
if (it != m_arp_table.end())
return it->value;
LockGuard _(m_table_lock);
if (m_arp_table.contains(ipv4_address))
return m_arp_table[ipv4_address];
}
Scheduler::get().reschedule();
}
@ -115,15 +114,13 @@ namespace Kernel
}
case ARPOperation::Reply:
{
SpinLockGuard _(m_table_lock);
auto it = m_arp_table.find(packet.spa);
if (it != m_arp_table.end())
LockGuard _(m_table_lock);
if (m_arp_table.contains(packet.spa))
{
if (it->value != packet.sha)
if (m_arp_table[packet.spa] != packet.sha)
{
dprintln("Update IPv4 {} MAC to {}", packet.spa, packet.sha);
it->value = packet.sha;
m_arp_table[packet.spa] = packet.sha;
}
}
else
@ -148,7 +145,7 @@ namespace Kernel
BAN::Optional<PendingArpPacket> pending;
{
CriticalScope _;
LockGuard _(m_pending_lock);
if (!m_pending_packets.empty())
{
pending = m_pending_packets.front();
@ -171,12 +168,12 @@ namespace Kernel
{
auto& arp_packet = buffer.as<const ARPPacket>();
LockGuard _(m_pending_lock);
if (m_pending_packets.full())
{
dprintln("arp packet queue full");
return;
}
m_pending_packets.push({ .interface = interface, .packet = arp_packet });
m_pending_semaphore.unblock();
}

View File

@ -261,7 +261,7 @@ namespace Kernel
{
ASSERT_LTE(buffer.size() + sizeof(EthernetHeader), E1000_TX_BUFFER_SIZE);
CriticalScope _;
LockGuard _(m_lock);
size_t tx_current = read32(REG_TDT) % E1000_TX_DESCRIPTOR_COUNT;

View File

@ -70,14 +70,12 @@ namespace Kernel
void IPv4Layer::unbind_socket(BAN::RefPtr<NetworkSocket> socket, uint16_t port)
{
LockGuard _(m_socket_lock);
if (m_bound_sockets.contains(port))
{
SpinLockGuard _(m_bound_socket_lock);
auto it = m_bound_sockets.find(port);
if (it != m_bound_sockets.end())
{
ASSERT(it->value.lock() == socket);
m_bound_sockets.remove(it);
}
ASSERT(m_bound_sockets[port].valid());
ASSERT(m_bound_sockets[port].lock() == socket);
m_bound_sockets.remove(port);
}
NetworkManager::get().TmpFileSystem::remove_from_cache(socket);
}
@ -90,11 +88,11 @@ namespace Kernel
return BAN::Error::from_errno(EAFNOSUPPORT);
auto& sockaddr_in = *reinterpret_cast<const struct sockaddr_in*>(address);
SpinLockGuard _(m_bound_socket_lock);
LockGuard _(m_socket_lock);
uint16_t port = NetworkSocket::PORT_NONE;
for (uint32_t i = 0; i < 100 && port == NetworkSocket::PORT_NONE; i++)
if (uint32_t temp = 0xC000 | (Random::get_u32() & 0x3FFF); !m_bound_sockets.contains(temp))
if (uint32_t temp = 0xC000 | (Random::get_u32() & 0x3FFF); !m_bound_sockets.contains(temp) || !m_bound_sockets[temp].valid())
port = temp;
for (uint32_t temp = 0xC000; temp < 0xFFFF && port == NetworkSocket::PORT_NONE; temp++)
if (!m_bound_sockets.contains(temp))
@ -126,11 +124,17 @@ namespace Kernel
auto& sockaddr_in = *reinterpret_cast<const struct sockaddr_in*>(address);
uint16_t port = BAN::host_to_network_endian(sockaddr_in.sin_port);
SpinLockGuard _(m_bound_socket_lock);
LockGuard _(m_socket_lock);
if (m_bound_sockets.contains(port))
return BAN::Error::from_errno(EADDRINUSE);
TRY(m_bound_sockets.insert(port, TRY(socket->get_weak_ptr())));
if (!m_bound_sockets.contains(port))
TRY(m_bound_sockets.insert(port, TRY(socket->get_weak_ptr())));
else
{
auto& bound = m_bound_sockets[port];
if (bound.valid())
return BAN::Error::from_errno(EADDRINUSE);
bound = TRY(socket->get_weak_ptr());
}
// FIXME: actually determine proper interface
auto interface = NetworkManager::get().interfaces().front();
@ -252,14 +256,13 @@ namespace Kernel
BAN::RefPtr<Kernel::NetworkSocket> bound_socket;
{
SpinLockGuard _(m_bound_socket_lock);
auto it = m_bound_sockets.find(dst_port);
if (it == m_bound_sockets.end())
LockGuard _(m_socket_lock);
if (!m_bound_sockets.contains(dst_port))
{
dprintln_if(DEBUG_IPV4, "no one is listening on port {}", dst_port);
return {};
}
bound_socket = it->value.lock();
bound_socket = m_bound_sockets[dst_port].lock();
}
if (!bound_socket)
@ -290,7 +293,7 @@ namespace Kernel
BAN::Optional<PendingIPv4Packet> pending;
{
CriticalScope _;
LockGuard _(m_packet_lock);
if (!m_pending_packets.empty())
{
pending = m_pending_packets.front();
@ -310,7 +313,7 @@ namespace Kernel
if (auto ret = handle_ipv4_packet(pending->interface, BAN::ByteSpan(buffer_start, ipv4_packet_size)); ret.is_error())
dwarnln("{}", ret.error());
CriticalScope _;
LockGuard _(m_packet_lock);
m_pending_total_size -= ipv4_packet_size;
if (m_pending_total_size)
memmove(buffer_start, buffer_start + ipv4_packet_size, m_pending_total_size);
@ -319,6 +322,8 @@ namespace Kernel
void IPv4Layer::add_ipv4_packet(NetworkInterface& interface, BAN::ConstByteSpan buffer)
{
LockGuard _(m_packet_lock);
if (m_pending_packets.full())
{
dwarnln("IPv4 packet queue full");

View File

@ -68,8 +68,6 @@ namespace Kernel
void TCPSocket::on_close_impl()
{
LockGuard _(m_mutex);
if (!is_bound())
return;
@ -103,8 +101,6 @@ namespace Kernel
if (address_len > (socklen_t)sizeof(sockaddr_storage))
address_len = sizeof(sockaddr_storage);
LockGuard _(m_mutex);
ASSERT(!m_connection_info.has_value());
switch (m_state)
@ -195,6 +191,8 @@ namespace Kernel
void TCPSocket::add_protocol_header(BAN::ByteSpan packet, uint16_t dst_port, PseudoHeader pseudo_header)
{
LockGuard _(m_mutex);
auto& header = packet.as<TCPHeader>();
memset(&header, 0, sizeof(TCPHeader));
memset(header.options, TCPOption::End, m_tcp_options_bytes);
@ -212,7 +210,6 @@ namespace Kernel
{
case State::Closed:
{
LockGuard _(m_mutex);
header.syn = 1;
add_tcp_header_option<0, TCPOption::MaximumSeqmentSize>(header, m_interface->payload_mtu() - m_network_layer.header_size());
add_tcp_header_option<4, TCPOption::WindowScale>(header, 0);
@ -233,7 +230,6 @@ namespace Kernel
break;
case State::CloseWait:
{
LockGuard _(m_mutex);
header.ack = 1;
header.fin = 1;
m_state = State::LastAck;
@ -242,7 +238,6 @@ namespace Kernel
}
case State::FinWait1:
{
LockGuard _(m_mutex);
header.ack = 1;
header.fin = 1;
m_state = State::FinWait2;
@ -250,7 +245,6 @@ namespace Kernel
}
case State::FinWait2:
{
LockGuard _(m_mutex);
header.ack = 1;
m_state = State::TimeWait;
m_time_wait_start_ms = SystemTimer::get().ms_since_boot();
@ -303,6 +297,10 @@ namespace Kernel
auto payload = buffer.slice(header.data_offset * sizeof(uint32_t));
// FIXME: Internet layer packet receive thread should not be able to be
// blocked by inode's mutex
LockGuard _(m_mutex);
switch (m_state)
{
case State::Closed:
@ -312,7 +310,6 @@ namespace Kernel
if (!header.ack || !header.syn)
break;
LockGuard _(m_mutex);
if (header.ack_number != m_send_window.current_seq)
{
@ -345,8 +342,6 @@ namespace Kernel
if (!header.ack)
break;
LockGuard _(m_mutex);
if (header.fin)
{
if (m_recv_window.start_seq + m_recv_window.data_size != header.seq_number)

View File

@ -1,3 +1,4 @@
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Networking/UDPSocket.h>
#include <kernel/Thread.h>
@ -45,7 +46,7 @@ namespace Kernel
//auto& header = packet.as<const UDPHeader>();
auto payload = packet.slice(sizeof(UDPHeader));
SpinLockGuard _(m_packet_lock);
LockGuard _(m_packet_lock);
if (m_packets.full())
{
@ -87,12 +88,12 @@ namespace Kernel
}
ASSERT(m_port != PORT_NONE);
auto state = m_packet_lock.lock();
LockGuard _(m_packet_lock);
while (m_packets.empty())
{
m_packet_lock.unlock(state);
LockFreeGuard free(m_packet_lock);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
state = m_packet_lock.lock();
}
auto packet_info = m_packets.front();
@ -114,8 +115,6 @@ namespace Kernel
m_packet_total_size -= packet_info.packet_size;
m_packet_lock.unlock(state);
if (address && address_len)
{
if (*address_len > (socklen_t)sizeof(sockaddr_storage))

View File

@ -51,10 +51,9 @@ namespace Kernel
{
if (is_bound() && !is_bound_to_unused())
{
SpinLockGuard _(s_bound_socket_lock);
auto it = s_bound_sockets.find(m_bound_path);
if (it != s_bound_sockets.end())
s_bound_sockets.remove(it);
LockGuard _(s_bound_socket_lock);
if (s_bound_sockets.contains(m_bound_path))
s_bound_sockets.remove(m_bound_path);
}
}
@ -72,7 +71,7 @@ namespace Kernel
BAN::RefPtr<UnixDomainSocket> pending;
{
SpinLockGuard _(connection_info.pending_lock);
LockGuard _(connection_info.pending_lock);
pending = connection_info.pending_connections.front();
connection_info.pending_connections.pop();
connection_info.pending_semaphore.unblock();
@ -121,11 +120,10 @@ namespace Kernel
BAN::RefPtr<UnixDomainSocket> target;
{
SpinLockGuard _(s_bound_socket_lock);
auto it = s_bound_sockets.find(file.canonical_path);
if (it == s_bound_sockets.end())
LockGuard _(s_bound_socket_lock);
if (!s_bound_sockets.contains(file.canonical_path))
return BAN::Error::from_errno(ECONNREFUSED);
target = it->value.lock();
target = s_bound_sockets[file.canonical_path].lock();
if (!target)
return BAN::Error::from_errno(ECONNREFUSED);
}
@ -152,7 +150,7 @@ namespace Kernel
{
auto& target_info = target->m_info.get<ConnectionInfo>();
{
SpinLockGuard _(target_info.pending_lock);
LockGuard _(target_info.pending_lock);
if (target_info.pending_connections.size() < target_info.pending_connections.capacity())
{
MUST(target_info.pending_connections.push(this));
@ -207,7 +205,7 @@ namespace Kernel
O_RDWR
));
SpinLockGuard _(s_bound_socket_lock);
LockGuard _(s_bound_socket_lock);
ASSERT(!s_bound_sockets.contains(file.canonical_path));
TRY(s_bound_sockets.emplace(file.canonical_path, TRY(get_weak_ptr())));
m_bound_path = BAN::move(file.canonical_path);
@ -231,12 +229,12 @@ namespace Kernel
BAN::ErrorOr<void> UnixDomainSocket::add_packet(BAN::ConstByteSpan packet)
{
auto state = m_packet_lock.lock();
LockGuard _(m_mutex);
while (m_packet_sizes.full() || m_packet_size_total + packet.size() > s_packet_buffer_size)
{
m_packet_lock.unlock(state);
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
state = m_packet_lock.lock();
}
uint8_t* packet_buffer = reinterpret_cast<uint8_t*>(m_packet_buffer->vaddr() + m_packet_size_total);
@ -247,7 +245,6 @@ namespace Kernel
m_packet_sizes.push(packet.size());
m_packet_semaphore.unblock();
m_packet_lock.unlock(state);
return {};
}
@ -321,11 +318,10 @@ namespace Kernel
canonical_path = BAN::move(file.canonical_path);
}
SpinLockGuard _(s_bound_socket_lock);
auto it = s_bound_sockets.find(canonical_path);
if (it == s_bound_sockets.end())
LockGuard _(s_bound_socket_lock);
if (!s_bound_sockets.contains(canonical_path))
return BAN::Error::from_errno(EDESTADDRREQ);
auto target = it->value.lock();
auto target = s_bound_sockets[canonical_path].lock();
if (!target)
return BAN::Error::from_errno(EDESTADDRREQ);
TRY(target->add_packet(message));
@ -342,12 +338,10 @@ namespace Kernel
return BAN::Error::from_errno(ENOTCONN);
}
auto state = m_packet_lock.lock();
while (m_packet_size_total == 0)
{
m_packet_lock.unlock(state);
LockFreeGuard _(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_packet_semaphore));
state = m_packet_lock.lock();
}
uint8_t* packet_buffer = reinterpret_cast<uint8_t*>(m_packet_buffer->vaddr());
@ -366,7 +360,6 @@ namespace Kernel
m_packet_size_total -= nread;
m_packet_semaphore.unblock();
m_packet_lock.unlock(state);
return nread;
}

View File

@ -1,6 +1,6 @@
#include <kernel/CriticalScope.h>
#include <kernel/IDT.h>
#include <kernel/IO.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/PIC.h>
#include <string.h>
@ -79,7 +79,7 @@ namespace Kernel
void PIC::enable_irq(uint8_t irq)
{
CriticalScope _;
LockGuard _(m_lock);
ASSERT(irq < 16);
ASSERT(m_reserved_irqs & (1 << irq));
@ -99,7 +99,7 @@ namespace Kernel
dwarnln("PIC only supports 16 irqs");
return BAN::Error::from_errno(EFAULT);
}
CriticalScope _;
LockGuard _(m_lock);
if (m_reserved_irqs & (1 << irq))
{
dwarnln("irq {} is already reserved", irq);
@ -111,7 +111,7 @@ namespace Kernel
BAN::Optional<uint8_t> PIC::get_free_irq()
{
CriticalScope _;
LockGuard _(m_lock);
for (int irq = 0; irq < 16; irq++)
{
if (m_reserved_irqs & (1 << irq))

View File

@ -1,6 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <BAN/StringView.h>
#include <kernel/CriticalScope.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/FS/ProcFS/FileSystem.h>
#include <kernel/FS/VirtualFileSystem.h>
@ -32,9 +31,9 @@ namespace Kernel
static BAN::Vector<Process*> s_processes;
static RecursiveSpinLock s_process_lock;
static void for_each_process(const BAN::Function<BAN::Iteration(Process&)>& callback)
void Process::for_each_process(const BAN::Function<BAN::Iteration(Process&)>& callback)
{
SpinLockGuard _(s_process_lock);
LockGuard _(s_process_lock);
for (auto* process : s_processes)
{
@ -45,9 +44,9 @@ namespace Kernel
}
}
static void for_each_process_in_session(pid_t sid, const BAN::Function<BAN::Iteration(Process&)>& callback)
void Process::for_each_process_in_session(pid_t sid, const BAN::Function<BAN::Iteration(Process&)>& callback)
{
SpinLockGuard _(s_process_lock);
LockGuard _(s_process_lock);
for (auto* process : s_processes)
{
@ -66,7 +65,7 @@ namespace Kernel
pid_t pid;
{
CriticalScope _;
LockGuard _(s_process_lock);
pid = s_next_id;
if (sid == 0 && pgrp == 0)
{
@ -89,10 +88,9 @@ namespace Kernel
void Process::register_to_scheduler()
{
{
SpinLockGuard _(s_process_lock);
MUST(s_processes.push_back(this));
}
s_process_lock.lock();
MUST(s_processes.push_back(this));
s_process_lock.unlock();
for (auto* thread : m_threads)
MUST(Scheduler::get().add_thread(thread));
}
@ -194,14 +192,14 @@ namespace Kernel
void Process::add_thread(Thread* thread)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
MUST(m_threads.push_back(thread));
}
void Process::cleanup_function()
{
{
SpinLockGuard _(s_process_lock);
LockGuard _(s_process_lock);
for (size_t i = 0; i < s_processes.size(); i++)
if (s_processes[i] == this)
s_processes.remove(i);
@ -215,7 +213,8 @@ namespace Kernel
while (m_exit_status.waiting > 0)
Scheduler::get().reschedule();
m_process_lock.lock();
// This mutex will no longer be freed
m_big_mutex.lock();
m_open_file_descriptors.close_all();
@ -236,7 +235,7 @@ namespace Kernel
m_threads.clear();
thread.setup_process_cleanup();
Scheduler::get().execute_current_thread();
Scheduler::get().reschedule_current_no_save();
ASSERT_NOT_REACHED();
}
@ -254,7 +253,7 @@ namespace Kernel
void Process::exit(int status, int signal)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
m_exit_status.exit_code = __WGENEXITCODE(status, signal);
for (auto* thread : m_threads)
if (thread != &Thread::current())
@ -275,7 +274,7 @@ namespace Kernel
meminfo.phys_pages = 0;
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
for (auto* thread : m_threads)
{
meminfo.virt_pages += thread->virtual_page_count();
@ -324,13 +323,13 @@ namespace Kernel
size_t Process::proc_cmdline(off_t offset, BAN::ByteSpan buffer) const
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return read_from_vec_of_str(m_cmdline, offset, buffer);
}
size_t Process::proc_environ(off_t offset, BAN::ByteSpan buffer) const
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return read_from_vec_of_str(m_environ, offset, buffer);
}
@ -343,7 +342,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_gettermios(::termios* termios)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(termios, sizeof(::termios)));
@ -362,7 +361,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_settermios(const ::termios* termios)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(termios, sizeof(::termios)));
@ -401,7 +400,7 @@ namespace Kernel
{
auto page_table = BAN::UniqPtr<PageTable>::adopt(TRY(PageTable::create_userspace()));
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
BAN::String working_directory;
TRY(working_directory.append(m_working_directory));
@ -441,7 +440,7 @@ namespace Kernel
{
// NOTE: We scope everything for automatic deletion
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto loadable_elf = TRY(load_elf_for_exec(m_credentials, path, m_working_directory, page_table()));
@ -540,7 +539,7 @@ namespace Kernel
m_has_called_exec = true;
m_threads.front()->setup_exec();
Scheduler::get().execute_current_thread();
Scheduler::get().reschedule_current_no_save();
ASSERT_NOT_REACHED();
}
@ -577,7 +576,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_wait(pid_t pid, int* stat_loc, int options)
{
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(stat_loc, sizeof(int)));
}
@ -610,7 +609,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_nanosleep(const timespec* rqtp, timespec* rmtp)
{
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(rqtp, sizeof(timespec)));
if (rmtp)
TRY(validate_pointer_access(rmtp, sizeof(timespec)));
@ -652,7 +651,7 @@ namespace Kernel
return BAN::Error::from_errno(ENOTSUP);
}
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
auto absolute_path = TRY(absolute_path_of(path));
@ -681,7 +680,7 @@ namespace Kernel
{
ASSERT(&Process::current() == this);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
if (Thread::current().stack().contains(address))
{
@ -709,13 +708,13 @@ namespace Kernel
BAN::ErrorOr<long> Process::open_inode(BAN::RefPtr<Inode> inode, int flags)
{
ASSERT(inode);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.open(inode, flags));
}
BAN::ErrorOr<long> Process::open_file(BAN::StringView path, int flags, mode_t mode)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
BAN::String absolute_path = TRY(absolute_path_of(path));
@ -748,14 +747,14 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_open(const char* path, int flags, mode_t mode)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
return open_file(path, flags, mode);
}
BAN::ErrorOr<long> Process::sys_openat(int fd, const char* path, int flags, mode_t mode)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
@ -771,28 +770,28 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_close(int fd)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(m_open_file_descriptors.close(fd));
return 0;
}
BAN::ErrorOr<long> Process::sys_read(int fd, void* buffer, size_t count)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, count));
return TRY(m_open_file_descriptors.read(fd, BAN::ByteSpan((uint8_t*)buffer, count)));
}
BAN::ErrorOr<long> Process::sys_write(int fd, const void* buffer, size_t count)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, count));
return TRY(m_open_file_descriptors.write(fd, BAN::ByteSpan((uint8_t*)buffer, count)));
}
BAN::ErrorOr<long> Process::sys_create(const char* path, mode_t mode)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
TRY(create_file_or_dir(path, mode));
return 0;
@ -800,7 +799,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_create_dir(const char* path, mode_t mode)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
BAN::StringView path_sv(path);
if (!path_sv.empty() && path_sv.back() == '/')
@ -811,7 +810,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_unlink(const char* path)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto absolute_path = TRY(absolute_path_of(path));
@ -844,7 +843,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_readlink(const char* path, char* buffer, size_t bufsize)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
TRY(validate_pointer_access(buffer, bufsize));
@ -855,7 +854,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_readlinkat(int fd, const char* path, char* buffer, size_t bufsize)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
TRY(validate_pointer_access(buffer, bufsize));
@ -872,7 +871,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_pread(int fd, void* buffer, size_t count, off_t offset)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, count));
auto inode = TRY(m_open_file_descriptors.inode_of(fd));
return TRY(inode->read(offset, { (uint8_t*)buffer, count }));
@ -883,7 +882,7 @@ namespace Kernel
if (mode & S_IFMASK)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto absolute_path = TRY(absolute_path_of(path));
@ -895,7 +894,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_chown(const char* path, uid_t uid, gid_t gid)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
auto absolute_path = TRY(absolute_path_of(path));
@ -907,7 +906,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_socket(int domain, int type, int protocol)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.socket(domain, type, protocol));
}
@ -918,7 +917,7 @@ namespace Kernel
if (!address && address_len)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
if (address)
{
TRY(validate_pointer_access(address_len, sizeof(*address_len)));
@ -934,7 +933,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_bind(int socket, const sockaddr* address, socklen_t address_len)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(address, address_len));
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
@ -947,7 +946,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_connect(int socket, const sockaddr* address, socklen_t address_len)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(address, address_len));
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
@ -960,7 +959,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_listen(int socket, int backlog)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(socket));
if (!inode->mode().ifsock())
@ -972,7 +971,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_sendto(const sys_sendto_t* arguments)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(arguments, sizeof(sys_sendto_t)));
TRY(validate_pointer_access(arguments->message, arguments->length));
TRY(validate_pointer_access(arguments->dest_addr, arguments->dest_len));
@ -992,7 +991,7 @@ namespace Kernel
if (!arguments->address && arguments->address_len)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(arguments, sizeof(sys_recvfrom_t)));
TRY(validate_pointer_access(arguments->buffer, arguments->length));
if (arguments->address)
@ -1011,14 +1010,14 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_ioctl(int fildes, int request, void* arg)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(fildes));
return TRY(inode->ioctl(request, arg));
}
BAN::ErrorOr<long> Process::sys_pselect(sys_pselect_t* arguments)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(arguments, sizeof(sys_pselect_t)));
if (arguments->readfds)
@ -1087,7 +1086,7 @@ namespace Kernel
if (set_bits > 0)
break;
LockFreeGuard free(m_process_lock);
LockFreeGuard free(m_big_mutex);
SystemTimer::get().sleep(1);
}
@ -1113,7 +1112,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_pipe(int fildes[2])
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(fildes, sizeof(int) * 2));
TRY(m_open_file_descriptors.pipe(fildes));
return 0;
@ -1121,32 +1120,32 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_dup(int fildes)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.dup(fildes));
}
BAN::ErrorOr<long> Process::sys_dup2(int fildes, int fildes2)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.dup2(fildes, fildes2));
}
BAN::ErrorOr<long> Process::sys_fcntl(int fildes, int cmd, int extra)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.fcntl(fildes, cmd, extra));
}
BAN::ErrorOr<long> Process::sys_seek(int fd, off_t offset, int whence)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(m_open_file_descriptors.seek(fd, offset, whence));
return 0;
}
BAN::ErrorOr<long> Process::sys_tell(int fd)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
return TRY(m_open_file_descriptors.tell(fd));
}
@ -1154,7 +1153,7 @@ namespace Kernel
{
BAN::String absolute_source, absolute_target;
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(absolute_source.append(TRY(absolute_path_of(source))));
TRY(absolute_target.append(TRY(absolute_path_of(target))));
}
@ -1164,7 +1163,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_fstat(int fd, struct stat* buf)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buf, sizeof(struct stat)));
TRY(m_open_file_descriptors.fstat(fd, buf));
return 0;
@ -1172,7 +1171,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_fstatat(int fd, const char* path, struct stat* buf, int flag)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buf, sizeof(struct stat)));
TRY(m_open_file_descriptors.fstatat(fd, path, buf, flag));
return 0;
@ -1180,7 +1179,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_stat(const char* path, struct stat* buf, int flag)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buf, sizeof(struct stat)));
TRY(m_open_file_descriptors.stat(TRY(absolute_path_of(path)), buf, flag));
return 0;
@ -1233,7 +1232,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_readdir(int fd, DirectoryEntryList* list, size_t list_size)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(list, list_size));
TRY(m_open_file_descriptors.read_dir_entries(fd, list, list_size));
return 0;
@ -1244,7 +1243,7 @@ namespace Kernel
BAN::String absolute_path;
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
absolute_path = TRY(absolute_path_of(path));
}
@ -1253,7 +1252,7 @@ namespace Kernel
if (!file.inode->mode().ifdir())
return BAN::Error::from_errno(ENOTDIR);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
m_working_directory = BAN::move(file.canonical_path);
return 0;
@ -1261,7 +1260,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_getpwd(char* buffer, size_t size)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(buffer, size));
@ -1277,7 +1276,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_mmap(const sys_mmap_t* args)
{
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(args, sizeof(sys_mmap_t)));
}
@ -1318,7 +1317,7 @@ namespace Kernel
region_type, page_flags
));
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(m_mapped_regions.push_back(BAN::move(region)));
return m_mapped_regions.back()->vaddr();
}
@ -1326,7 +1325,7 @@ namespace Kernel
if (args->addr != nullptr)
return BAN::Error::from_errno(ENOTSUP);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(args->fildes));
@ -1374,7 +1373,7 @@ namespace Kernel
if (vaddr % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
// FIXME: We should only map partial regions
for (size_t i = 0; i < m_mapped_regions.size(); i++)
@ -1393,7 +1392,7 @@ namespace Kernel
if (vaddr % PAGE_SIZE != 0)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
for (auto& mapped_region : m_mapped_regions)
if (mapped_region->overlaps(vaddr, len))
@ -1404,7 +1403,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_tty_ctrl(int fildes, int command, int flags)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
auto inode = TRY(m_open_file_descriptors.inode_of(fildes));
if (!inode->is_tty())
@ -1417,7 +1416,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_termid(char* buffer)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(buffer));
@ -1438,7 +1437,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_clock_gettime(clockid_t clock_id, timespec* tp)
{
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access(tp, sizeof(timespec)));
}
@ -1463,7 +1462,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_load_keymap(const char* path)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_string_access(path));
if (!m_credentials.is_superuser())
@ -1480,11 +1479,11 @@ namespace Kernel
return BAN::Error::from_errno(EINVAL);
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
TRY(validate_pointer_access((void*)handler, sizeof(handler)));
}
CriticalScope _;
LockGuard _(m_signal_lock);
m_signal_handlers[signal] = (vaddr_t)handler;
return 0;
}
@ -1498,7 +1497,7 @@ namespace Kernel
if (pid == Process::current().pid())
{
CriticalScope _;
LockGuard _(m_signal_lock);
Process::current().m_signal_pending_mask |= 1 << signal;
return 0;
}
@ -1512,7 +1511,7 @@ namespace Kernel
found = true;
if (signal)
{
CriticalScope _;
LockGuard _(m_signal_lock);
process.m_signal_pending_mask |= 1 << signal;
// FIXME: This is super hacky
Scheduler::get().unblock_thread(process.m_threads.front()->tid());
@ -1530,7 +1529,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_tcsetpgrp(int fd, pid_t pgrp)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
if (!m_controlling_terminal)
return BAN::Error::from_errno(ENOTTY);
@ -1566,7 +1565,7 @@ namespace Kernel
if (uid < 0 || uid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
// If the process has appropriate privileges, setuid() shall set the real user ID, effective user ID, and the saved
// set-user-ID of the calling process to uid.
@ -1596,7 +1595,7 @@ namespace Kernel
if (gid < 0 || gid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
// If the process has appropriate privileges, setgid() shall set the real group ID, effective group ID, and the saved
// set-group-ID of the calling process to gid.
@ -1624,7 +1623,7 @@ namespace Kernel
if (uid < 0 || uid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
// If uid is equal to the real user ID or the saved set-user-ID, or if the process has appropriate privileges, seteuid()
// shall set the effective user ID of the calling process to uid; the real user ID and saved set-user-ID shall remain unchanged.
@ -1643,7 +1642,7 @@ namespace Kernel
if (gid < 0 || gid >= 1'000'000'000)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
// If gid is equal to the real group ID or the saved set-group-ID, or if the process has appropriate privileges, setegid()
// shall set the effective group ID of the calling process to gid; the real group ID, saved set-group-ID, and any
@ -1671,7 +1670,7 @@ namespace Kernel
// by the ruid and euid arguments. If ruid or euid is -1, the corresponding effective or real user ID of the current
// process shall be left unchanged.
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
// A process with appropriate privileges can set either ID to any value.
if (!m_credentials.is_superuser())
@ -1719,7 +1718,7 @@ namespace Kernel
// The real and effective group IDs may be set to different values in the same call.
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
// Only a process with appropriate privileges can set the real group ID and the effective group ID to any valid value.
if (!m_credentials.is_superuser())
@ -1752,7 +1751,7 @@ namespace Kernel
if (pgid < 0)
return BAN::Error::from_errno(EINVAL);
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
if (pid == 0)
pid = m_pid;
@ -1817,7 +1816,7 @@ namespace Kernel
BAN::ErrorOr<long> Process::sys_getpgid(pid_t pid)
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
if (pid == 0 || pid == m_pid)
return m_pgrp;
@ -1849,7 +1848,7 @@ namespace Kernel
BAN::ErrorOr<BAN::String> Process::absolute_path_of(BAN::StringView path) const
{
LockGuard _(m_process_lock);
LockGuard _(m_big_mutex);
if (path.empty() || path == "."sv)
return m_working_directory;

View File

@ -1,8 +1,8 @@
#include <kernel/Arch.h>
#include <kernel/Attributes.h>
#include <kernel/CriticalScope.h>
#include <kernel/GDT.h>
#include <kernel/InterruptController.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
#include <kernel/Timer/Timer.h>
@ -32,13 +32,45 @@ namespace Kernel
asm volatile("movq %0, %%rsp" :: "r"(s_temp_stack + sizeof(s_temp_stack)));
}
void SchedulerLock::lock()
{
auto tid = Scheduler::current_tid();
if (tid != m_locker)
{
while (!m_locker.compare_exchange(-1, tid))
__builtin_ia32_pause();
ASSERT_EQ(m_lock_depth, 0);
}
m_lock_depth++;
}
void SchedulerLock::unlock()
{
	// Only the owning thread may drop a level, and there must be one to drop.
	ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
	ASSERT_GT(m_lock_depth, 0);
	m_lock_depth--;
	// Leaving the outermost level publishes the lock as free (-1).
	if (m_lock_depth == 0)
		m_locker = -1;
}
// Drop every recursion level at once and release the lock.
// Used where control leaves the scheduler without unwinding the matching
// unlock() calls (see the unlock_all() calls right before start_thread /
// continue_thread in _execute_current_thread).
void SchedulerLock::unlock_all()
{
// Caller must be the current owner holding at least one level.
ASSERT_EQ(m_locker.load(), Scheduler::current_tid());
ASSERT_GT(m_lock_depth, 0);
// Zero the depth BEFORE publishing the lock as free (-1): the next
// locker asserts m_lock_depth == 0 immediately after its acquire.
m_lock_depth = 0;
m_locker = -1;
}
pid_t SchedulerLock::locker() const
{
	// Tid of the thread currently holding the lock, or -1 when free.
	return m_locker.load();
}
BAN::ErrorOr<void> Scheduler::initialize()
{
ASSERT(s_instance == nullptr);
Scheduler* scheduler = new Scheduler();
ASSERT(scheduler);
scheduler->m_idle_thread = TRY(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
s_instance = scheduler;
s_instance = new Scheduler();
ASSERT(s_instance);
s_instance->m_idle_thread = TRY(Thread::create_kernel([](void*) { for (;;) asm volatile("hlt"); }, nullptr, nullptr));
return {};
}
@ -53,6 +85,7 @@ namespace Kernel
VERIFY_CLI();
ASSERT(!m_active_threads.empty());
m_current_thread = m_active_threads.begin();
m_lock.lock();
execute_current_thread();
ASSERT_NOT_REACHED();
}
@ -64,7 +97,7 @@ namespace Kernel
pid_t Scheduler::current_tid()
{
if (s_instance == nullptr)
if (s_instance == nullptr || s_instance->m_idle_thread == nullptr)
return 0;
return Scheduler::get().current_thread().tid();
}
@ -72,6 +105,7 @@ namespace Kernel
void Scheduler::timer_reschedule()
{
VERIFY_CLI();
m_lock.lock();
wake_threads();
@ -85,6 +119,7 @@ namespace Kernel
void Scheduler::reschedule()
{
DISABLE_INTERRUPTS();
m_lock.lock();
if (save_current_thread())
{
@ -99,20 +134,30 @@ namespace Kernel
void Scheduler::reschedule_if_idling()
{
VERIFY_CLI();
m_lock.lock();
if (m_active_threads.empty() || &current_thread() != m_idle_thread)
return;
return m_lock.unlock();
if (save_current_thread())
return;
m_current_thread = m_active_threads.begin();
m_current_thread = {};
advance_current_thread();
execute_current_thread();
ASSERT_NOT_REACHED();
}
void Scheduler::reschedule_current_no_save()
{
VERIFY_CLI();
m_lock.lock();
execute_current_thread();
}
void Scheduler::wake_threads()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
uint64_t current_time = SystemTimer::get().ms_since_boot();
while (!m_sleeping_threads.empty() && m_sleeping_threads.front().wake_time <= current_time)
@ -127,7 +172,7 @@ namespace Kernel
BAN::ErrorOr<void> Scheduler::add_thread(Thread* thread)
{
CriticalScope _;
LockGuard _(m_lock);
TRY(m_active_threads.emplace_back(thread));
return {};
}
@ -135,19 +180,20 @@ namespace Kernel
void Scheduler::advance_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
if (m_active_threads.empty())
{
m_current_thread = {};
return;
}
if (!m_current_thread || ++m_current_thread == m_active_threads.end())
else if (!m_current_thread || ++m_current_thread == m_active_threads.end())
m_current_thread = m_active_threads.begin();
m_lock.m_locker = current_tid();
}
void Scheduler::remove_and_advance_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
ASSERT(m_current_thread);
@ -162,6 +208,8 @@ namespace Kernel
advance_current_thread();
m_active_threads.remove(temp);
}
m_lock.m_locker = current_tid();
}
// NOTE: this is declared always inline, so we don't corrupt the stack
@ -169,6 +217,7 @@ namespace Kernel
ALWAYS_INLINE bool Scheduler::save_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
uintptr_t rsp, rip;
push_callee_saved();
@ -191,6 +240,7 @@ namespace Kernel
void Scheduler::delete_current_process_and_thread()
{
DISABLE_INTERRUPTS();
m_lock.lock();
load_temp_stack();
PageTable::kernel().load();
@ -211,6 +261,7 @@ namespace Kernel
void Scheduler::execute_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
load_temp_stack();
PageTable::kernel().load();
@ -221,6 +272,7 @@ namespace Kernel
NEVER_INLINE void Scheduler::_execute_current_thread()
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
#if SCHEDULER_VERIFY_STACK
vaddr_t rsp;
@ -267,10 +319,12 @@ namespace Kernel
{
case Thread::State::NotStarted:
current->set_started();
m_lock.unlock_all();
start_thread(current->rsp(), current->rip());
case Thread::State::Executing:
while (current->can_add_signal_to_execute())
current->handle_signal();
m_lock.unlock_all();
continue_thread(current->rsp(), current->rip());
case Thread::State::Terminated:
ASSERT_NOT_REACHED();
@ -282,6 +336,7 @@ namespace Kernel
void Scheduler::set_current_thread_sleeping_impl(uint64_t wake_time)
{
VERIFY_CLI();
ASSERT_EQ(m_lock.locker(), current_tid());
if (save_current_thread())
{
@ -302,6 +357,7 @@ namespace Kernel
);
m_current_thread = {};
m_lock.m_locker = current_tid();
advance_current_thread();
execute_current_thread();
@ -312,6 +368,7 @@ namespace Kernel
{
VERIFY_STI();
DISABLE_INTERRUPTS();
m_lock.lock();
ASSERT(m_current_thread);
@ -323,6 +380,7 @@ namespace Kernel
{
VERIFY_STI();
DISABLE_INTERRUPTS();
m_lock.lock();
ASSERT(m_current_thread);
@ -332,7 +390,7 @@ namespace Kernel
void Scheduler::unblock_threads(Semaphore* semaphore)
{
CriticalScope critical;
LockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end();)
{
@ -353,7 +411,7 @@ namespace Kernel
void Scheduler::unblock_thread(pid_t tid)
{
CriticalScope _;
LockGuard _(m_lock);
for (auto it = m_sleeping_threads.begin(); it != m_sleeping_threads.end(); it++)
{

View File

@ -1,4 +1,3 @@
#include <kernel/CriticalScope.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/Heap.h>
#include <kernel/Memory/PageTable.h>

View File

@ -224,15 +224,12 @@ namespace Kernel
{
ASSERT(buffer.size() >= sector_count * sector_size());
{
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return read_sectors_impl(lba, sector_count, buffer);
}
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return read_sectors_impl(lba, sector_count, buffer);
for (uint64_t offset = 0; offset < sector_count; offset++)
{
LockGuard _(m_mutex);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->read_from_cache(lba + offset, sector_buffer))
continue;
@ -247,15 +244,12 @@ namespace Kernel
{
ASSERT(buffer.size() >= sector_count * sector_size());
{
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return write_sectors_impl(lba, sector_count, buffer);
}
LockGuard _(m_mutex);
if (!m_disk_cache.has_value())
return write_sectors_impl(lba, sector_count, buffer);
for (uint8_t offset = 0; offset < sector_count; offset++)
{
LockGuard _(m_mutex);
auto sector_buffer = buffer.slice(offset * sector_size(), sector_size());
if (m_disk_cache->write_to_cache(lba + offset, sector_buffer, true).is_error())
TRY(write_sectors_impl(lba + offset, 1, sector_buffer));

View File

@ -1,5 +1,4 @@
#include <BAN/Array.h>
#include <kernel/CriticalScope.h>
#include <kernel/Device/DeviceNumbers.h>
#include <kernel/FS/DevFS/FileSystem.h>
#include <kernel/IDT.h>
@ -235,7 +234,7 @@ namespace Kernel
uint8_t buffer[128];
{
CriticalScope _;
LockGuard _(m_lock);
if (m_input.empty())
return;
uint8_t* ptr = buffer;

View File

@ -310,16 +310,17 @@ namespace Kernel
void TTY::putchar(uint8_t ch)
{
SpinLockGuard _(m_write_lock);
LockGuard _(m_mutex);
if (m_tty_ctrl.draw_graphics)
putchar_impl(ch);
}
BAN::ErrorOr<size_t> TTY::read_impl(off_t, BAN::ByteSpan buffer)
{
LockGuard _(m_mutex);
while (!m_output.flush)
{
LockFreeGuard _(m_mutex);
LockFreeGuard free(m_mutex);
TRY(Thread::current().block_or_eintr_indefinite(m_output.semaphore));
}
@ -345,7 +346,7 @@ namespace Kernel
BAN::ErrorOr<size_t> TTY::write_impl(off_t, BAN::ConstByteSpan buffer)
{
SpinLockGuard _(m_write_lock);
LockGuard _(m_mutex);
for (size_t i = 0; i < buffer.size(); i++)
putchar(buffer[i]);
return buffer.size();
@ -354,7 +355,7 @@ namespace Kernel
void TTY::putchar_current(uint8_t ch)
{
ASSERT(s_tty);
SpinLockGuard _(s_tty->m_write_lock);
LockGuard _(s_tty->m_mutex);
s_tty->putchar(ch);
}

View File

@ -57,6 +57,7 @@ namespace Kernel
void VirtualTTY::clear()
{
LockGuard _(m_write_lock);
for (uint32_t i = 0; i < m_width * m_height; i++)
m_buffer[i] = { .foreground = m_foreground, .background = m_background, .codepoint = ' ' };
m_terminal_driver->clear(m_background);
@ -64,6 +65,7 @@ namespace Kernel
void VirtualTTY::set_font(const Kernel::Font& font)
{
LockGuard _(m_write_lock);
m_terminal_driver->set_font(font);
uint32_t new_width = m_terminal_driver->width();
@ -306,6 +308,8 @@ namespace Kernel
void VirtualTTY::putchar_impl(uint8_t ch)
{
LockGuard _(m_write_lock);
uint32_t codepoint = ch;
switch (m_state)

View File

@ -3,6 +3,7 @@
#include <kernel/GDT.h>
#include <kernel/InterruptController.h>
#include <kernel/InterruptStack.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/Memory/kmalloc.h>
#include <kernel/Process.h>
#include <kernel/Scheduler.h>
@ -29,10 +30,10 @@ namespace Kernel
void Thread::terminate()
{
CriticalScope _;
LockGuard _(m_lock);
m_state = Thread::State::Terminated;
if (this == &Thread::current())
Scheduler::get().execute_current_thread();
Scheduler::get().reschedule_current_no_save();
}
static pid_t s_next_tid = 1;
@ -46,14 +47,7 @@ namespace Kernel
BAN::ScopeGuard thread_deleter([thread] { delete thread; });
// Initialize stack and registers
thread->m_stack = TRY(VirtualRange::create_to_vaddr_range(
PageTable::kernel(),
KERNEL_OFFSET,
~(uintptr_t)0,
m_kernel_stack_size,
PageTable::Flags::ReadWrite | PageTable::Flags::Present,
true
));
thread->m_stack = TRY(VirtualRange::create_kmalloc(m_kernel_stack_size));
thread->m_rsp = thread->stack_base() + thread->stack_size();
thread->m_rip = (uintptr_t)entry;
@ -137,6 +131,8 @@ namespace Kernel
BAN::ErrorOr<Thread*> Thread::clone(Process* new_process, uintptr_t rsp, uintptr_t rip)
{
LockGuard _(m_lock);
ASSERT(m_is_userspace);
ASSERT(m_state == State::Executing);
@ -162,6 +158,8 @@ namespace Kernel
void Thread::setup_exec()
{
LockGuard _(m_lock);
ASSERT(is_userspace());
m_state = State::NotStarted;
static entry_t entry_trampoline(
@ -178,9 +176,12 @@ namespace Kernel
// Signal mask is inherited
// Setup stack for returning
ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
uintptr_t offset = m_rsp % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
ASSERT_GTE(offset, 4 * sizeof(uintptr_t));
PageTable::with_fast_page(process().page_table().physical_address_of((m_rsp - 4 * sizeof(uintptr_t)) & PAGE_ADDR_MASK), [&] {
uintptr_t rsp = PageTable::fast_page() + offset;
write_to_stack(rsp, nullptr); // alignment
write_to_stack(rsp, this);
write_to_stack(rsp, &Thread::on_exit);
@ -191,6 +192,8 @@ namespace Kernel
void Thread::setup_process_cleanup()
{
LockGuard _(m_lock);
m_state = State::NotStarted;
static entry_t entry(
[](void* process)
@ -206,9 +209,12 @@ namespace Kernel
m_signal_pending_mask = 0;
m_signal_block_mask = ~0ull;
ASSERT_EQ(m_rsp % PAGE_SIZE, 0u);
PageTable::with_fast_page(process().page_table().physical_address_of(m_rsp - PAGE_SIZE), [&] {
uintptr_t rsp = PageTable::fast_page() + PAGE_SIZE;
uintptr_t offset = m_rsp % PAGE_SIZE;
if (offset == 0)
offset = PAGE_SIZE;
ASSERT_GTE(offset, 4 * sizeof(uintptr_t));
PageTable::with_fast_page(process().page_table().physical_address_of((m_rsp - 4 * sizeof(uintptr_t)) & PAGE_ADDR_MASK), [&] {
uintptr_t rsp = PageTable::fast_page() + offset;
write_to_stack(rsp, nullptr); // alignment
write_to_stack(rsp, this);
write_to_stack(rsp, &Thread::on_exit);
@ -219,6 +225,7 @@ namespace Kernel
bool Thread::is_interrupted_by_signal()
{
LockGuard _(m_lock);
while (can_add_signal_to_execute())
handle_signal();
return will_execute_signal();
@ -226,6 +233,7 @@ namespace Kernel
bool Thread::can_add_signal_to_execute() const
{
LockGuard _(m_lock);
if (!is_userspace() || m_state != State::Executing)
return false;
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
@ -237,6 +245,7 @@ namespace Kernel
bool Thread::will_execute_signal() const
{
LockGuard _(m_lock);
if (!is_userspace() || m_state != State::Executing)
return false;
auto& interrupt_stack = *reinterpret_cast<InterruptStack*>(interrupt_stack_base() + interrupt_stack_size() - sizeof(InterruptStack));
@ -245,6 +254,7 @@ namespace Kernel
void Thread::handle_signal(int signal)
{
LockGuard _(m_lock);
ASSERT(!interrupts_enabled());
ASSERT(&Thread::current() == this);
ASSERT(is_userspace());
@ -338,6 +348,7 @@ namespace Kernel
bool Thread::add_signal(int signal)
{
LockGuard _(m_lock);
ASSERT(!interrupts_enabled());
uint64_t mask = 1ull << signal;
if (!(m_signal_block_mask & mask))
@ -380,6 +391,7 @@ namespace Kernel
void Thread::validate_stack() const
{
LockGuard _(m_lock);
if (stack_base() <= m_rsp && m_rsp <= stack_base() + stack_size())
return;
if (interrupt_stack_base() <= m_rsp && m_rsp <= interrupt_stack_base() + interrupt_stack_size())

View File

@ -1,4 +1,5 @@
#include <BAN/ScopeGuard.h>
#include <kernel/Lock/LockGuard.h>
#include <kernel/ACPI.h>
#include <kernel/IDT.h>
#include <kernel/InterruptController.h>
@ -244,7 +245,7 @@ namespace Kernel
if (m_is_64bit)
return regs.main_counter.full;
CriticalScope _;
LockGuard _(m_lock);
uint32_t current_low = regs.main_counter.low;
uint32_t wraps = m_32bit_wraps;
if (current_low < (uint32_t)m_last_ticks)
@ -256,19 +257,21 @@ namespace Kernel
{
auto& regs = registers();
uint64_t current_ticks;
if (m_is_64bit)
current_ticks = regs.main_counter.full;
else
{
uint32_t current_low = regs.main_counter.low;
if (current_low < (uint32_t)m_last_ticks)
m_32bit_wraps++;
current_ticks = ((uint64_t)m_32bit_wraps << 32) | current_low;
}
LockGuard _(m_lock);
m_last_ticks = current_ticks;
uint64_t current_ticks;
if (m_is_64bit)
current_ticks = regs.main_counter.full;
else
{
uint32_t current_low = regs.main_counter.low;
if (current_low < (uint32_t)m_last_ticks)
m_32bit_wraps++;
current_ticks = ((uint64_t)m_32bit_wraps << 32) | current_low;
}
m_last_ticks = current_ticks;
}
Scheduler::get().timer_reschedule();
}